v6.1.0 release

-----BEGIN PGP SIGNATURE-----
 
 iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmElJYgZHHBldGVyLm1h
 eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3hUEEACqIL6/qyHjqKVX5+0zgJpo
 0iMVUjKGtQhHFdgZi/JI/uUcT6FsClGqx+XRtxWUP10kGV6iPF+GVlnXSx+iZCui
 +Is1p9ajLuDcrTj+nOOI9r9Km05C25Ub0A3YropzHMTKx64M3CwXJKvSz42lFHur
 A1qx1BefzC0JzVv8fa8TAca0JKGC+Hv/2Lstw/ctxdKIG94mcFjlQrzK4DZxsu/g
 QC3kZ7ABJu8UJTzB0KgtD2krsnnO5smePkS9WVlnyK86KWtp99U/T7HyCmRsbxy0
 EhPzK61bumMynFZaqOsoj+ppE0ued6hEHdKASCXGa/GEa/RE2YbW/gq9nJ1OR/ii
 EPYljNR6Jv5NfO79zDC+jW9EOk1JnP1A4x95WVSJSaJr+OEKgocxouI2IV1o0epf
 1xHI1aBJauIZ7mklGaCWJ/uvMoxcH+ngqbVDCX9jnzYYKadoWu8Tv5Zyam6bOhKC
 Y4FOfnp/a+wjS8eMNmn+ios/WjTss9AILyn6GaTVLSxJ278Iqz28nlRAVjiKDiTS
 KQYX9mp0ScpUzZBhES4mzlPpCyRc0fC1vM1jfRwn+N8cPslBLOEDB6gLnwpRmadO
 26CRiuA8d3qu3Efkdqz6xRcnIM9opPG/4Gj/SZw4V0kS87z/Y5o1nQiZgaU1tZ8q
 tOXcPDnMXJbCly7veMRfLQ==
 =XneO
 -----END PGP SIGNATURE-----

Merge tag 'v6.1.0' into merge/qemu-v6.1.0

v6.1.0 release
Matt Borgerson 2021-09-04 15:11:03 -07:00
commit 0e63232072
2555 changed files with 125585 additions and 89914 deletions


@ -1,61 +1,6 @@
env:
CIRRUS_CLONE_DEPTH: 1
freebsd_12_task:
freebsd_instance:
image_family: freebsd-12-2
cpu: 8
memory: 8G
install_script:
- ASSUME_ALWAYS_YES=yes pkg bootstrap -f ;
- pkg install -y bash curl cyrus-sasl git glib gmake gnutls gsed
nettle perl5 pixman pkgconf png usbredir ninja
script:
- mkdir build
- cd build
# TODO: Enable gnutls again once FreeBSD's libtasn1 got fixed
# See: https://gitlab.com/gnutls/libtasn1/-/merge_requests/71
- ../configure --enable-werror --disable-gnutls
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- gmake -j$(sysctl -n hw.ncpu)
- gmake -j$(sysctl -n hw.ncpu) check V=1
macos_task:
osx_instance:
image: catalina-base
install_script:
- brew install pkg-config python gnu-sed glib pixman make sdl2 bash ninja
script:
- mkdir build
- cd build
- ../configure --python=/usr/local/bin/python3 --enable-werror
--extra-cflags='-Wno-error=deprecated-declarations'
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- gmake -j$(sysctl -n hw.ncpu)
- gmake check-unit V=1
- gmake check-block V=1
- gmake check-qapi-schema V=1
- gmake check-softfloat V=1
- gmake check-qtest-x86_64 V=1
macos_xcode_task:
osx_instance:
# this is an alias for the latest Xcode
image: catalina-xcode
install_script:
- brew install pkg-config gnu-sed glib pixman make sdl2 bash ninja
script:
- mkdir build
- cd build
- ../configure --extra-cflags='-Wno-error=deprecated-declarations' --enable-modules
--enable-werror --cc=clang || { cat config.log meson-logs/meson-log.txt; exit 1; }
- gmake -j$(sysctl -n hw.ncpu)
- gmake check-unit V=1
- gmake check-block V=1
- gmake check-qapi-schema V=1
- gmake check-softfloat V=1
- gmake check-qtest-x86_64 V=1
windows_msys2_task:
timeout_in: 90m
windows_container:
@ -67,7 +12,7 @@ windows_msys2_task:
CIRRUS_SHELL: powershell
MSYS: winsymlinks:nativestrict
MSYSTEM: MINGW64
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2021-01-05/msys2-base-x86_64-20210105.sfx.exe
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2021-04-19/msys2-base-x86_64-20210419.sfx.exe
MSYS2_FINGERPRINT: 0
MSYS2_PACKAGES: "
diffutils git grep make pkg-config sed
@ -130,7 +75,7 @@ windows_msys2_task:
taskkill /F /FI "MODULES eq msys-2.0.dll"
tasklist
C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true"
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Suu --overwrite=*"
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*"
Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date


@ -14,11 +14,11 @@ issues:
at https://gitlab.com/qemu-project/qemu.git.
The project does not process issues filed on GitHub.
The project issues are tracked on Launchpad:
https://bugs.launchpad.net/qemu
The project issues are tracked on GitLab:
https://gitlab.com/qemu-project/qemu/-/issues
QEMU welcomes bug report contributions. You can file new ones on:
https://bugs.launchpad.net/qemu/+filebug
https://gitlab.com/qemu-project/qemu/-/issues/new
pulls:
comment: |

.gitignore (vendored, 4 changed lines)

@ -13,7 +13,9 @@ GTAGS
*~
*.ast_raw
*.depend_raw
build.log
*.swp
*.patch
build.log
/dist
/xemu.ini


@ -0,0 +1,81 @@
.native_build_job_template:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- if test -n "$LD_JOBS";
then
scripts/git-submodule.sh update meson ;
fi
- mkdir build
- cd build
- if test -n "$TARGETS";
then
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS --target-list="$TARGETS" ;
else
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS ;
fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
- make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS";
then
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
.native_test_job_template:
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- scripts/git-submodule.sh update
$(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
- cd build
- find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":"
- make NINJA=":" $MAKE_CHECK_ARGS
.acceptance_test_job_template:
extends: .native_test_job_template
cache:
key: "${CI_JOB_NAME}-cache"
paths:
- ${CI_PROJECT_DIR}/avocado-cache
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script:
- mkdir -p ~/.config/avocado
- echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
- echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']"
>> ~/.config/avocado/avocado.conf
- echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
>> ~/.config/avocado/avocado.conf
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
fi
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
after_script:
- cd build
- du -chs ${CI_PROJECT_DIR}/avocado-cache
rules:
# Only run these jobs if running on the mainstream namespace,
# or if the user set the QEMU_CI_AVOCADO_TESTING variable (either
# in its namespace setting or via git-push option, see documentation
# in /.gitlab-ci.yml of this repository).
- if: '$CI_PROJECT_NAMESPACE == "qemu-project"'
when: on_success
- if: '$QEMU_CI_AVOCADO_TESTING'
when: on_success
# Otherwise, set to manual (the jobs are created but not run).
- when: manual
allow_failure: true
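
The ${LD_JOBS:+--meson=git} expansion in the configure lines above adds the
--meson=git option only when LD_JOBS is set (as the CFI jobs below do). A
minimal shell sketch of that behaviour:

  # ${VAR:+word} expands to "word" when VAR is set and non-empty, else to nothing.
  LD_JOBS=1
  echo ../configure ${LD_JOBS:+--meson=git}   # prints: ../configure --meson=git
  unset LD_JOBS
  echo ../configure ${LD_JOBS:+--meson=git}   # prints: ../configure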

.gitlab-ci.d/buildtest.yml (new file, 724 lines)

@ -0,0 +1,724 @@
include:
- local: '/.gitlab-ci.d/buildtest-template.yml'
build-system-alpine:
extends: .native_build_job_template
needs:
- job: amd64-alpine-container
variables:
IMAGE: alpine
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
artifacts:
expire_in: 2 days
paths:
- .git-submodule-status
- build
check-system-alpine:
extends: .native_test_job_template
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check
acceptance-system-alpine:
extends: .acceptance_test_job_template
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check-acceptance
build-system-ubuntu:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-slirp=system
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-ubuntu:
extends: .native_test_job_template
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
acceptance-system-ubuntu:
extends: .acceptance_test_job_template
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-acceptance
build-system-debian:
extends: .native_build_job_template
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-debian:
extends: .native_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check
acceptance-system-debian:
extends: .acceptance_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-acceptance
build-system-fedora:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
--enable-fdt=system --enable-slirp=system --enable-capstone=system
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-fedora:
extends: .native_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-system-fedora:
extends: .acceptance_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
build-system-centos:
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
--enable-modules --enable-trace-backends=dtrace
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-centos:
extends: .native_test_job_template
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check
acceptance-system-centos:
extends: .acceptance_test_job_template
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check-acceptance
build-system-opensuse:
extends: .native_build_job_template
needs:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-opensuse:
extends: .native_test_job_template
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check
acceptance-system-opensuse:
extends: .acceptance_test_job_template
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-acceptance
build-disabled:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS:
--disable-attr
--disable-auth-pam
--disable-avx2
--disable-bochs
--disable-brlapi
--disable-bzip2
--disable-cap-ng
--disable-capstone
--disable-cloop
--disable-coroutine-pool
--disable-curl
--disable-curses
--disable-dmg
--disable-docs
--disable-gcrypt
--disable-glusterfs
--disable-gnutls
--disable-gtk
--disable-guest-agent
--disable-iconv
--disable-keyring
--disable-kvm
--disable-libiscsi
--disable-libpmem
--disable-libssh
--disable-libudev
--disable-libusb
--disable-libxml2
--disable-linux-aio
--disable-live-block-migration
--disable-lzo
--disable-malloc-trim
--disable-mpath
--disable-nettle
--disable-numa
--disable-opengl
--disable-parallels
--disable-pie
--disable-qcow1
--disable-qed
--disable-qom-cast-debug
--disable-rbd
--disable-rdma
--disable-replication
--disable-sdl
--disable-seccomp
--disable-slirp
--disable-smartcard
--disable-snappy
--disable-sparse
--disable-spice
--disable-strip
--disable-tpm
--disable-usb-redir
--disable-vdi
--disable-vhost-crypto
--disable-vhost-net
--disable-vhost-scsi
--disable-vhost-kernel
--disable-vhost-user
--disable-vhost-vdpa
--disable-vhost-vsock
--disable-virglrenderer
--disable-vnc
--disable-vte
--disable-vvfat
--disable-xen
--disable-zstd
TARGETS: arm-softmmu i386-softmmu ppc64-softmmu mips64-softmmu
s390x-softmmu i386-linux-user
MAKE_CHECK_ARGS: check-qtest SPEED=slow
# This job explicitly disables TCG (--disable-tcg); KVM is detected by
# the configure script. The container doesn't contain Xen headers, so
# the Xen accelerator is not detected / selected. As a result it builds
# i386-softmmu and x86_64-softmmu with KVM being the single accelerator
# available.
# Also use a different coroutine implementation (which is only really of
# interest to KVM users, i.e. with TCG disabled)
build-tcg-disabled:
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
script:
- mkdir build
- cd build
- ../configure --disable-tcg --audio-drv-list="" --with-coroutine=ucontext
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
- make -j"$JOBS"
- make check-unit
- make check-qapi-schema
- cd tests/qemu-iotests/
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
170 171 183 184 192 194 208 221 222 226 227 236 253 277
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
208 209 216 218 222 227 234 246 247 248 250 254 255 257 258
260 261 262 263 264 270 272 273 277 279
build-user:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system
MAKE_CHECK_ARGS: check-tcg
build-user-static:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system --static
MAKE_CHECK_ARGS: check-tcg
# Because the hexagon cross-compiler takes so long to build, we don't rely
# on the CI system to build it and hence this job has an optional dependency
# declared. The image is manually uploaded.
build-user-hexagon:
extends: .native_build_job_template
needs:
job: hexagon-cross-container
optional: true
variables:
IMAGE: debian-hexagon-cross
TARGETS: hexagon-linux-user
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
MAKE_CHECK_ARGS: check-tcg
# Only build the softmmu targets we have check-tcg tests for
build-some-softmmu:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --enable-debug
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
MAKE_CHECK_ARGS: check-tcg
# We build tricore in a very minimal tricore-only container
build-tricore-softmmu:
extends: .native_build_job_template
needs:
job: tricore-debian-cross-container
variables:
IMAGE: debian-tricore-cross
CONFIGURE_ARGS: --disable-tools --disable-fdt --enable-debug
TARGETS: tricore-softmmu
MAKE_CHECK_ARGS: check-tcg
clang-system:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-qtest check-tcg
clang-user:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
--target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
MAKE_CHECK_ARGS: check-unit check-tcg
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
# On gitlab runners, the default value sometimes ends up calling two lds concurrently
# and triggers an Out-Of-Memory error.
#
# Since slirp callbacks are used in QEMU Timers, slirp needs to be compiled together
# with QEMU and linked as a static library to avoid false positives in CFI checks.
# This can be accomplished by using --enable-slirp=git, which avoids the use of
# a system-wide version of the library.
#
# Split into three sets of build/check/acceptance to limit the execution time of each
# job.
build-cfi-aarch64:
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git
TARGETS: aarch64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
rules:
# FIXME: This job is often failing, likely due to out-of-memory problems in
# the constrained containers of the shared runners. Thus this is marked as
# manual until the situation has been solved.
- when: manual
allow_failure: true
check-cfi-aarch64:
extends: .native_test_job_template
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-aarch64:
extends: .acceptance_test_job_template
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
build-cfi-ppc64-s390x:
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git
TARGETS: ppc64-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
rules:
# FIXME: This job is often failing, likely due to out-of-memory problems in
# the constrained containers of the shared runners. Thus this is marked as
# manual until the situation has been solved.
- when: manual
allow_failure: true
check-cfi-ppc64-s390x:
extends: .native_test_job_template
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-ppc64-s390x:
extends: .acceptance_test_job_template
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
build-cfi-x86_64:
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git
TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-x86_64:
extends: .native_test_job_template
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-x86_64:
extends: .acceptance_test_job_template
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
tsan-build:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
--enable-trace-backends=ust --enable-fdt=system --enable-slirp=system
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
# These targets are on the way out
build-deprecated:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools
MAKE_CHECK_ARGS: build-tcg
TARGETS: ppc64abi32-linux-user
artifacts:
expire_in: 2 days
paths:
- build
# We split the check-tcg step as test failures are expected but we still
# want to catch the build breaking.
check-deprecated:
extends: .native_test_job_template
needs:
- job: build-deprecated
artifacts: true
variables:
IMAGE: debian-all-test-cross
MAKE_CHECK_ARGS: check-tcg
allow_failure: true
# gprof/gcov are GCC features
build-gprof-gcov:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-gprof --enable-gcov
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
artifacts:
expire_in: 1 days
paths:
- build
check-gprof-gcov:
extends: .native_test_job_template
needs:
- job: build-gprof-gcov
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
after_script:
- ${CI_PROJECT_DIR}/scripts/ci/coverage-summary.sh
build-oss-fuzz:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
script:
- mkdir build-oss-fuzz
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
./scripts/oss-fuzz/build.sh
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
| grep -v slirp); do
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
echo Testing ${fuzzer} ... ;
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
done
# Unrelated to fuzzer: run some tests with -fsanitize=address
- cd build-oss-fuzz && make check-qtest-i386 check-unit
build-tci:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
script:
- TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
- mkdir build
- cd build
- ../configure --enable-tcg-interpreter
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)" || { cat config.log meson-logs/meson-log.txt && exit 1; }
- make -j"$JOBS"
- make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
- for tg in $TARGETS ; do
export QTEST_QEMU_BINARY="./qemu-system-${tg}" ;
./tests/qtest/boot-serial-test || exit 1 ;
./tests/qtest/cdrom-test || exit 1 ;
done
- QTEST_QEMU_BINARY="./qemu-system-x86_64" ./tests/qtest/pxe-test
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
- make check-tcg
# Alternate coroutine implementations are only really of interest to KVM users.
# However, we can't test against KVM on GitLab CI, so we can only run unit tests.
build-coroutine-sigaltstack:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
--enable-trace-backends=ftrace
MAKE_CHECK_ARGS: check-unit
# Check our reduced build configurations
build-without-default-devices:
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --without-default-devices --disable-user
build-without-default-features:
extends: .native_build_job_template
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --without-default-features --disable-user
--target-list-exclude=arm-softmmu,i386-softmmu,mipsel-softmmu,mips64-softmmu,ppc-softmmu
MAKE_CHECK_ARGS: check-unit
build-libvhost-user:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
needs:
job: amd64-fedora-container
before_script:
- dnf install -y meson ninja-build
script:
- mkdir subprojects/libvhost-user/build
- cd subprojects/libvhost-user/build
- meson
- ninja
# No targets are built here, just tools, docs, and unit tests. This
# also feeds into the eventual documentation deployment steps later
build-tools-and-docs-debian:
extends: .native_build_job_template
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-unit check-softfloat ctags TAGS cscope
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
artifacts:
expire_in: 2 days
paths:
- build
# Prepare for GitLab pages deployment. Anything copied into the
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT
#
# GitLab publishes from any branch that triggers a CI pipeline
#
# For the main repo we don't want to publish from 'staging'
# since that content may not be pushed, nor do we wish to
# publish from 'stable-NNN' branches as that content is outdated.
# Thus we restrict to just the default branch
#
# For contributor forks we want to publish from any repo so
# that users can see the results of their commits, regardless
# of what topic branch they're currently using
pages:
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
stage: test
needs:
- job: build-tools-and-docs-debian
script:
- mkdir -p public
# HTML-ised source tree
- make gtags
- htags -anT --tree-view=filetree -m qemu_init
-t "Welcome to the QEMU sourcecode"
- mv HTML public/src
# Project documentation
- make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/
artifacts:
paths:
- public
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
when: on_success
- if: '$CI_PROJECT_NAMESPACE == "qemu-project"'
when: never
- if: '$CI_PROJECT_NAMESPACE != "qemu-project"'
when: on_success
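
For reference, the --target-list argument that the build-tci job above
assembles with a shell loop expands, for the TARGETS value shown, into a
single space-separated argument:

  # Sketch of the command substitution used in the build-tci job above.
  TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
  echo --target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
  # prints: --target-list=aarch64-softmmu alpha-softmmu arm-softmmu hppa-softmmu m68k-softmmu microblaze-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu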

.gitlab-ci.d/cirrus.yml (new file, 87 lines)

@ -0,0 +1,87 @@
# Jobs that we delegate to Cirrus CI because they require an operating
# system other than Linux. These jobs will only run if the required
# setup has been performed on the GitLab account.
#
# The Cirrus CI configuration is generated by replacing target-specific
# variables in a generic template: some of these variables are provided
# when the GitLab CI job is defined, others are taken from a shell
# snippet generated using lcitool.
#
# Note that the $PATH environment variable has to be treated with
# special care, because we can't just override it at the GitLab CI job
# definition level or we risk breaking it completely.
.cirrus_build_job:
stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
needs: []
allow_failure: true
script:
- source .gitlab-ci.d/cirrus/$NAME.vars
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
-e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
-e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
-e "s|[@]CIRRUS_VM_INSTANCE_TYPE@|$CIRRUS_VM_INSTANCE_TYPE|g"
-e "s|[@]CIRRUS_VM_IMAGE_SELECTOR@|$CIRRUS_VM_IMAGE_SELECTOR|g"
-e "s|[@]CIRRUS_VM_IMAGE_NAME@|$CIRRUS_VM_IMAGE_NAME|g"
-e "s|[@]CIRRUS_VM_CPUS@|$CIRRUS_VM_CPUS|g"
-e "s|[@]CIRRUS_VM_RAM@|$CIRRUS_VM_RAM|g"
-e "s|[@]UPDATE_COMMAND@|$UPDATE_COMMAND|g"
-e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g"
-e "s|[@]PATH@|$PATH_EXTRA${PATH_EXTRA:+:}\$PATH|g"
-e "s|[@]PKG_CONFIG_PATH@|$PKG_CONFIG_PATH|g"
-e "s|[@]PKGS@|$PKGS|g"
-e "s|[@]MAKE@|$MAKE|g"
-e "s|[@]PYTHON@|$PYTHON|g"
-e "s|[@]PIP3@|$PIP3|g"
-e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g"
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
-e "s|[@]TEST_TARGETSS@|$TEST_TARGETSS|g"
<.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
- cat .gitlab-ci.d/cirrus/$NAME.yml
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
rules:
- if: "$CIRRUS_GITHUB_REPO && $CIRRUS_API_TOKEN"
x64-freebsd-12-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-12
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-12-2
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update
INSTALL_COMMAND: pkg install -y
# TODO: Enable gnutls again once FreeBSD's libtasn1 got fixed
# See: https://gitlab.com/gnutls/libtasn1/-/merge_requests/71
CONFIGURE_ARGS: --disable-gnutls
TEST_TARGETS: check
x64-freebsd-13-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-0
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
x64-macos-11-base-build:
extends: .cirrus_build_job
variables:
NAME: macos-11
CIRRUS_VM_INSTANCE_TYPE: osx_instance
CIRRUS_VM_IMAGE_SELECTOR: image
CIRRUS_VM_IMAGE_NAME: big-sur-base
CIRRUS_VM_CPUS: 12
CIRRUS_VM_RAM: 24G
UPDATE_COMMAND: brew update
INSTALL_COMMAND: brew install
PATH_EXTRA: /usr/local/opt/ccache/libexec:/usr/local/opt/gettext/bin
PKG_CONFIG_PATH: /usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/ncurses/lib/pkgconfig:/usr/local/opt/readline/lib/pkgconfig
TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
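
To make the substitution above concrete, here is an illustrative sketch of how
one placeholder line from the build.yml template included below is rewritten,
using the MAKE value from the freebsd-12.vars file further down in this commit:

  # Assumes NAME=freebsd-12; the .vars values are shown later in this commit.
  source .gitlab-ci.d/cirrus/freebsd-12.vars
  echo 'MAKE: "@MAKE@"' | sed -e "s|[@]MAKE@|$MAKE|g"
  # prints: MAKE: "/usr/local/bin/gmake"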


@ -0,0 +1,54 @@
Cirrus CI integration
=====================
GitLab CI shared runners only provide a docker environment running on Linux.
While it is possible to provide private runners for non-Linux platforms, this
is not something most contributors/maintainers will wish to do.
To work around this limitation, we take advantage of `Cirrus CI`_'s free
offering: more specifically, we use the `cirrus-run`_ script to trigger Cirrus
CI jobs from GitLab CI jobs so that Cirrus CI job output is integrated into
the main GitLab CI pipeline dashboard.
There is, however, some one-time setup required. If you want FreeBSD and macOS
builds to happen when you push to your GitLab repository, you need to
* set up a GitHub repository for the project, eg. ``yourusername/qemu``.
This repository needs to exist for cirrus-run to work, but it doesn't need to
be kept up to date, so you can create it and then forget about it;
* enable the `Cirrus CI GitHub app`_ for your GitHub account;
* sign up for Cirrus CI. It's enough to log into the website using your GitHub
account;
* grab an API token from the `Cirrus CI settings`_ page;
* it may be necessary to push an empty ``.cirrus.yml`` file to your github fork
for Cirrus CI to properly recognize the project. You can check whether
Cirrus CI knows about your project by navigating to:
``https://cirrus-ci.com/yourusername/qemu``
* in the *CI/CD / Variables* section of the settings page for your GitLab
repository, create two new variables:
* ``CIRRUS_GITHUB_REPO``, containing the name of the GitHub repository
created earlier, eg. ``yourusername/qemu``;
* ``CIRRUS_API_TOKEN``, containing the Cirrus CI API token generated earlier.
This variable **must** be marked as *Masked*, because anyone with knowledge
of it can impersonate you as far as Cirrus CI is concerned.
Neither of these variables should be marked as *Protected*, because in
general you'll want to be able to trigger Cirrus CI builds from non-protected
branches.
Once this one-time setup is complete, you can just keep pushing to your GitLab
repository as usual and you'll automatically get the additional CI coverage.
.. _Cirrus CI GitHub app: https://github.com/marketplace/cirrus-ci
.. _Cirrus CI settings: https://cirrus-ci.com/settings/profile/
.. _Cirrus CI: https://cirrus-ci.com/
.. _cirrus-run: https://github.com/sio/cirrus-run/
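
The two CI/CD variables can also be created from the command line through
GitLab's project variables API instead of the web UI. A hedged sketch, with
the project ID, access token and values as placeholders:

  # Placeholders: <project-id>, <gitlab-access-token>, <cirrus-api-token>.
  curl --request POST --header "PRIVATE-TOKEN: <gitlab-access-token>" \
       "https://gitlab.com/api/v4/projects/<project-id>/variables" \
       --form "key=CIRRUS_GITHUB_REPO" --form "value=yourusername/qemu"
  curl --request POST --header "PRIVATE-TOKEN: <gitlab-access-token>" \
       "https://gitlab.com/api/v4/projects/<project-id>/variables" \
       --form "key=CIRRUS_API_TOKEN" --form "value=<cirrus-api-token>" --form "masked=true"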


@ -0,0 +1,35 @@
@CIRRUS_VM_INSTANCE_TYPE@:
@CIRRUS_VM_IMAGE_SELECTOR@: @CIRRUS_VM_IMAGE_NAME@
cpu: @CIRRUS_VM_CPUS@
memory: @CIRRUS_VM_RAM@
env:
CIRRUS_CLONE_DEPTH: 1
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
PATH: "@PATH@"
PKG_CONFIG_PATH: "@PKG_CONFIG_PATH@"
PYTHON: "@PYTHON@"
MAKE: "@MAKE@"
CONFIGURE_ARGS: "@CONFIGURE_ARGS@"
TEST_TARGETS: "@TEST_TARGETS@"
build_task:
install_script:
- @UPDATE_COMMAND@
- @INSTALL_COMMAND@ @PKGS@
- if test -n "@PYPI_PKGS@" ; then @PIP3@ install @PYPI_PKGS@ ; fi
clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"
- git reset --hard "$CI_COMMIT_SHA"
build_script:
- mkdir build
- cd build
- ../configure --enable-werror $CONFIGURE_ARGS
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- $MAKE -j$(sysctl -n hw.ncpu)
- for TARGET in $TEST_TARGETS ;
do
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ;
done


@ -0,0 +1,13 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci/-/commit/c7e275ab27ac0dcd09da290817b9adeea1fd1eb1
PACKAGING_COMMAND='pkg'
CCACHE='/usr/local/bin/ccache'
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PYTHON='/usr/local/bin/python3'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 libxml2 llvm lttng-ust lzo2 meson ncurses nettle ninja opencv p5-Test-Harness perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'


@ -0,0 +1,13 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-13 qemu
#
# https://gitlab.com/libvirt/libvirt-ci/-/commit/c7e275ab27ac0dcd09da290817b9adeea1fd1eb1
PACKAGING_COMMAND='pkg'
CCACHE='/usr/local/bin/ccache'
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PYTHON='/usr/local/bin/python3'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 libxml2 llvm lttng-ust lzo2 meson ncurses nettle ninja opencv p5-Test-Harness perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'


@ -0,0 +1,15 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables macos-11 qemu
#
# https://gitlab.com/libvirt/libvirt-ci/-/commit/c7e275ab27ac0dcd09da290817b9adeea1fd1eb1
PACKAGING_COMMAND='brew'
CCACHE='/usr/local/bin/ccache'
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PYTHON='/usr/local/bin/python3'
PIP3='/usr/local/bin/pip3'
PKGS='bash bc bzip2 capstone ccache cpanminus ctags curl dbus diffutils gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb libxml2 llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme virtualenv'
CPAN_PKGS='Test::Harness'


@ -0,0 +1,17 @@
include:
- local: '/.gitlab-ci.d/container-template.yml'
amd64-centos8-container:
extends: .container_job_template
variables:
NAME: centos8
amd64-fedora-container:
extends: .container_job_template
variables:
NAME: fedora
amd64-debian10-container:
extends: .container_job_template
variables:
NAME: debian10


@ -0,0 +1,192 @@
alpha-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-alpha-cross
amd64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-amd64-cross
amd64-debian-user-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-all-test-cross
arm64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-arm64-cross
arm64-test-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian11-container']
variables:
NAME: debian-arm64-test-cross
armel-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-armel-cross
armhf-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-armhf-cross
# We never want to build hexagon in the CI system and by default we
# always want to refer to the master registry where it lives.
hexagon-cross-container:
image: docker:stable
stage: containers
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project"'
when: never
- when: always
variables:
NAME: debian-hexagon-cross
GIT_DEPTH: 1
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- docker pull $COMMON_TAG
- docker tag $COMMON_TAG $TAG
- docker push "$TAG"
after_script:
- docker logout
hppa-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-hppa-cross
m68k-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-m68k-cross
mips64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips64-cross
mips64el-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips64el-cross
mips-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips-cross
mipsel-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mipsel-cross
powerpc-test-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian11-container']
variables:
NAME: debian-powerpc-test-cross
ppc64el-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-ppc64el-cross
riscv64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-riscv64-cross
s390x-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-s390x-cross
sh4-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-sh4-cross
sparc64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-sparc64-cross
tricore-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-tricore-cross
xtensa-debian-cross-container:
extends: .container_job_template
variables:
NAME: debian-xtensa-cross
cris-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-cris-cross
i386-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-i386-cross
win32-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-win32-cross
win64-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-win64-cross


@ -0,0 +1,21 @@
.container_job_template:
image: docker:stable
stage: containers
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- apk add python3
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- ./tests/docker/docker.py --engine docker build
-t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-r $CI_REGISTRY/qemu-project/qemu
- docker tag "qemu/$NAME" "$TAG"
- docker push "$TAG"
after_script:
- docker logout
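
As a rough illustration of how the two image tags exported above differ,
assuming a hypothetical fork at gitlab.com/alice/qemu building the alpine
image (CI_REGISTRY and CI_REGISTRY_IMAGE are GitLab's predefined variables):

  # Hypothetical values for a fork; the upstream pipeline uses qemu-project/qemu instead.
  CI_REGISTRY=registry.gitlab.com
  CI_REGISTRY_IMAGE=registry.gitlab.com/alice/qemu
  NAME=alpine
  echo "$CI_REGISTRY_IMAGE/qemu/$NAME:latest"          # TAG: the fork's own copy
  echo "$CI_REGISTRY/qemu-project/qemu/$NAME:latest"   # COMMON_TAG: the upstream copy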


@ -1,251 +1,45 @@
.container_job_template: &container_job_definition
image: docker:stable
stage: containers
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- apk add python3
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- docker pull "$TAG" || docker pull "$COMMON_TAG" || true
- ./tests/docker/docker.py --engine docker build
-t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-r $CI_REGISTRY_IMAGE
- docker tag "qemu/$NAME" "$TAG"
- docker push "$TAG"
after_script:
- docker logout
include:
- local: '/.gitlab-ci.d/container-core.yml'
- local: '/.gitlab-ci.d/container-cross.yml'
amd64-alpine-container:
<<: *container_job_definition
extends: .container_job_template
variables:
NAME: alpine
amd64-centos7-container:
<<: *container_job_definition
variables:
NAME: centos7
amd64-centos8-container:
<<: *container_job_definition
variables:
NAME: centos8
amd64-debian10-container:
<<: *container_job_definition
variables:
NAME: debian10
amd64-debian11-container:
<<: *container_job_definition
extends: .container_job_template
variables:
NAME: debian11
alpha-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-alpha-cross
amd64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-amd64-cross
amd64-debian-user-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-all-test-cross
amd64-debian-container:
<<: *container_job_definition
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-amd64
arm64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-arm64-cross
arm64-test-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian11-container']
variables:
NAME: debian-arm64-test-cross
armel-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-armel-cross
armhf-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-armhf-cross
hppa-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-hppa-cross
m68k-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-m68k-cross
mips64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips64-cross
mips64el-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips64el-cross
mips-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips-cross
mipsel-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mipsel-cross
powerpc-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-powerpc-cross
ppc64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-ppc64-cross
ppc64el-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-ppc64el-cross
riscv64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-riscv64-cross
s390x-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-s390x-cross
sh4-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-sh4-cross
sparc64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-sparc64-cross
tricore-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-tricore-cross
xtensa-debian-cross-container:
<<: *container_job_definition
variables:
NAME: debian-xtensa-cross
cris-fedora-cross-container:
<<: *container_job_definition
variables:
NAME: fedora-cris-cross
amd64-fedora-container:
<<: *container_job_definition
variables:
NAME: fedora
i386-fedora-cross-container:
<<: *container_job_definition
variables:
NAME: fedora-i386-cross
win32-fedora-cross-container:
<<: *container_job_definition
variables:
NAME: fedora-win32-cross
win64-fedora-cross-container:
<<: *container_job_definition
variables:
NAME: fedora-win64-cross
amd64-ubuntu1804-container:
<<: *container_job_definition
extends: .container_job_template
variables:
NAME: ubuntu1804
amd64-ubuntu2004-container:
<<: *container_job_definition
extends: .container_job_template
variables:
NAME: ubuntu2004
amd64-ubuntu-container:
<<: *container_job_definition
extends: .container_job_template
variables:
NAME: ubuntu
amd64-opensuse-leap-container:
<<: *container_job_definition
extends: .container_job_template
variables:
NAME: opensuse-leap
python-container:
extends: .container_job_template
variables:
NAME: python


@ -0,0 +1,47 @@
.cross_system_build_job:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 80m
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-user --target-list-exclude="arm-softmmu cris-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
- if grep -q "EXESUF=.exe" config-host.mak;
then make installer;
version="$(git describe --match v[0-9]*)";
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
fi
# Job to cross-build specific accelerators.
#
# Set the $ACCEL variable to select the specific accelerator (default to
# KVM), and set extra options (such disabling other accelerators) via the
# $EXTRA_CONFIGURE_OPTS variable.
.cross_accel_build_job:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 30m
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
.cross_user_build_job:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-system
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
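
The .cross_accel_build_job template above defaults the accelerator with the
${ACCEL:-kvm} expansion; a minimal sketch of the two cases used in the
crossbuilds jobs below:

  # ${VAR:-word} expands to $VAR when set and non-empty, otherwise to "word".
  unset ACCEL
  echo --enable-${ACCEL:-kvm}   # prints: --enable-kvm  (the default)
  ACCEL=xen
  echo --enable-${ACCEL:-kvm}   # prints: --enable-xen  (as in the cross-*-xen-only jobs)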


@ -1,44 +1,5 @@
.cross_system_build_job:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 80m
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-user --target-list-exclude="arm-softmmu cris-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu sh4-softmmu xtensa-softmmu"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
# Job to cross-build specific accelerators.
#
# Set the $ACCEL variable to select the specific accelerator (default to
# KVM), and set extra options (such disabling other accelerators) via the
# $ACCEL_CONFIGURE_OPTS variable.
.cross_accel_build_job:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 30m
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-tools --enable-${ACCEL:-kvm} $ACCEL_CONFIGURE_OPTS
- make -j$(expr $(nproc) + 1) all check-build
.cross_user_build_job:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-system
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
include:
- local: '/.gitlab-ci.d/crossbuild-template.yml'
cross-armel-system:
extends: .cross_system_build_job
@ -98,6 +59,15 @@ cross-i386-user:
IMAGE: fedora-i386-cross
MAKE_CHECK_ARGS: check
cross-i386-tci:
extends: .cross_accel_build_job
timeout: 60m
variables:
IMAGE: fedora-i386-cross
ACCEL: tcg-interpreter
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
MAKE_CHECK_ARGS: check check-tcg
cross-mips-system:
extends: .cross_system_build_job
needs:
@ -174,7 +144,15 @@ cross-s390x-kvm-only:
job: s390x-debian-cross-container
variables:
IMAGE: debian-s390x-cross
ACCEL_CONFIGURE_OPTS: --disable-tcg
EXTRA_CONFIGURE_OPTS: --disable-tcg
cross-mips64el-kvm-only:
extends: .cross_accel_build_job
needs:
job: mips64el-debian-cross-container
variables:
IMAGE: debian-mips64el-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu
cross-win32-system:
extends: .cross_system_build_job
@ -182,6 +160,11 @@ cross-win32-system:
job: win32-fedora-cross-container
variables:
IMAGE: fedora-win32-cross
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts:
paths:
- build/qemu-setup*.exe
cross-win64-system:
extends: .cross_system_build_job
@ -189,6 +172,11 @@ cross-win64-system:
job: win64-fedora-cross-container
variables:
IMAGE: fedora-win64-cross
CROSS_SKIP_TARGETS: or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
tricore-softmmu xtensaeb-softmmu
artifacts:
paths:
- build/qemu-setup*.exe
cross-amd64-xen-only:
extends: .cross_accel_build_job
@ -197,7 +185,7 @@ cross-amd64-xen-only:
variables:
IMAGE: debian-amd64-cross
ACCEL: xen
ACCEL_CONFIGURE_OPTS: --disable-tcg --disable-kvm
EXTRA_CONFIGURE_OPTS: --disable-tcg --disable-kvm
cross-arm64-xen-only:
extends: .cross_accel_build_job
@ -206,4 +194,4 @@ cross-arm64-xen-only:
variables:
IMAGE: debian-arm64-cross
ACCEL: xen
ACCEL_CONFIGURE_OPTS: --disable-tcg --disable-kvm
EXTRA_CONFIGURE_OPTS: --disable-tcg --disable-kvm


@ -0,0 +1,238 @@
# The CI jobs defined here require GitLab runners installed and
# registered on machines that match their operating system names,
# versions and architectures. This is in contrast to the other CI
# jobs that are intended to run on GitLab's "shared" runners.
# Unlike the default approach on "shared" runners, which is based on
# containers, the custom runners have no such *requirement*, as these
# jobs should be capable of running on operating systems with no
# compatible container implementation, or with no support from
# gitlab-runner. To avoid problems that gitlab-runner can cause while
# reusing the git repository, we enable the clone strategy, which
# guarantees a fresh repository on each job run.
variables:
GIT_STRATEGY: clone
# All ubuntu-18.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
ubuntu-18.04-s390x-all-linux-static:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_18.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
# --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
# --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
- mkdir build
- cd build
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
- make --output-sync -j`nproc` check-tcg V=1
ubuntu-18.04-s390x-all:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_18.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --disable-libssh
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-alldbg:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_18.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --enable-debug --disable-libssh
- make clean
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-clang:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_18.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
script:
- mkdir build
- cd build
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-tci:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_18.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --disable-libssh --enable-tcg-interpreter
- make --output-sync -j`nproc`
ubuntu-18.04-s390x-notcg:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_18.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
script:
- mkdir build
- cd build
- ../configure --disable-libssh --disable-tcg
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
# All ubuntu-20.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
ubuntu-20.04-aarch64-all-linux-static:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
# --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
# --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
- mkdir build
- cd build
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
- make --output-sync -j`nproc` check-tcg V=1
ubuntu-20.04-aarch64-all:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --disable-libssh
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-alldbg:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --enable-debug --disable-libssh
- make clean
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-clang:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
script:
- mkdir build
- cd build
- ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-tci:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --disable-libssh --enable-tcg-interpreter
- make --output-sync -j`nproc`
ubuntu-20.04-aarch64-notcg:
allow_failure: true
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
script:
- mkdir build
- cd build
- ../configure --disable-libssh --disable-tcg
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
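Note for downstream forks: every custom-runner job above is gated either on a qemu-project
staging branch or on the S390X_RUNNER_AVAILABLE variable, so the jobs stay dormant unless
such a runner is registered. A hedged sketch of how a fork with its own runner could opt in
from its CI configuration (the value "1" is illustrative; any non-empty value satisfies the
'- if: "$S390X_RUNNER_AVAILABLE"' rule, and the variable can equally be defined in the
project's CI/CD settings instead):

    variables:
      S390X_RUNNER_AVAILABLE: "1"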

View File

@ -1,10 +1,22 @@
docker-edk2:
stage: containers
rules: # Only run this job when the Dockerfile is modified
# All jobs needing docker-edk2 must use the same rules it uses.
.edk2_job_rules:
rules: # Only run this job when ...
- changes:
# this file is modified
- .gitlab-ci.d/edk2.yml
# or the Dockerfile is modified
- .gitlab-ci.d/edk2/Dockerfile
when: always
# or roms/edk2/ is modified (submodule updated)
- roms/edk2/*
when: on_success
- if: '$CI_COMMIT_REF_NAME =~ /^edk2/' # or the branch/tag starts with 'edk2'
when: on_success
- if: '$CI_COMMIT_MESSAGE =~ /edk2/i' # or last commit description contains 'EDK2'
when: on_success
docker-edk2:
extends: .edk2_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
@ -24,16 +36,9 @@ docker-edk2:
- docker push $IMAGE_TAG
build-edk2:
extends: .edk2_job_rules
stage: build
needs: ['docker-edk2']
rules: # Only run this job when ...
- changes: # ... roms/edk2/ is modified (submodule updated)
- roms/edk2/*
when: always
- if: '$CI_COMMIT_REF_NAME =~ /^edk2/' # or the branch/tag starts with 'edk2'
when: always
- if: '$CI_COMMIT_MESSAGE =~ /edk2/i' # or last commit description contains 'EDK2'
when: always
artifacts:
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/edk2*bz2
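Since both docker-edk2 and build-edk2 now use 'extends: .edk2_job_rules', GitLab copies the
template's rules: into each job, so the container build and the firmware build can no longer
fall out of sync. A hedged sketch of what build-edk2 effectively looks like once the template
is merged in (only the fields visible in this hunk are shown):

    build-edk2:
      stage: build
      needs: ['docker-edk2']
      rules:                      # inherited from .edk2_job_rules
        - changes:
            - .gitlab-ci.d/edk2.yml
            - .gitlab-ci.d/edk2/Dockerfile
            - roms/edk2/*
          when: on_success
        - if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
          when: on_success
        - if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
          when: on_success
      artifacts:
        paths:
          - pc-bios/edk2*bz2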

View File

@ -1,10 +1,23 @@
docker-opensbi:
stage: containers
rules: # Only run this job when the Dockerfile is modified
# All jobs needing docker-opensbi must use the same rules it uses.
.opensbi_job_rules:
rules: # Only run this job when ...
- changes:
# this file is modified
- .gitlab-ci.d/opensbi.yml
# or the Dockerfile is modified
- .gitlab-ci.d/opensbi/Dockerfile
when: always
when: on_success
- changes: # or roms/opensbi/ is modified (submodule updated)
- roms/opensbi/*
when: on_success
- if: '$CI_COMMIT_REF_NAME =~ /^opensbi/' # or the branch/tag starts with 'opensbi'
when: on_success
- if: '$CI_COMMIT_MESSAGE =~ /opensbi/i' # or last commit description contains 'OpenSBI'
when: on_success
docker-opensbi:
extends: .opensbi_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
@ -24,16 +37,9 @@ docker-opensbi:
- docker push $IMAGE_TAG
build-opensbi:
extends: .opensbi_job_rules
stage: build
needs: ['docker-opensbi']
rules: # Only run this job when ...
- changes: # ... roms/opensbi/ is modified (submodule updated)
- roms/opensbi/*
when: always
- if: '$CI_COMMIT_REF_NAME =~ /^opensbi/' # or the branch/tag starts with 'opensbi'
when: always
- if: '$CI_COMMIT_MESSAGE =~ /opensbi/i' # or last commit description contains 'OpenSBI'
when: always
artifacts:
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin

View File

@ -0,0 +1,13 @@
# This file contains the set of jobs run by the QEMU project:
# https://gitlab.com/qemu-project/qemu/-/pipelines
include:
- local: '/.gitlab-ci.d/stages.yml'
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml'
- local: '/.gitlab-ci.d/buildtest.yml'
- local: '/.gitlab-ci.d/static_checks.yml'
- local: '/.gitlab-ci.d/custom-runners.yml'
- local: '/.gitlab-ci.d/cirrus.yml'

.gitlab-ci.d/stages.yml (new file, 8 lines)
View File

@ -0,0 +1,8 @@
# Currently we have two build stages after our containers are built:
# - build (for traditional build and test or first stage build)
# - test (for test stages, using build artefacts from a build stage)
stages:
- containers
- containers-layer2
- build
- test
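Jobs attach themselves to one of these stages with the 'stage:' keyword and pull artifacts
from earlier stages with 'needs:'. A hedged sketch using illustrative job names (not taken
from this commit) of how the chain fits together:

    my-container:
      stage: containers
    my-build:
      stage: build
      needs: ['my-container']
    my-test:
      stage: test
      needs:
        - job: my-build
          artifacts: true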

View File

@ -0,0 +1,49 @@
check-patch:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/centos8:latest
needs:
job: amd64-centos8-container
script:
- .gitlab-ci.d/check-patch.py
variables:
GIT_DEPTH: 1000
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
when: never
- when: on_success
allow_failure: true
check-dco:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/centos8:latest
needs:
job: amd64-centos8-container
script: .gitlab-ci.d/check-dco.py
variables:
GIT_DEPTH: 1000
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
when: never
- when: on_success
check-python-pipenv:
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:latest
script:
- make -C python check-pipenv
variables:
GIT_DEPTH: 1
needs:
job: python-container
check-python-tox:
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:latest
script:
- make -C python check-tox
variables:
GIT_DEPTH: 1
QEMU_TOX_EXTRA_ARGS: --skip-missing-interpreters=false
needs:
job: python-container
allow_failure: true

View File

@ -1,837 +1,24 @@
# Currently we have two build stages after our containers are built:
# - build (for traditional build and test or first stage build)
# - test (for test stages, using build artefacts from a build stage)
stages:
- containers
- containers-layer2
- build
- test
#
# This is the GitLab CI configuration file for the mainstream QEMU
# project: https://gitlab.com/qemu-project/qemu/-/pipelines
#
# !!! DO NOT ADD ANY NEW CONFIGURATION TO THIS FILE !!!
#
# Only documentation or comment changes are accepted.
#
# To use a different set of jobs than the mainstream QEMU project,
# you need to set the location of your custom yml file at "custom CI/CD
# configuration path", on your GitLab CI namespace:
# https://docs.gitlab.com/ee/ci/pipelines/settings.html#custom-cicd-configuration-path
#
# ----------------------------------------------------------------------
#
# QEMU CI jobs are based on templates. Some templates provide
# user-configurable options, modifiable via configuration variables.
#
# See https://qemu-project.gitlab.io/qemu/devel/ci.html#custom-ci-cd-variables
# for more information.
#
include:
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml'
.native_build_job_template: &native_build_job_definition
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- mkdir build
- cd build
- if test -n "$TARGETS";
then
../configure --enable-werror --disable-docs $CONFIGURE_ARGS --target-list="$TARGETS" ;
else
../configure --enable-werror --disable-docs $CONFIGURE_ARGS ;
fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
meson configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
- make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS";
then
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
.native_test_job_template: &native_test_job_definition
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- scripts/git-submodule.sh update
$(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
- cd build
- find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":"
- make NINJA=":" $MAKE_CHECK_ARGS
.acceptance_template: &acceptance_definition
cache:
key: "${CI_JOB_NAME}-cache"
paths:
- ${CI_PROJECT_DIR}/avocado-cache
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 2 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script:
- mkdir -p ~/.config/avocado
- echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
- echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']"
>> ~/.config/avocado/avocado.conf
- echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
>> ~/.config/avocado/avocado.conf
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
fi
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
after_script:
- cd build
- du -chs ${CI_PROJECT_DIR}/avocado-cache
build-system-alpine:
<<: *native_build_job_definition
needs:
- job: amd64-alpine-container
variables:
IMAGE: alpine
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
moxie-softmmu microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
artifacts:
expire_in: 2 days
paths:
- .git-submodule-status
- build
check-system-alpine:
<<: *native_test_job_definition
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check
acceptance-system-alpine:
<<: *native_test_job_definition
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-system-ubuntu:
<<: *native_build_job_definition
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-slirp=system
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
moxie-softmmu microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-ubuntu:
<<: *native_test_job_definition
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
acceptance-system-ubuntu:
<<: *native_test_job_definition
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-system-debian:
<<: *native_build_job_definition
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-debian:
<<: *native_test_job_definition
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check
acceptance-system-debian:
<<: *native_test_job_definition
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-system-fedora:
<<: *native_build_job_definition
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
--enable-fdt=system --enable-slirp=system --enable-capstone=system
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-fedora:
<<: *native_test_job_definition
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-system-fedora:
<<: *native_test_job_definition
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-system-centos:
<<: *native_build_job_definition
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
--enable-modules --enable-trace-backends=dtrace
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-centos:
<<: *native_test_job_definition
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check
acceptance-system-centos:
<<: *native_test_job_definition
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-system-opensuse:
<<: *native_build_job_definition
needs:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-opensuse:
<<: *native_test_job_definition
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check
acceptance-system-opensuse:
<<: *native_test_job_definition
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-disabled:
<<: *native_build_job_definition
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS:
--disable-attr
--disable-auth-pam
--disable-avx2
--disable-bochs
--disable-brlapi
--disable-bzip2
--disable-cap-ng
--disable-capstone
--disable-cloop
--disable-coroutine-pool
--disable-curl
--disable-curses
--disable-dmg
--disable-docs
--disable-gcrypt
--disable-glusterfs
--disable-gnutls
--disable-gtk
--disable-guest-agent
--disable-iconv
--disable-keyring
--disable-kvm
--disable-libiscsi
--disable-libpmem
--disable-libssh
--disable-libudev
--disable-libusb
--disable-libxml2
--disable-linux-aio
--disable-live-block-migration
--disable-lzo
--disable-malloc-trim
--disable-mpath
--disable-nettle
--disable-numa
--disable-opengl
--disable-parallels
--disable-pie
--disable-qcow1
--disable-qed
--disable-qom-cast-debug
--disable-rbd
--disable-rdma
--disable-replication
--disable-sdl
--disable-seccomp
--disable-sheepdog
--disable-slirp
--disable-smartcard
--disable-snappy
--disable-sparse
--disable-spice
--disable-strip
--disable-tpm
--disable-usb-redir
--disable-vdi
--disable-vhost-crypto
--disable-vhost-net
--disable-vhost-scsi
--disable-vhost-kernel
--disable-vhost-user
--disable-vhost-vdpa
--disable-vhost-vsock
--disable-virglrenderer
--disable-vnc
--disable-vte
--disable-vvfat
--disable-xen
--disable-zstd
TARGETS: arm-softmmu i386-softmmu ppc64-softmmu mips64-softmmu
s390x-softmmu i386-linux-user
MAKE_CHECK_ARGS: check-qtest SPEED=slow
# This job explicitly disables TCG (--disable-tcg); KVM is detected by
# the configure script. The container doesn't contain Xen headers, so the
# Xen accelerator is not detected / selected. As a result it builds
# i386-softmmu and x86_64-softmmu with KVM as the single available
# accelerator.
# It also uses a different coroutine implementation (which is only really of
# interest to KVM users, i.e. with TCG disabled).
build-tcg-disabled:
<<: *native_build_job_definition
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
script:
- mkdir build
- cd build
- ../configure --disable-tcg --audio-drv-list="" --with-coroutine=ucontext
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
- make -j"$JOBS"
- make check-unit
- make check-qapi-schema
- cd tests/qemu-iotests/
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
170 171 183 184 192 194 197 208 215 221 222 226 227 236 253 277
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
124 132 139 142 144 145 151 152 155 157 165 194 196 197 200 202
208 209 215 216 218 222 227 234 246 247 248 250 254 255 257 258
260 261 262 263 264 270 272 273 277 279
build-user:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system
MAKE_CHECK_ARGS: check-tcg
build-user-static:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system --static
MAKE_CHECK_ARGS: check-tcg
# Only build the softmmu targets we have check-tcg tests for
build-some-softmmu:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --enable-debug
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
MAKE_CHECK_ARGS: check-tcg
# Run check-tcg against linux-user (with plugins)
# we skip sparc64-linux-user until it has been fixed somewhat
# we skip cris-linux-user as it doesn't use the common run loop
build-user-plugins:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system --enable-plugins --enable-debug-tcg --target-list-exclude=sparc64-linux-user,cris-linux-user
MAKE_CHECK_ARGS: check-tcg
timeout: 1h 30m
build-user-centos7:
<<: *native_build_job_definition
needs:
job: amd64-centos7-container
variables:
IMAGE: centos7
CONFIGURE_ARGS: --disable-system --disable-tools --disable-docs
MAKE_CHECK_ARGS: check-tcg
build-some-softmmu-plugins:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-user --enable-plugins --enable-debug-tcg
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
MAKE_CHECK_ARGS: check-tcg
clang-system:
<<: *native_build_job_definition
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-qtest check-tcg
clang-user:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
--target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
MAKE_CHECK_ARGS: check-unit check-tcg
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
# On GitLab runners, the default value sometimes ends up running two lds concurrently
# and triggers an out-of-memory error.
#
# Since slirp callbacks are used in QEMU timers, slirp needs to be compiled together
# with QEMU and linked as a static library to avoid false positives in CFI checks.
# This can be accomplished by using --enable-slirp=git, which avoids the use of
# a system-wide version of the library.
#
# Split into three sets of build/check/acceptance jobs to limit the execution time
# of each job.
build-cfi-aarch64:
<<: *native_build_job_definition
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git
TARGETS: aarch64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-aarch64:
<<: *native_test_job_definition
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-aarch64:
<<: *native_test_job_definition
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-cfi-ppc64-s390x:
<<: *native_build_job_definition
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git
TARGETS: ppc64-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-ppc64-s390x:
<<: *native_test_job_definition
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-ppc64-s390x:
<<: *native_test_job_definition
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
build-cfi-x86_64:
<<: *native_build_job_definition
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git
TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-x86_64:
<<: *native_test_job_definition
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-x86_64:
<<: *native_test_job_definition
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
<<: *acceptance_definition
tsan-build:
<<: *native_build_job_definition
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
--enable-trace-backends=ust --enable-fdt=system --enable-slirp=system
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
# These targets are on the way out
build-deprecated:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools
MAKE_CHECK_ARGS: build-tcg
TARGETS: ppc64abi32-linux-user lm32-softmmu unicore32-softmmu
artifacts:
expire_in: 2 days
paths:
- build
# We split the check-tcg step as test failures are expected but we still
# want to catch the build breaking.
check-deprecated:
<<: *native_test_job_definition
needs:
- job: build-deprecated
artifacts: true
variables:
IMAGE: debian-all-test-cross
MAKE_CHECK_ARGS: check-tcg
allow_failure: true
# gprof/gcov are GCC features
gprof-gcov:
<<: *native_build_job_definition
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-gprof --enable-gcov
MAKE_CHECK_ARGS: check
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
timeout: 70m
after_script:
- ${CI_PROJECT_DIR}/scripts/ci/coverage-summary.sh
build-oss-fuzz:
<<: *native_build_job_definition
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
script:
- mkdir build-oss-fuzz
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
./scripts/oss-fuzz/build.sh
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
| grep -v slirp); do
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
echo Testing ${fuzzer} ... ;
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
done
# Unrelated to fuzzer: run some tests with -fsanitize=address
- cd build-oss-fuzz && make check-qtest-i386 check-unit
build-tci:
<<: *native_build_job_definition
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
script:
- TARGETS="aarch64 alpha arm hppa m68k microblaze moxie ppc64 s390x x86_64"
- mkdir build
- cd build
- ../configure --enable-tcg-interpreter
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)" || { cat config.log meson-logs/meson-log.txt && exit 1; }
- make -j"$JOBS"
- make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
- for tg in $TARGETS ; do
export QTEST_QEMU_BINARY="./qemu-system-${tg}" ;
./tests/qtest/boot-serial-test || exit 1 ;
./tests/qtest/cdrom-test || exit 1 ;
done
- QTEST_QEMU_BINARY="./qemu-system-x86_64" ./tests/qtest/pxe-test
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
- make check-tcg
# Alternate coroutine implementations are only really of interest to KVM users.
# However we can't test against KVM on GitLab CI, so we can only run unit tests.
build-coroutine-sigaltstack:
<<: *native_build_job_definition
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
--enable-trace-backends=ftrace
MAKE_CHECK_ARGS: check-unit
# Most jobs test latest gcrypt or nettle builds
#
# These jobs test old gcrypt and nettle from RHEL7
# which had some API differences.
crypto-old-nettle:
<<: *native_build_job_definition
needs:
job: amd64-centos7-container
variables:
IMAGE: centos7
TARGETS: x86_64-softmmu x86_64-linux-user
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle
MAKE_CHECK_ARGS: check
crypto-old-gcrypt:
<<: *native_build_job_definition
needs:
job: amd64-centos7-container
variables:
IMAGE: centos7
TARGETS: x86_64-softmmu x86_64-linux-user
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt
MAKE_CHECK_ARGS: check
crypto-only-gnutls:
<<: *native_build_job_definition
needs:
job: amd64-centos7-container
variables:
IMAGE: centos7
TARGETS: x86_64-softmmu x86_64-linux-user
CONFIGURE_ARGS: --disable-nettle --disable-gcrypt --enable-gnutls
MAKE_CHECK_ARGS: check
# Check our reduced build configurations
build-without-default-devices:
<<: *native_build_job_definition
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --without-default-devices --disable-user
build-without-default-features:
<<: *native_build_job_definition
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --without-default-features --disable-user
--target-list-exclude=arm-softmmu,i386-softmmu,mipsel-softmmu,mips64-softmmu,ppc-softmmu
MAKE_CHECK_ARGS: check-unit
check-patch:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/centos8:latest
needs:
job: amd64-centos8-container
script: .gitlab-ci.d/check-patch.py
except:
variables:
- $CI_PROJECT_NAMESPACE == 'qemu-project' && $CI_COMMIT_BRANCH == 'master'
variables:
GIT_DEPTH: 1000
allow_failure: true
check-dco:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/centos8:latest
needs:
job: amd64-centos8-container
script: .gitlab-ci.d/check-dco.py
except:
variables:
- $CI_PROJECT_NAMESPACE == 'qemu-project' && $CI_COMMIT_BRANCH == 'master'
variables:
GIT_DEPTH: 1000
build-libvhost-user:
stage: build
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
needs:
job: amd64-fedora-container
before_script:
- dnf install -y meson ninja-build
script:
- mkdir subprojects/libvhost-user/build
- cd subprojects/libvhost-user/build
- meson
- ninja
# No targets are built here, just tools, docs, and unit tests. This
# also feeds into the eventual documentation deployment steps later
build-tools-and-docs-debian:
<<: *native_build_job_definition
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-unit check-softfloat ctags TAGS cscope
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
artifacts:
expire_in: 2 days
paths:
- build
# Prepare for GitLab pages deployment. Anything copied into the
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT
pages:
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
stage: test
needs:
- job: build-tools-and-docs-debian
script:
- mkdir -p public
# HTML-ised source tree
- make gtags
- htags -anT --tree-view=filetree -m qemu_init
-t "Welcome to the QEMU sourcecode"
- mv HTML public/src
# Project documentation
- make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/
artifacts:
paths:
- public
- local: '/.gitlab-ci.d/qemu-project.yml'
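As the comment at the top of this file explains, forks that want a different job set point
GitLab's "custom CI/CD configuration path" setting at their own entry file instead of this
one. A hedged sketch of such a file, assuming a fork that only wants the container builds
and the static checks (the path .gitlab-ci.d/mybranch.yml is hypothetical, and the setting
itself lives in the GitLab project settings, not in the tree):

    # .gitlab-ci.d/mybranch.yml
    include:
      - local: '/.gitlab-ci.d/stages.yml'
      - local: '/.gitlab-ci.d/containers.yml'
      - local: '/.gitlab-ci.d/static_checks.yml'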

View File

@ -0,0 +1,64 @@
<!--
This is the upstream QEMU issue tracker.
If you are able to, it will greatly facilitate bug triage if you attempt
to reproduce the problem with the latest qemu.git master built from
source. See https://www.qemu.org/download/#source for instructions on
how to do this.
QEMU generally supports the last two releases advertised on
https://www.qemu.org/. Problems with distro-packaged versions of QEMU
older than this should be reported to the distribution instead.
See https://www.qemu.org/contribute/report-a-bug/ for additional
guidance.
If this is a security issue, please consult
https://www.qemu.org/contribute/security-process/
-->
## Host environment
- Operating system: (Windows 10 21H1, Fedora 34, etc.)
- OS/kernel version: (For POSIX hosts, use `uname -a`)
- Architecture: (x86, ARM, s390x, etc.)
- QEMU flavor: (qemu-system-x86_64, qemu-aarch64, qemu-img, etc.)
- QEMU version: (e.g. `qemu-system-x86_64 --version`)
- QEMU command line:
<!--
Give the smallest, complete command line that exhibits the problem.
If you are using libvirt, virsh, or vmm, you can likely find the QEMU
command line arguments in /var/log/libvirt/qemu/$GUEST.log.
-->
```
./qemu-system-x86_64 -M q35 -m 4096 -enable-kvm -hda fedora32.qcow2
```
## Emulated/Virtualized environment
- Operating system: (Windows 10 21H1, Fedora 34, etc.)
- OS/kernel version: (For POSIX guests, use `uname -a`.)
- Architecture: (x86, ARM, s390x, etc.)
## Description of problem
<!-- Describe the problem, including any error/crash messages seen. -->
## Steps to reproduce
1.
2.
3.
## Additional information
<!--
Attach logs, stack traces, screenshots, etc. Compress the files if necessary.
If using libvirt, libvirt logs and XML domain information may be relevant.
-->
<!--
The line below ensures that proper tags are added to the issue.
Please do not remove it.
-->
/label ~"kind::Bug"

View File

@ -0,0 +1,32 @@
<!--
This is the upstream QEMU issue tracker.
Please note that QEMU, like most open source projects, relies on
contributors who have motivation, skills and available time to work on
implementing particular features.
Feature requests can be helpful for determining demand and interest, but
they are not a guarantee that a contributor will volunteer to implement
one. We welcome and encourage even draft patches implementing a feature
to be sent to the mailing list, where they can be discussed and developed
further by the community.
Thank you for your interest in helping us to make QEMU better!
-->
## Goal
<!-- Describe the final result you want to achieve. Avoid design specifics. -->
## Technical details
<!-- Describe technical details, design specifics, suggestions, versions, etc. -->
## Additional information
<!-- Patch or branch references, any other useful information -->
<!--
The line below ensures that proper tags are added to the issue.
Please do not remove it.
-->
/label ~"kind::Feature Request"

View File

@ -27,6 +27,10 @@ Paul Brook <paul@codesourcery.com> pbrook <pbrook@c046a42c-6fe2-441c-8c8c-714662
Thiemo Seufer <ths@networkno.de> ths <ths@c046a42c-6fe2-441c-8c8c-71466251a162>
malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
# Corrupted Author fields
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
# There is also a:
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
# for the cvs2svn initialization commit e63c3dc74bf.
@ -96,6 +100,7 @@ Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Gonglei (Arei) <arei.gonglei@huawei.com>
Guang Wang <wang.guang55@zte.com.cn>
Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Hanna Reitz <hreitz@redhat.com> <mreitz@redhat.com>
Hervé Poussineau <hpoussin@reactos.org>
Jakub Jermář <jakub@jermar.eu>
Jakub Jermář <jakub.jermar@kernkonzept.com>

View File

@ -88,7 +88,7 @@ email:
more information:
{{ logtext }}
{% elif test == "docker-mingw@fedora" or test == "docker-quick@centos7" or test == "asan" %}
{% elif test == "docker-mingw@fedora" or test == "docker-quick@centos8" or test == "asan" %}
Hi,
This series failed the {{ test }} build test. Please find the testing commands and
@ -124,13 +124,13 @@ testing:
script: |
#!/bin/bash
time make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu J=14 NETWORK=1
docker-quick@centos7:
docker-quick@centos8:
enabled: false
requirements: docker,x86_64
timeout: 3600
script: |
#!/bin/bash
time make docker-test-quick@centos7 SHOW_ENV=1 J=14 NETWORK=1
time make docker-test-quick@centos8 SHOW_ENV=1 J=14 NETWORK=1
checkpatch:
enabled: true
requirements: ''
@ -138,9 +138,6 @@ testing:
script: |
#!/bin/bash
git rev-parse base > /dev/null || exit 0
git config --local diff.renamelimit 0
git config --local diff.renames True
git config --local diff.algorithm histogram
./scripts/checkpatch.pl --mailback base..
docker-mingw@fedora:
enabled: true

View File

@ -27,6 +27,7 @@ addons:
- libattr1-dev
- libbrlapi-dev
- libcap-ng-dev
- libcacard-dev
- libgcc-7-dev
- libgnutls28-dev
- libgtk-3-dev
@ -34,7 +35,6 @@ addons:
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
@ -129,6 +129,7 @@ jobs:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
@ -137,7 +138,6 @@ jobs:
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
@ -163,6 +163,7 @@ jobs:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
@ -171,7 +172,6 @@ jobs:
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
@ -196,6 +196,7 @@ jobs:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
@ -204,7 +205,6 @@ jobs:
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
@ -238,6 +238,7 @@ jobs:
apt_packages:
- libaio-dev
- libattr1-dev
- libcacard-dev
- libcap-ng-dev
- libgnutls28-dev
- libiscsi-dev
@ -245,7 +246,6 @@ jobs:
- liblzo2-dev
- libncurses-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libsdl2-dev
- libsdl2-image-dev
@ -281,6 +281,7 @@ jobs:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
@ -289,7 +290,6 @@ jobs:
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libpng-dev
- librados-dev

View File

@ -1,5 +1,6 @@
source Kconfig.host
source backends/Kconfig
source accel/Kconfig
source target/Kconfig
source hw/Kconfig
source semihosting/Kconfig

View File

@ -87,7 +87,7 @@ S390 general architecture support
M: Cornelia Huck <cohuck@redhat.com>
M: Thomas Huth <thuth@redhat.com>
S: Supported
F: default-configs/*/s390x-softmmu.mak
F: configs/devices/s390x-softmmu/default.mak
F: gdb-xml/s390*.xml
F: hw/char/sclp*.[hc]
F: hw/char/terminal3270.c
@ -128,7 +128,6 @@ F: docs/devel/decodetree.rst
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/helper*.h
F: include/exec/tb-hash.h
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
F: include/hw/core/tcg-cpu-ops.h
@ -156,6 +155,7 @@ S: Maintained
F: target/arm/
F: tests/tcg/arm/
F: tests/tcg/aarch64/
F: tests/qtest/arm-cpu-features.c
F: hw/arm/
F: hw/cpu/a*mpcore.c
F: include/hw/cpu/a*mpcore.h
@ -171,6 +171,7 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/smmu*
F: include/hw/arm/smmu*
F: tests/acceptance/smmu.py
AVR TCG CPUs
M: Michael Rolnik <mrolnik@gmail.com>
@ -196,7 +197,9 @@ F: target/hexagon/
F: linux-user/hexagon/
F: tests/tcg/hexagon/
F: disas/hexagon.c
F: default-configs/targets/hexagon-linux-user.mak
F: configs/targets/hexagon-linux-user/default.mak
F: docker/dockerfiles/debian-hexagon-cross.docker
F: docker/dockerfiles/debian-hexagon-cross.docker.d/build-toolchain.sh
HPPA (PA-RISC) TCG CPUs
M: Richard Henderson <richard.henderson@linaro.org>
@ -207,19 +210,6 @@ F: disas/hppa.c
F: hw/net/*i82596*
F: include/hw/net/lasi_82596.h
LM32 TCG CPUs
R: Michael Walle <michael@walle.cc>
S: Orphan
F: target/lm32/
F: disas/lm32.c
F: hw/lm32/
F: hw/*/lm32_*
F: hw/*/milkymist-*
F: include/hw/display/milkymist_tmu2.h
F: include/hw/char/lm32_juart.h
F: include/hw/lm32/
F: tests/tcg/lm32/
M68K TCG CPUs
M: Laurent Vivier <laurent@vivier.eu>
S: Maintained
@ -240,7 +230,7 @@ R: Jiaxun Yang <jiaxun.yang@flygoat.com>
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
S: Odd Fixes
F: target/mips/
F: default-configs/*/*mips*
F: configs/devices/mips*/*
F: disas/mips.c
F: docs/system/cpu-models-mips.rst.inc
F: hw/intc/mips_gic.c
@ -257,14 +247,7 @@ K: ^Subject:.*(?i)mips
MIPS TCG CPUs (nanoMIPS ISA)
S: Orphan
F: disas/nanomips.*
Moxie TCG CPUs
M: Anthony Green <green@moxielogic.com>
S: Maintained
F: target/moxie/
F: disas/moxie.c
F: hw/moxie/
F: default-configs/*/moxie-softmmu.mak
F: target/mips/tcg/*nanomips*
NiosII TCG CPUs
M: Chris Wulff <crwulff@gmail.com>
@ -273,7 +256,7 @@ S: Maintained
F: target/nios2/
F: hw/nios2/
F: disas/nios2.c
F: default-configs/*/nios2-softmmu.mak
F: configs/devices/nios2-softmmu/default.mak
OpenRISC TCG CPUs
M: Stafford Horne <shorne@gmail.com>
@ -295,9 +278,8 @@ F: tests/acceptance/machine_ppc.py
RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <Alistair.Francis@wdc.com>
M: Sagar Karandikar <sagark@eecs.berkeley.edu>
M: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
M: Alistair Francis <alistair.francis@wdc.com>
M: Bin Meng <bin.meng@windriver.com>
L: qemu-riscv@nongnu.org
S: Supported
F: target/riscv/
@ -316,6 +298,8 @@ M: Richard Henderson <richard.henderson@linaro.org>
M: David Hildenbrand <david@redhat.com>
S: Maintained
F: target/s390x/
F: target/s390x/tcg
F: target/s390x/cpu_models_*.[ch]
F: hw/s390x/
F: disas/s390.c
F: tests/tcg/s390x/
@ -339,24 +323,17 @@ F: hw/sparc64/
F: include/hw/sparc/sparc64.h
F: disas/sparc.c
UniCore32 TCG CPUs
M: Guan Xuetao <gxt@mprc.pku.edu.cn>
S: Maintained
F: target/unicore32/
F: hw/unicore32/
F: include/hw/unicore32/
X86 TCG CPUs
M: Paolo Bonzini <pbonzini@redhat.com>
M: Richard Henderson <richard.henderson@linaro.org>
M: Eduardo Habkost <ehabkost@redhat.com>
S: Maintained
F: target/i386/
F: target/i386/tcg/
F: tests/tcg/i386/
F: tests/tcg/x86_64/
F: hw/i386/
F: disas/i386.c
F: docs/system/cpu-models-x86.rst.inc
F: docs/system/cpu-models-x86*
T: git https://gitlab.com/ehabkost/qemu.git x86-next
Xtensa TCG CPUs
@ -368,7 +345,7 @@ F: hw/xtensa/
F: tests/tcg/xtensa/
F: disas/xtensa.c
F: include/hw/xtensa/xtensa-isa.h
F: default-configs/*/xtensa*.mak
F: configs/devices/xtensa*/default.mak
TriCore TCG CPUs
M: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
@ -376,6 +353,7 @@ S: Maintained
F: target/tricore/
F: hw/tricore/
F: include/hw/tricore/
F: tests/tcg/tricore/
Multiarch Linux User Tests
M: Alex Bennée <alex.bennee@linaro.org>
@ -404,7 +382,8 @@ F: target/arm/kvm.c
MIPS KVM CPUs
M: Huacai Chen <chenhuacai@kernel.org>
S: Odd Fixes
F: target/mips/kvm.c
F: target/mips/kvm*
F: target/mips/sysemu/
PPC KVM CPUs
M: David Gibson <david@gibson.dropbear.id.au>
@ -417,9 +396,7 @@ M: Halil Pasic <pasic@linux.ibm.com>
M: Cornelia Huck <cohuck@redhat.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
S: Supported
F: target/s390x/kvm.c
F: target/s390x/kvm_s390x.h
F: target/s390x/kvm-stub.c
F: target/s390x/kvm/
F: target/s390x/ioinst.[ch]
F: target/s390x/machine.c
F: target/s390x/sigp.c
@ -462,7 +439,15 @@ M: Roman Bolshakov <r.bolshakov@yadro.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: target/i386/hvf/
HVF
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: accel/hvf/
F: include/sysemu/hvf.h
F: include/sysemu/hvf_int.h
WHPX CPUs
M: Sunil Muthuswamy <sunilmut@microsoft.com>
@ -509,6 +494,15 @@ F: accel/stubs/hax-stub.c
F: include/sysemu/hax.h
F: target/i386/hax/
Guest CPU Cores (NVMM)
----------------------
NetBSD Virtual Machine Monitor (NVMM) CPU support
M: Kamil Rytarowski <kamil@netbsd.org>
M: Reinoud Zandijk <reinoud@netbsd.org>
S: Maintained
F: include/sysemu/nvmm.h
F: target/i386/nvmm/
Hosts
-----
LINUX
@ -529,6 +523,8 @@ F: include/qemu/*posix*.h
NETBSD
M: Kamil Rytarowski <kamil@netbsd.org>
M: Reinoud Zandijk <reinoud@netbsd.org>
M: Ryo ONODERA <ryoon@netbsd.org>
S: Maintained
K: ^Subject:.*(?i)NetBSD
@ -564,6 +560,7 @@ S: Odd Fixes
F: hw/*/allwinner*
F: include/hw/*/allwinner*
F: hw/arm/cubieboard.c
F: docs/system/arm/cubieboard.rst
Allwinner-h3
M: Niek Linnenbank <nieklinnenbank@gmail.com>
@ -646,6 +643,7 @@ L: qemu-arm@nongnu.org
S: Odd Fixes
F: hw/arm/highbank.c
F: hw/net/xgmac.c
F: docs/system/arm/highbank.rst
Canon DIGIC
M: Antony Pavlov <antonynpavlov@gmail.com>
@ -686,6 +684,7 @@ F: hw/watchdog/wdt_imx2.c
F: include/hw/arm/fsl-imx25.h
F: include/hw/misc/imx25_ccm.h
F: include/hw/watchdog/wdt_imx2.h
F: docs/system/arm/imx25-pdk.rst
i.MX31 (kzm)
M: Peter Maydell <peter.maydell@linaro.org>
@ -696,6 +695,7 @@ F: hw/*/imx_*
F: hw/*/*imx31*
F: include/hw/*/imx_*
F: include/hw/*/*imx31*
F: docs/system/arm/kzm.rst
Integrator CP
M: Peter Maydell <peter.maydell@linaro.org>
@ -788,7 +788,6 @@ F: roms/vbootrom
F: docs/system/arm/nuvoton.rst
nSeries
M: Andrzej Zaborowski <balrogg@gmail.com>
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Odd Fixes
@ -799,13 +798,13 @@ F: hw/input/tsc2005.c
F: hw/misc/cbus.c
F: hw/rtc/twl92230.c
F: include/hw/display/blizzard.h
F: include/hw/input/lm832x.h
F: include/hw/input/tsc2xxx.h
F: include/hw/misc/cbus.h
F: tests/acceptance/machine_arm_n8x0.py
F: docs/system/arm/nseries.rst
Palm
M: Andrzej Zaborowski <balrogg@gmail.com>
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Odd Fixes
@ -838,7 +837,6 @@ F: include/hw/intc/realview_gic.h
F: docs/system/arm/realview.rst
PXA2XX
M: Andrzej Zaborowski <balrogg@gmail.com>
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Odd Fixes
@ -851,12 +849,13 @@ F: hw/display/tc6393xb.c
F: hw/gpio/max7310.c
F: hw/gpio/zaurus.c
F: hw/misc/mst_fpga.c
F: hw/misc/max111x.c
F: include/hw/misc/max111x.h
F: hw/adc/max111x.c
F: include/hw/adc/max111x.h
F: include/hw/arm/pxa.h
F: include/hw/arm/sharpsl.h
F: include/hw/display/tc6393xb.h
F: docs/system/arm/xscale.rst
F: docs/system/arm/mainstone.rst
SABRELITE / i.MX6
M: Peter Maydell <peter.maydell@linaro.org>
@ -898,6 +897,13 @@ F: hw/*/stellaris*
F: include/hw/input/gamepad.h
F: docs/system/arm/stellaris.rst
STM32VLDISCOVERY
M: Alexandre Iooss <erdnaxe@crans.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/stm32vldiscovery.c
F: docs/system/arm/stm32.rst
Versatile Express
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
@ -930,8 +936,10 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/*/xilinx_*
F: hw/*/cadence_*
F: hw/misc/zynq*
F: include/hw/misc/zynq*
F: hw/misc/zynq_slcr.c
F: hw/adc/zynq-xadc.c
F: include/hw/misc/zynq_slcr.h
F: include/hw/adc/zynq-xadc.h
X: hw/ssi/xilinx_*
Xilinx ZynqMP and Versal
@ -953,6 +961,12 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
STM32F100
M: Alexandre Iooss <erdnaxe@crans.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/stm32f100_soc.c
STM32F205
M: Alistair Francis <alistair@alistair23.me>
M: Peter Maydell <peter.maydell@linaro.org>
@ -1011,6 +1025,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/msf2-som.c
F: docs/system/arm/emcraft-sf2.rst
ASPEED BMCs
M: Cédric Le Goater <clg@kaod.org>
@ -1026,6 +1041,7 @@ F: include/hw/misc/pca9552*.h
F: hw/net/ftgmac100.c
F: include/hw/net/ftgmac100.h
F: docs/system/arm/aspeed.rst
F: tests/qtest/*aspeed*
NRF51
M: Joel Stanley <joel@jms.id.au>
@ -1037,6 +1053,7 @@ F: hw/*/microbit*.c
F: include/hw/*/nrf51*.h
F: include/hw/*/microbit*.h
F: tests/qtest/microbit-test.c
F: docs/system/arm/nrf.rst
AVR Machines
-------------
@ -1044,7 +1061,7 @@ AVR Machines
AVR MCUs
M: Michael Rolnik <mrolnik@gmail.com>
S: Maintained
F: default-configs/*/avr-softmmu.mak
F: configs/devices/avr-softmmu/default.mak
F: hw/avr/
F: include/hw/char/avr_usart.h
F: hw/char/avr_usart.c
@ -1072,22 +1089,10 @@ HP B160L
M: Richard Henderson <richard.henderson@linaro.org>
R: Helge Deller <deller@gmx.de>
S: Odd Fixes
F: default-configs/*/hppa-softmmu.mak
F: configs/devices/hppa-softmmu/default.mak
F: hw/hppa/
F: pc-bios/hppa-firmware.img
LM32 Machines
-------------
EVR32 and uclinux BSP
R: Michael Walle <michael@walle.cc>
S: Orphan
F: hw/lm32/lm32_boards.c
milkymist
R: Michael Walle <michael@walle.cc>
S: Orphan
F: hw/lm32/milkymist.c
M68K Machines
-------------
an5206
@ -1193,6 +1198,7 @@ F: hw/isa/vt82c686.c
F: hw/pci-host/bonito.c
F: hw/usb/vt82c686-uhci-pci.c
F: include/hw/isa/vt82c686.h
F: tests/acceptance/machine_mips_fuloong2e.py
Loongson-3 virtual platforms
M: Huacai Chen <chenhuacai@kernel.org>
@ -1302,7 +1308,7 @@ S: Maintained
F: hw/ppc/prep.c
F: hw/ppc/prep_systemio.c
F: hw/ppc/rs6000_mc.c
F: hw/pci-host/prep.[hc]
F: hw/pci-host/raven.c
F: hw/isa/i82378.c
F: hw/isa/pc87312.c
F: hw/dma/i82374.c
@ -1364,6 +1370,28 @@ F: pc-bios/canyonlands.dt[sb]
F: pc-bios/u-boot-sam460ex-20100605.bin
F: roms/u-boot-sam460ex
pegasos2
M: BALATON Zoltan <balaton@eik.bme.hu>
R: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/pegasos2.c
F: hw/pci-host/mv64361.c
F: hw/pci-host/mv643xx.h
F: include/hw/pci-host/mv64361.h
Virtual Open Firmware (VOF)
M: Alexey Kardashevskiy <aik@ozlabs.ru>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/spapr_vof*
F: hw/ppc/vof*
F: include/hw/ppc/vof*
F: pc-bios/vof/*
F: pc-bios/vof*
RISC-V Machines
---------------
OpenTitan
@ -1371,11 +1399,9 @@ M: Alistair Francis <Alistair.Francis@wdc.com>
L: qemu-riscv@nongnu.org
S: Supported
F: hw/riscv/opentitan.c
F: hw/char/ibex_uart.c
F: hw/intc/ibex_plic.c
F: hw/*/ibex_*.c
F: include/hw/riscv/opentitan.h
F: include/hw/char/ibex_uart.h
F: include/hw/intc/ibex_plic.h
F: include/hw/*/ibex_*.h
Microchip PolarFire SoC Icicle Kit
M: Bin Meng <bin.meng@windriver.com>
@ -1392,6 +1418,15 @@ F: include/hw/misc/mchp_pfsoc_dmc.h
F: include/hw/misc/mchp_pfsoc_ioscb.h
F: include/hw/misc/mchp_pfsoc_sysreg.h
Shakti C class SoC
M: Vijai Kumar K <vijai@behindbytes.com>
L: qemu-riscv@nongnu.org
S: Supported
F: hw/riscv/shakti_c.c
F: hw/char/shakti_uart.c
F: include/hw/riscv/shakti_c.h
F: include/hw/char/shakti_uart.h
SiFive Machines
M: Alistair Francis <Alistair.Francis@wdc.com>
M: Bin Meng <bin.meng@windriver.com>
@ -1493,7 +1528,7 @@ F: hw/s390x/
F: include/hw/s390x/
F: hw/watchdog/wdt_diag288.c
F: include/hw/watchdog/wdt_diag288.h
F: default-configs/*/s390x-softmmu.mak
F: configs/devices/s390x-softmmu/default.mak
F: tests/acceptance/machine_s390_ccw_virtio.py
T: git https://gitlab.com/cohuck/qemu.git s390-next
T: git https://github.com/borntraeger/qemu.git s390-next
@ -1518,14 +1553,6 @@ F: hw/s390x/s390-pci*
F: include/hw/s390x/s390-pci*
L: qemu-s390x@nongnu.org
UniCore32 Machines
------------------
PKUnity-3 SoC initramfs-with-busybox
M: Guan Xuetao <gxt@mprc.pku.edu.cn>
S: Maintained
F: hw/*/puv3*
F: hw/unicore32/
X86 Machines
------------
PC
@ -1676,6 +1703,9 @@ M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: hw/block/fdc.c
F: hw/block/fdc-internal.h
F: hw/block/fdc-isa.c
F: hw/block/fdc-sysbus.c
F: include/hw/block/fdc.h
F: tests/qtest/fdc-test.c
T: git https://gitlab.com/jsnow/qemu.git ide
@ -1705,7 +1735,6 @@ F: hw/pci-bridge/*
F: qapi/pci.json
F: docs/pci*
F: docs/specs/*pci*
F: default-configs/pci.mak
ACPI/SMBIOS
M: Michael S. Tsirkin <mst@redhat.com>
@ -1798,21 +1827,21 @@ F: include/hw/sd/sd*
F: hw/sd/core.c
F: hw/sd/sd*
F: hw/sd/ssi-sd.c
F: tests/qtest/sd*
F: tests/qtest/fuzz-sdcard-test.c
F: tests/qtest/sdhci-test.c
USB
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
S: Odd Fixes
F: hw/usb/*
F: stubs/usb-dev-stub.c
F: tests/qtest/usb-*-test.c
F: docs/usb2.txt
F: docs/usb-storage.txt
F: docs/system/devices/usb.rst
F: include/hw/usb.h
F: include/hw/usb/
F: default-configs/usb.mak
USB (serial adapter)
M: Gerd Hoffmann <kraxel@redhat.com>
R: Gerd Hoffmann <kraxel@redhat.com>
M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained
F: hw/usb/dev-serial.c
@ -1823,6 +1852,7 @@ S: Supported
F: hw/vfio/*
F: include/hw/vfio/
F: docs/igd-assign.txt
F: docs/devel/vfio-migration.rst
vfio-ccw
M: Cornelia Huck <cohuck@redhat.com>
@ -1881,6 +1911,7 @@ virtio-9p
M: Greg Kurz <groug@kaod.org>
M: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Odd Fixes
W: https://wiki.qemu.org/Documentation/9p
F: hw/9pfs/
X: hw/9pfs/xen-9p*
F: fsdev/
@ -1920,7 +1951,7 @@ L: virtio-fs@redhat.com
virtio-input
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
S: Odd Fixes
F: hw/input/vhost-user-input.c
F: hw/input/virtio-input*.c
F: include/hw/virtio/virtio-input.h
@ -1951,6 +1982,15 @@ F: include/sysemu/rng*.h
F: backends/rng*.c
F: tests/qtest/virtio-rng-test.c
vhost-user-rng
M: Mathieu Poirier <mathieu.poirier@linaro.org>
S: Supported
F: docs/tools/vhost-user-rng.rst
F: hw/virtio/vhost-user-rng.c
F: hw/virtio/vhost-user-rng-pci.c
F: include/hw/virtio/vhost-user-rng.h
F: tools/vhost-user-rng/*
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported
@ -1972,7 +2012,7 @@ M: Keith Busch <kbusch@kernel.org>
M: Klaus Jensen <its@irrelevant.dk>
L: qemu-block@nongnu.org
S: Supported
F: hw/block/nvme*
F: hw/nvme/*
F: include/block/nvme.h
F: tests/qtest/nvme-test.c
F: docs/system/nvme.rst
@ -2040,6 +2080,12 @@ S: Maintained
F: hw/net/tulip.c
F: hw/net/tulip.h
pca954x
M: Patrick Venture <venture@google.com>
S: Maintained
F: hw/i2c/i2c_mux_pca954x.c
F: include/hw/i2c/i2c_mux_pca954x.h
Generic Loader
M: Alistair Francis <alistair@alistair23.me>
S: Maintained
@ -2111,7 +2157,7 @@ F: include/hw/display/ramfb.h
virtio-gpu
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
S: Odd Fixes
F: hw/display/virtio-gpu*
F: hw/display/virtio-vga.*
F: include/hw/virtio/virtio-gpu.h
@ -2130,7 +2176,7 @@ F: include/hw/virtio/vhost-user-scsi.h
vhost-user-gpu
M: Marc-André Lureau <marcandre.lureau@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
R: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: docs/interop/vhost-user-gpu.rst
F: contrib/vhost-user-gpu
@ -2158,7 +2204,6 @@ F: include/hw/southbridge/piix.h
Firmware configuration (fw_cfg)
M: Philippe Mathieu-Daudé <philmd@redhat.com>
R: Laszlo Ersek <lersek@redhat.com>
R: Gerd Hoffmann <kraxel@redhat.com>
S: Supported
F: docs/specs/fw_cfg.txt
@ -2213,7 +2258,7 @@ Subsystems
----------
Audio
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
S: Odd Fixes
F: audio/
F: hw/audio/
F: include/hw/audio/
@ -2221,10 +2266,11 @@ F: qapi/audio.json
F: tests/qtest/ac97-test.c
F: tests/qtest/es1370-test.c
F: tests/qtest/intel-hda-test.c
F: tests/qtest/fuzz-sb16-test.c
Block layer core
M: Kevin Wolf <kwolf@redhat.com>
M: Max Reitz <mreitz@redhat.com>
M: Hanna Reitz <hreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block*
@ -2405,22 +2451,26 @@ F: tests/tcg/multiarch/gdbstub/
Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
M: Peter Xu <peterx@redhat.com>
M: David Hildenbrand <david@redhat.com>
S: Supported
F: include/exec/ioport.h
F: include/exec/memop.h
F: include/exec/memory.h
F: include/exec/ram_addr.h
F: include/exec/ramblock.h
F: include/sysemu/memory_mapping.h
F: softmmu/dma-helpers.c
F: softmmu/ioport.c
F: softmmu/memory.c
F: softmmu/memory_mapping.c
F: softmmu/physmem.c
F: include/exec/memory-internal.h
F: scripts/coccinelle/memory-region-housekeeping.cocci
SPICE
M: Gerd Hoffmann <kraxel@redhat.com>
S: Supported
S: Odd Fixes
F: include/ui/qemu-spice.h
F: include/ui/spice-display.h
F: ui/spice-*.c
@ -2445,6 +2495,7 @@ F: ui/cocoa.m
Main loop
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/exec/gen-icount.h
F: include/qemu/main-loop.h
F: include/sysemu/runstate.h
F: include/sysemu/runstate-action.h
@ -2499,7 +2550,7 @@ S: Maintained
F: net/netmap.c
Host Memory Backends
M: Eduardo Habkost <ehabkost@redhat.com>
M: David Hildenbrand <david@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
S: Maintained
F: backends/hostmem*.c
@ -2531,6 +2582,13 @@ Benchmark util
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
S: Maintained
F: scripts/simplebench/
T: git https://src.openvz.org/scm/~vsementsov/qemu.git simplebench
Transactions helper
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
S: Maintained
F: include/qemu/transactions.h
F: util/transactions.c
QAPI
M: Markus Armbruster <armbru@redhat.com>
@ -2692,14 +2750,13 @@ F: scripts/tracetool.py
F: scripts/tracetool/
F: scripts/qemu-trace-stap*
F: docs/tools/qemu-trace-stap.rst
F: docs/devel/tracing.txt
F: docs/devel/tracing.rst
T: git https://github.com/stefanha/qemu.git tracing
TPM
M: Stefan Berger <stefanb@linux.ibm.com>
S: Maintained
F: tpm.c
F: stubs/tpm.c
F: hw/tpm/*
F: include/hw/acpi/tpm.h
F: include/sysemu/tpm*
@ -2786,7 +2843,6 @@ F: tests/unit/test-authz-*
Sockets
M: Daniel P. Berrange <berrange@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: include/qemu/sockets.h
F: util/qemu-sockets.c
@ -2882,7 +2938,6 @@ F: include/hw/i2c/smbus_slave.h
F: include/hw/i2c/smbus_eeprom.h
Firmware schema specifications
M: Laszlo Ersek <lersek@redhat.com>
M: Philippe Mathieu-Daudé <philmd@redhat.com>
R: Daniel P. Berrange <berrange@redhat.com>
R: Kashyap Chamarthy <kchamart@redhat.com>
@ -2890,9 +2945,10 @@ S: Maintained
F: docs/interop/firmware.json
EDK2 Firmware
M: Laszlo Ersek <lersek@redhat.com>
M: Philippe Mathieu-Daudé <philmd@redhat.com>
R: Gerd Hoffmann <kraxel@redhat.com>
S: Supported
F: hw/i386/*ovmf*
F: pc-bios/descriptors/??-edk2-*.json
F: pc-bios/edk2-*
F: roms/Makefile.edk2
@ -2943,14 +2999,14 @@ M: Warner Losh <imp@bsdimp.com>
R: Kyle Evans <kevans@freebsd.org>
S: Maintained
F: bsd-user/
F: default-configs/targets/*-bsd-user.mak
F: configs/targets/*-bsd-user.mak
T: git https://github.com/qemu-bsd-user/qemu-bsd-user bsd-user-rebase-3.1
Linux user
M: Laurent Vivier <laurent@vivier.eu>
S: Maintained
F: linux-user/
F: default-configs/targets/*linux-user.mak
F: configs/targets/*linux-user.mak
F: scripts/qemu-binfmt-conf.sh
F: scripts/update-syscalltbl.sh
F: scripts/update-mips-syscall-args.sh
@ -2966,6 +3022,8 @@ F: include/tcg/
TCG Plugins
M: Alex Bennée <alex.bennee@linaro.org>
R: Alexandre Iooss <erdnaxe@crans.org>
R: Mahmoud Mandour <ma.mandourr@gmail.com>
S: Maintained
F: docs/devel/tcg-plugins.rst
F: plugins/
@ -2982,7 +3040,7 @@ F: disas/arm-a64.cc
F: disas/libvixl/
ARM TCG target
M: Andrzej Zaborowski <balrogg@gmail.com>
M: Richard Henderson <richard.henderson@linaro.org>
S: Maintained
L: qemu-arm@nongnu.org
F: tcg/arm/
@ -3045,17 +3103,12 @@ S: Supported
F: block/vmdk.c
RBD
M: Jason Dillaman <dillaman@redhat.com>
M: Ilya Dryomov <idryomov@gmail.com>
R: Peter Lieven <pl@kamp.de>
L: qemu-block@nongnu.org
S: Supported
F: block/rbd.c
Sheepdog
M: Liu Yuan <namei.unix@gmail.com>
L: qemu-block@nongnu.org
S: Odd Fixes
F: block/sheepdog.c
VHDX
M: Jeff Cody <codyprime@gmail.com>
L: qemu-block@nongnu.org
@ -3124,6 +3177,7 @@ F: block/null.c
NVMe Block Driver
M: Stefan Hajnoczi <stefanha@redhat.com>
R: Fam Zheng <fam@euphon.net>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/nvme*
@ -3203,6 +3257,7 @@ Linux io_uring
M: Aarushi Mehta <mehta.aaru20@gmail.com>
M: Julia Suvorova <jusual@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
R: Stefano Garzarella <sgarzare@redhat.com>
L: qemu-block@nongnu.org
S: Maintained
F: block/io_uring.c
@ -3210,7 +3265,7 @@ F: stubs/io_uring.c
qcow2
M: Kevin Wolf <kwolf@redhat.com>
M: Max Reitz <mreitz@redhat.com>
M: Hanna Reitz <hreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/qcow2*
@ -3224,7 +3279,7 @@ F: block/qcow.c
blkdebug
M: Kevin Wolf <kwolf@redhat.com>
M: Max Reitz <mreitz@redhat.com>
M: Hanna Reitz <hreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/blkdebug.c
@ -3254,10 +3309,12 @@ F: block/export/vhost-user-blk-server.c
F: block/export/vhost-user-blk-server.h
F: include/qemu/vhost-user-server.h
F: tests/qtest/libqos/vhost-user-blk.c
F: tests/qtest/libqos/vhost-user-blk.h
F: tests/qtest/vhost-user-blk-test.c
F: util/vhost-user-server.c
FUSE block device exports
M: Max Reitz <mreitz@redhat.com>
M: Hanna Reitz <hreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/export/fuse.c
@ -3312,6 +3369,14 @@ F: include/hw/remote/proxy-memory-listener.h
F: hw/remote/iohub.c
F: include/hw/remote/iohub.h
EBPF:
M: Jason Wang <jasowang@redhat.com>
R: Andrew Melnychenko <andrew@daynix.com>
R: Yuri Benditovich <yuri.benditovich@daynix.com>
S: Maintained
F: ebpf/*
F: tools/ebpf/*
Build and test automation
-------------------------
Build and test automation, general continuous integration
@ -3365,7 +3430,7 @@ Documentation
Build system architecture
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd Fixes
F: docs/devel/build-system.txt
F: docs/devel/build-system.rst
GIT Data Mining Config
M: Alex Bennée <alex.bennee@linaro.org>
@ -3375,7 +3440,7 @@ F: contrib/gitdm/*
Incompatible changes
R: libvir-list@redhat.com
F: docs/system/deprecated.rst
F: docs/about/deprecated.rst
Build System
------------
@ -3394,6 +3459,7 @@ S: Maintained
F: docs/conf.py
F: docs/*/conf.py
F: docs/sphinx/
F: docs/_templates/
Miscellaneous
-------------

View File

@ -14,7 +14,7 @@ SRC_PATH=.
# we have explicit rules for everything
MAKEFLAGS += -rR
SHELL = /usr/bin/env bash -o pipefail
SHELL = bash -o pipefail
# Usage: $(call quiet-command,command and args,"NAME","args to print")
# This will run "command and args", and either:
@ -48,9 +48,11 @@ Makefile: .git-submodule-status
.PHONY: git-submodule-update
git-submodule-update:
ifneq ($(GIT_SUBMODULES_ACTION),ignore)
$(call quiet-command, \
(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \
"GIT","$(GIT_SUBMODULES)")
endif
# 0. ensure the build tree is okay
@ -127,9 +129,11 @@ endif
# 4. Rules to bridge to other makefiles
ifneq ($(NINJA),)
MAKE.n = $(findstring n,$(firstword $(MAKEFLAGS)))
MAKE.k = $(findstring k,$(firstword $(MAKEFLAGS)))
MAKE.q = $(findstring q,$(firstword $(MAKEFLAGS)))
# Filter out long options to avoid flags like --no-print-directory which
# may result in false positive match for MAKE.n
MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
@ -213,7 +217,7 @@ qemu-%.tar.bz2:
distclean: clean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
rm -f config-host.mak config-host.h*
rm -f config-host.mak config-host.h* config-poison.h
rm -f tests/tcg/config-*.mak
rm -f config-all-disas.mak config.status
rm -f roms/seabios/config.mak roms/vgabios/config.mak

View File

@ -131,16 +131,16 @@ will be tagged as my-feature-v2.
Bug reporting
=============
The QEMU project uses Launchpad as its primary upstream bug tracker. Bugs
The QEMU project uses GitLab issues to track bugs. Bugs
found when running code built from QEMU git or upstream released sources
should be reported via:
* `<https://bugs.launchpad.net/qemu/>`_
* `<https://gitlab.com/qemu-project/qemu/-/issues>`_
If using QEMU via an operating system vendor pre-built binary package, it
is preferable to report bugs to the vendor's own bug tracker first. If
the bug is also known to affect latest upstream code, it can also be
reported via launchpad.
reported via GitLab.
For additional information on bug reporting consult:

View File

@ -1 +1 @@
5.2.95
6.1.0

View File

@ -1,6 +1,9 @@
config WHPX
bool
config NVMM
bool
config HAX
bool

View File

@ -44,7 +44,7 @@ static const TypeInfo accel_type = {
AccelClass *accel_find(const char *opt_name)
{
char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
AccelClass *ac = ACCEL_CLASS(object_class_by_name(class_name));
AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
g_free(class_name);
return ac;
}
@ -54,10 +54,23 @@ static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
CPUClass *cc = CPU_CLASS(klass);
AccelCPUClass *accel_cpu = opaque;
/*
* The first callback allows accel-cpu to run initializations
* for the CPU, customizing CPU behavior according to the accelerator.
*
* The second one allows the CPU to customize the accel-cpu
* behavior according to the CPU.
*
* The second is currently only used by TCG, to specialize the
* TCGCPUOps depending on the CPU type.
*/
cc->accel_cpu = accel_cpu;
if (accel_cpu->cpu_class_init) {
accel_cpu->cpu_class_init(cc);
}
if (cc->init_accel_cpu) {
cc->init_accel_cpu(accel_cpu, cc);
}
}
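To make the double callback described in the comment above concrete, here is a minimal, self-contained sketch of the same pattern outside of QEMU. All names (CpuClassModel, AccelCpuModel, foo_accel_cpu_class_init, my_cpu_init_accel) are hypothetical stand-ins; only the ordering is the point: the accelerator customizes the CPU class first, then the CPU class specializes itself for the accelerator.
#include <stdio.h>
#include <stdbool.h>
/* Hypothetical, simplified stand-ins for CPUClass / AccelCPUClass. */
typedef struct CpuClassModel CpuClassModel;
typedef struct AccelCpuModel AccelCpuModel;
struct AccelCpuModel {
    /* first callback: the accelerator customizes the CPU class */
    void (*cpu_class_init)(CpuClassModel *cc);
};
struct CpuClassModel {
    const AccelCpuModel *accel_cpu;
    /* second callback: the CPU class customizes accel behaviour */
    void (*init_accel_cpu)(const AccelCpuModel *accel, CpuClassModel *cc);
    bool fast_paths_enabled;
};
static void foo_accel_cpu_class_init(CpuClassModel *cc)
{
    cc->fast_paths_enabled = true;       /* accelerator-specific tweak */
}
static void my_cpu_init_accel(const AccelCpuModel *accel, CpuClassModel *cc)
{
    printf("CPU specializing itself for this accelerator (fast paths: %d)\n",
           cc->fast_paths_enabled);
}
int main(void)
{
    static const AccelCpuModel foo_accel = { foo_accel_cpu_class_init };
    CpuClassModel cc = { .init_accel_cpu = my_cpu_init_accel };
    /* Mirrors the order used in accel_init_cpu_int_aux(): accel first, then CPU. */
    cc.accel_cpu = &foo_accel;
    if (cc.accel_cpu->cpu_class_init) {
        cc.accel_cpu->cpu_class_init(&cc);
    }
    if (cc.init_accel_cpu) {
        cc.init_accel_cpu(cc.accel_cpu, &cc);
    }
    return 0;
}
In QEMU itself this wiring is applied to every CPU type via accel_init_cpu_interfaces(), as shown in the surrounding hunk.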
/* initialize the arch-specific accel CpuClass interfaces */
@ -89,6 +102,25 @@ void accel_init_interfaces(AccelClass *ac)
accel_init_cpu_interfaces(ac);
}
void accel_cpu_instance_init(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
cc->accel_cpu->cpu_instance_init(cpu);
}
}
bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
return cc->accel_cpu->cpu_realizefn(cpu, errp);
}
return true;
}
static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU,
.parent = TYPE_OBJECT,

View File

@ -72,7 +72,7 @@ void accel_init_ops_interfaces(AccelClass *ac)
g_assert(ac_name != NULL);
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
ops = ACCEL_OPS_CLASS(object_class_by_name(ops_name));
ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name));
g_free(ops_name);
/*

471
accel/hvf/hvf-accel-ops.c Normal file
View File

@ -0,0 +1,471 @@
/*
* Copyright 2008 IBM Corporation
* 2008 Red Hat, Inc.
* Copyright 2011 Intel Corporation
* Copyright 2016 Veertu, Inc.
* Copyright 2017 The Android Open Source Project
*
* QEMU Hypervisor.framework support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* This file contains code under public domain from the hvdos project:
* https://github.com/mist64/hvdos
*
* Parts Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "qemu/guest-random.h"
HVFState *hvf_state;
/* Memory slots */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
hvf_slot *slot;
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
slot = &hvf_state->slots[x];
if (slot->size && start < (slot->start + slot->size) &&
(start + size) > slot->start) {
return slot;
}
}
return NULL;
}
struct mac_slot {
int present;
uint64_t size;
uint64_t gpa_start;
uint64_t gva;
};
struct mac_slot mac_slots[32];
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
struct mac_slot *macslot;
hv_return_t ret;
macslot = &mac_slots[slot->slot_id];
if (macslot->present) {
if (macslot->size != slot->size) {
macslot->present = 0;
ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
assert_hvf_ok(ret);
}
}
if (!slot->size) {
return 0;
}
macslot->present = 1;
macslot->gpa_start = slot->start;
macslot->size = slot->size;
ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
assert_hvf_ok(ret);
return 0;
}
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
hvf_slot *mem;
MemoryRegion *area = section->mr;
bool writeable = !area->readonly && !area->rom_device;
hv_memory_flags_t flags;
if (!memory_region_is_ram(area)) {
if (writeable) {
return;
} else if (!memory_region_is_romd(area)) {
/*
* If the memory device is not in romd_mode, then we actually want
* to remove the hvf memory slot so all accesses will trap.
*/
add = false;
}
}
mem = hvf_find_overlap_slot(
section->offset_within_address_space,
int128_get64(section->size));
if (mem && add) {
if (mem->size == int128_get64(section->size) &&
mem->start == section->offset_within_address_space &&
mem->mem == (memory_region_get_ram_ptr(area) +
section->offset_within_region)) {
return; /* The same region is already registered; nothing to do. */
}
}
/* Region needs to be reset: set the size to 0 and remap it. */
if (mem) {
mem->size = 0;
if (do_hvf_set_memory(mem, 0)) {
error_report("Failed to reset overlapping slot");
abort();
}
}
if (!add) {
return;
}
if (area->readonly ||
(!memory_region_is_ram(area) && memory_region_is_romd(area))) {
flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
} else {
flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
}
/* Now make a new slot. */
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
mem = &hvf_state->slots[x];
if (!mem->size) {
break;
}
}
if (x == hvf_state->num_slots) {
error_report("No free slots");
abort();
}
mem->size = int128_get64(section->size);
mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
mem->start = section->offset_within_address_space;
mem->region = area;
if (do_hvf_set_memory(mem, flags)) {
error_report("Error registering new memory slot");
abort();
}
}
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->vcpu_dirty) {
hvf_get_registers(cpu);
cpu->vcpu_dirty = true;
}
}
static void hvf_cpu_synchronize_state(CPUState *cpu)
{
if (!cpu->vcpu_dirty) {
run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
run_on_cpu_data arg)
{
/* QEMU state is the reference, push it to HVF now and on next entry */
cpu->vcpu_dirty = true;
}
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
static void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
hvf_slot *slot;
slot = hvf_find_overlap_slot(
section->offset_within_address_space,
int128_get64(section->size));
/* protect region against writes; begin tracking it */
if (on) {
slot->flags |= HVF_SLOT_LOG;
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ);
/* stop tracking region */
} else {
slot->flags &= ~HVF_SLOT_LOG;
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ | HV_MEMORY_WRITE);
}
}
static void hvf_log_start(MemoryListener *listener,
MemoryRegionSection *section, int old, int new)
{
if (old != 0) {
return;
}
hvf_set_dirty_tracking(section, 1);
}
static void hvf_log_stop(MemoryListener *listener,
MemoryRegionSection *section, int old, int new)
{
if (new != 0) {
return;
}
hvf_set_dirty_tracking(section, 0);
}
static void hvf_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
/*
* sync of dirty pages is handled elsewhere; just make sure we keep
* tracking the region.
*/
hvf_set_dirty_tracking(section, 1);
}
static void hvf_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
hvf_set_phys_mem(section, true);
}
static void hvf_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
hvf_set_phys_mem(section, false);
}
static MemoryListener hvf_memory_listener = {
.priority = 10,
.region_add = hvf_region_add,
.region_del = hvf_region_del,
.log_start = hvf_log_start,
.log_stop = hvf_log_stop,
.log_sync = hvf_log_sync,
};
static void dummy_signal(int sig)
{
}
bool hvf_allowed;
static int hvf_accel_init(MachineState *ms)
{
int x;
hv_return_t ret;
HVFState *s;
ret = hv_vm_create(HV_VM_DEFAULT);
assert_hvf_ok(ret);
s = g_new0(HVFState, 1);
s->num_slots = 32;
for (x = 0; x < s->num_slots; ++x) {
s->slots[x].size = 0;
s->slots[x].slot_id = x;
}
hvf_state = s;
memory_listener_register(&hvf_memory_listener, &address_space_memory);
return 0;
}
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "HVF";
ac->init_machine = hvf_accel_init;
ac->allowed = &hvf_allowed;
}
static const TypeInfo hvf_accel_type = {
.name = TYPE_HVF_ACCEL,
.parent = TYPE_ACCEL,
.class_init = hvf_accel_class_init,
};
static void hvf_type_init(void)
{
type_register_static(&hvf_accel_type);
}
type_init(hvf_type_init);
static void hvf_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
assert_hvf_ok(ret);
hvf_arch_vcpu_destroy(cpu);
g_free(cpu->hvf);
cpu->hvf = NULL;
}
static int hvf_init_vcpu(CPUState *cpu)
{
int r;
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
/* init cpu signals */
sigset_t set;
struct sigaction sigact;
memset(&sigact, 0, sizeof(sigact));
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &set);
sigdelset(&set, SIG_IPI);
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
return hvf_arch_init_vcpu(cpu);
}
/*
* The HVF-specific vCPU thread function. This one should only run when the host
* CPU supports the VMX "unrestricted guest" feature.
*/
static void *hvf_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
int r;
assert(hvf_enabled());
rcu_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
current_cpu = cpu;
hvf_init_vcpu(cpu);
/* signal CPU creation */
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
if (cpu_can_run(cpu)) {
r = hvf_vcpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
hvf_vcpu_destroy(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
rcu_unregister_thread();
return NULL;
}
static void hvf_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
/*
* HVF currently does not support TCG, and only runs in
* unrestricted-guest mode.
*/
assert(hvf_enabled());
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
}
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = hvf_start_vcpu_thread;
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
ops->synchronize_state = hvf_cpu_synchronize_state;
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
};
static const TypeInfo hvf_accel_ops_type = {
.name = ACCEL_OPS_NAME("hvf"),
.parent = TYPE_ACCEL_OPS,
.class_init = hvf_accel_ops_class_init,
.abstract = true,
};
static void hvf_accel_ops_register_types(void)
{
type_register_static(&hvf_accel_ops_type);
}
type_init(hvf_accel_ops_register_types);

47
accel/hvf/hvf-all.c Normal file
View File

@ -0,0 +1,47 @@
/*
* QEMU Hypervisor.framework support
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
void assert_hvf_ok(hv_return_t ret)
{
if (ret == HV_SUCCESS) {
return;
}
switch (ret) {
case HV_ERROR:
error_report("Error: HV_ERROR");
break;
case HV_BUSY:
error_report("Error: HV_BUSY");
break;
case HV_BAD_ARGUMENT:
error_report("Error: HV_BAD_ARGUMENT");
break;
case HV_NO_RESOURCES:
error_report("Error: HV_NO_RESOURCES");
break;
case HV_NO_DEVICE:
error_report("Error: HV_NO_DEVICE");
break;
case HV_UNSUPPORTED:
error_report("Error: HV_UNSUPPORTED");
break;
default:
error_report("Unknown Error");
}
abort();
}

7
accel/hvf/meson.build Normal file
View File

@ -0,0 +1,7 @@
hvf_ss = ss.source_set()
hvf_ss.add(files(
'hvf-all.c',
'hvf-accel-ops.c',
))
specific_ss.add_all(when: 'CONFIG_HVF', if_true: hvf_ss)

View File

@ -15,6 +15,7 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>
#include <linux/kvm.h>
@ -30,11 +31,9 @@
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
@ -80,6 +79,25 @@ struct KVMParkedVcpu {
QLIST_ENTRY(KVMParkedVcpu) node;
};
enum KVMDirtyRingReaperState {
KVM_DIRTY_RING_REAPER_NONE = 0,
/* The reaper is sleeping */
KVM_DIRTY_RING_REAPER_WAIT,
/* The reaper is reaping for dirty pages */
KVM_DIRTY_RING_REAPER_REAPING,
};
/*
* KVM reaper instance, responsible for collecting the KVM dirty bits
* via the dirty ring.
*/
struct KVMDirtyRingReaper {
/* The reaper thread */
QemuThread reaper_thr;
volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
};
struct KVMState
{
AccelState parent_obj;
@ -128,6 +146,9 @@ struct KVMState
KVMMemoryListener *ml;
AddressSpace *as;
} *as;
uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
struct KVMDirtyRingReaper reaper;
};
KVMState *kvm_state;
@ -174,8 +195,12 @@ typedef struct KVMResampleFd KVMResampleFd;
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
static QemuMutex kml_slots_lock;
#define kvm_slots_lock() qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock)
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
static inline void kvm_resample_fd_remove(int gsi)
{
@ -241,9 +266,9 @@ bool kvm_has_free_slot(MachineState *ms)
bool result;
KVMMemoryListener *kml = &s->memory_listener;
kvm_slots_lock(kml);
kvm_slots_lock();
result = !!kvm_get_free_slot(kml);
kvm_slots_unlock(kml);
kvm_slots_unlock();
return result;
}
@ -309,7 +334,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
KVMMemoryListener *kml = &s->memory_listener;
int i, ret = 0;
kvm_slots_lock(kml);
kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &kml->slots[i];
@ -319,7 +344,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
break;
}
}
kvm_slots_unlock(kml);
kvm_slots_unlock();
return ret;
}
@ -385,6 +410,13 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
goto err;
}
if (cpu->kvm_dirty_gfns) {
ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
if (ret < 0) {
goto err;
}
}
vcpu = g_malloc0(sizeof(*vcpu));
vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
vcpu->kvm_fd = cpu->kvm_fd;
@ -461,6 +493,19 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
(void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
}
if (s->kvm_dirty_ring_size) {
/* Use MAP_SHARED to share pages with the kernel */
cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
PROT_READ | PROT_WRITE, MAP_SHARED,
cpu->kvm_fd,
PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
if (cpu->kvm_dirty_gfns == MAP_FAILED) {
ret = -errno;
DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
goto err;
}
}
ret = kvm_arch_init_vcpu(cpu);
if (ret < 0) {
error_setg_errno(errp, -ret,
@ -500,6 +545,7 @@ static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
return 0;
}
kvm_slot_init_dirty_bitmap(mem);
return kvm_set_user_memory_region(kml, mem, false);
}
@ -515,7 +561,7 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
return 0;
}
kvm_slots_lock(kml);
kvm_slots_lock();
while (size && !ret) {
slot_size = MIN(kvm_max_slot_size, size);
@ -531,7 +577,7 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
}
out:
kvm_slots_unlock(kml);
kvm_slots_unlock();
return ret;
}
@ -570,22 +616,28 @@ static void kvm_log_stop(MemoryListener *listener,
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
unsigned long *bitmap)
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
ram_addr_t start = section->offset_within_region +
memory_region_get_ram_addr(section->mr);
ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;
ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
return 0;
cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
/* Allocate the dirty bitmap for a slot */
static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
return;
}
/*
* XXX bad kernel interface alert
* For dirty bitmap, kernel allocates array of size aligned to
@ -606,6 +658,196 @@ static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
/*HOST_LONG_BITS*/ 64) / 8;
mem->dirty_bmap = g_malloc0(bitmap_size);
mem->dirty_bmap_size = bitmap_size;
}
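A short worked example of the bitmap sizing above (illustrative only; it assumes a hypothetical 1 GiB slot and a 4 KiB host page size, and inlines the ALIGN(x, 64) / 8 computation):
/* Worked example: 1 GiB slot, 4 KiB host pages (hypothetical values) */
uint64_t pages       = (1ULL << 30) / 4096;         /* 262144 pages */
uint64_t bitmap_size = ((pages + 63) & ~63ULL) / 8; /* 32768 bytes  */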
/*
* Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
* succeeded, false otherwise
*/
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
struct kvm_dirty_log d = {};
int ret;
d.dirty_bitmap = slot->dirty_bmap;
d.slot = slot->slot | (slot->as_id << 16);
ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
if (ret == -ENOENT) {
/* kernel does not have dirty bitmap in this slot */
ret = 0;
}
if (ret) {
error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
__func__, ret);
}
return ret == 0;
}
/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
uint32_t slot_id, uint64_t offset)
{
KVMMemoryListener *kml;
KVMSlot *mem;
if (as_id >= s->nr_as) {
return;
}
kml = s->as[as_id].ml;
mem = &kml->slots[slot_id];
if (!mem->memory_size || offset >=
(mem->memory_size / qemu_real_host_page_size)) {
return;
}
set_bit(offset, mem->dirty_bmap);
}
static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}
static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
gfn->flags = KVM_DIRTY_GFN_F_RESET;
}
/*
* Should be called with all slots_lock held for the address spaces. It returns
* the number of dirty pages collected on this dirty ring.
*/
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
uint32_t ring_size = s->kvm_dirty_ring_size;
uint32_t count = 0, fetch = cpu->kvm_fetch_index;
assert(dirty_gfns && ring_size);
trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
while (true) {
cur = &dirty_gfns[fetch % ring_size];
if (!dirty_gfn_is_dirtied(cur)) {
break;
}
kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
cur->offset);
dirty_gfn_set_collected(cur);
trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
fetch++;
count++;
}
cpu->kvm_fetch_index = fetch;
return count;
}
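The per-vcpu ring handshake that the loop above implements can be modelled in isolation. The sketch below is a hypothetical, self-contained model (the gfn_entry type and GFN_F_* constants are stand-ins for the kernel's kvm_dirty_gfn and KVM_DIRTY_GFN_F_* values); only the flag handshake and the wrapping fetch index mirror kvm_dirty_ring_reap_one():
#include <stdint.h>
#include <stdio.h>
#define RING_SIZE   8
#define GFN_F_DIRTY 1u   /* stand-in for KVM_DIRTY_GFN_F_DIRTY */
#define GFN_F_RESET 2u   /* stand-in for KVM_DIRTY_GFN_F_RESET */
struct gfn_entry { uint32_t flags; uint64_t offset; };
/* Consume entries until the first one that is not marked dirty. */
static uint32_t reap_ring(struct gfn_entry *ring, uint32_t *fetch_index)
{
    uint32_t fetch = *fetch_index, count = 0;
    while (ring[fetch % RING_SIZE].flags == GFN_F_DIRTY) {
        struct gfn_entry *cur = &ring[fetch % RING_SIZE];
        printf("collect dirty page offset %llu\n",
               (unsigned long long)cur->offset);
        cur->flags = GFN_F_RESET;   /* hand the slot back to the producer */
        fetch++;
        count++;
    }
    *fetch_index = fetch;           /* remember where to resume next time */
    return count;
}
int main(void)
{
    struct gfn_entry ring[RING_SIZE] = {
        { GFN_F_DIRTY, 0x10 }, { GFN_F_DIRTY, 0x11 }, { 0, 0 },
    };
    uint32_t fetch = 0;
    printf("reaped %u entries\n", reap_ring(ring, &fetch));
    return 0;
}
The real code additionally decodes cur->slot into an address-space id and a slot id before marking the page dirty in the matching KVMSlot bitmap.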
/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s)
{
int ret;
CPUState *cpu;
uint64_t total = 0;
int64_t stamp;
stamp = get_clock();
CPU_FOREACH(cpu) {
total += kvm_dirty_ring_reap_one(s, cpu);
}
if (total) {
ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
assert(ret == total);
}
stamp = get_clock() - stamp;
if (total) {
trace_kvm_dirty_ring_reap(total, stamp / 1000);
}
return total;
}
/*
* Currently for simplicity, we must hold BQL before calling this. We can
* consider to drop the BQL if we're clear with all the race conditions.
*/
static uint64_t kvm_dirty_ring_reap(KVMState *s)
{
uint64_t total;
/*
* We need to lock all kvm slots for all address spaces here,
* because:
*
* (1) We need to mark dirty for dirty bitmaps in multiple slots
* and for tons of pages, so it's better to take the lock here
* once rather than once per page. And more importantly,
*
* (2) We must _NOT_ publish dirty bits to the other threads
* (e.g., the migration thread) via the kvm memory slot dirty
* bitmaps before correctly re-protect those dirtied pages.
* Otherwise we can have potential risk of data corruption if
* the page data is read in the other thread before we do
* reset below.
*/
kvm_slots_lock();
total = kvm_dirty_ring_reap_locked(s);
kvm_slots_unlock();
return total;
}
static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
/* No need to do anything */
}
/*
* Kick all vcpus out in a synchronized way. When this returns, we
* guarantee that every vcpu has been kicked and has returned to
* userspace at least once.
*/
static void kvm_cpu_synchronize_kick_all(void)
{
CPUState *cpu;
CPU_FOREACH(cpu) {
run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
}
}
/*
* Flush all the existing dirty pages to the KVM slot buffers. When
* this call returns, we guarantee that every page dirtied before the
* call has been put into the per-kvmslot dirty bitmap.
*
* This function must be called with BQL held.
*/
static void kvm_dirty_ring_flush(void)
{
trace_kvm_dirty_ring_flush(0);
/*
* The function needs to be serialized. Since this function
* should always be called with BQL held, serialization is guaranteed.
* However, let's be sure of it.
*/
assert(qemu_mutex_iothread_locked());
/*
* First make sure to flush the hardware buffers by kicking all
* vcpus out in a synchronous way.
*/
kvm_cpu_synchronize_kick_all();
kvm_dirty_ring_reap(kvm_state);
trace_kvm_dirty_ring_flush(1);
}
/**
@ -619,53 +861,28 @@ static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
* @kml: the KVM memory listener object
* @section: the memory section to sync the dirty bitmap with
*/
static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
MemoryRegionSection *section)
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
MemoryRegionSection *section)
{
KVMState *s = kvm_state;
struct kvm_dirty_log d = {};
KVMSlot *mem;
hwaddr start_addr, size;
hwaddr slot_size, slot_offset = 0;
int ret = 0;
hwaddr slot_size;
size = kvm_align_section(section, &start_addr);
while (size) {
MemoryRegionSection subsection = *section;
slot_size = MIN(kvm_max_slot_size, size);
mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
if (!mem) {
/* We don't have a slot if we want to trap every access. */
goto out;
return;
}
if (!mem->dirty_bmap) {
/* Allocate on the first log_sync, once and for all */
kvm_memslot_init_dirty_bitmap(mem);
if (kvm_slot_get_dirty_log(s, mem)) {
kvm_slot_sync_dirty_pages(mem);
}
d.dirty_bitmap = mem->dirty_bmap;
d.slot = mem->slot | (kml->as_id << 16);
ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
if (ret == -ENOENT) {
/* kernel does not have dirty bitmap in this slot */
ret = 0;
} else if (ret < 0) {
error_report("ioctl KVM_GET_DIRTY_LOG failed: %d", errno);
goto out;
} else {
subsection.offset_within_region += slot_offset;
subsection.size = int128_make64(slot_size);
kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
}
slot_offset += slot_size;
start_addr += slot_size;
size -= slot_size;
}
out:
return ret;
}
/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
@ -812,7 +1029,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
return ret;
}
kvm_slots_lock(kml);
kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
mem = &kml->slots[i];
@ -838,7 +1055,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
}
}
kvm_slots_unlock(kml);
kvm_slots_unlock();
return ret;
}
@ -1121,7 +1338,8 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
int err;
MemoryRegion *mr = section->mr;
bool writeable = !mr->readonly && !mr->rom_device;
hwaddr start_addr, size, slot_size;
hwaddr start_addr, size, slot_size, mr_offset;
ram_addr_t ram_start_offset;
void *ram;
if (!memory_region_is_ram(mr)) {
@ -1139,11 +1357,15 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
return;
}
/* use aligned delta to align the ram address */
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
(start_addr - section->offset_within_address_space);
/* The offset of the kvmslot within the memory region */
mr_offset = section->offset_within_region + start_addr -
section->offset_within_address_space;
kvm_slots_lock(kml);
/* use aligned delta to align the ram address and offset */
ram = memory_region_get_ram_ptr(mr) + mr_offset;
ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
kvm_slots_lock();
if (!add) {
do {
@ -1153,7 +1375,25 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
goto out;
}
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_physical_sync_dirty_bitmap(kml, section);
/*
* NOTE: We are aware that here we only make a best effort to sync
* dirty bits. No matter whether we are using the dirty log or the
* dirty ring, we ignore two facts:
*
* (1) dirty bits can reside in hardware buffers (PML)
*
* (2) after we collect dirty bits here, pages can be dirtied
* again before we do the final KVM_SET_USER_MEMORY_REGION to
* remove the slot.
*
* Not easy. Let's cross our fingers until it's fixed.
*/
if (kvm_state->kvm_dirty_ring_size) {
kvm_dirty_ring_reap_locked(kvm_state);
} else {
kvm_slot_get_dirty_log(kvm_state, mem);
}
kvm_slot_sync_dirty_pages(mem);
}
/* unregister the slot */
@ -1177,18 +1417,13 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
do {
slot_size = MIN(kvm_max_slot_size, size);
mem = kvm_alloc_slot(kml);
mem->as_id = kml->as_id;
mem->memory_size = slot_size;
mem->start_addr = start_addr;
mem->ram_start_offset = ram_start_offset;
mem->ram = ram;
mem->flags = kvm_mem_flags(mr);
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
/*
* Reallocate the bmap; it means it doesn't disappear in
* middle of a migrate.
*/
kvm_memslot_init_dirty_bitmap(mem);
}
kvm_slot_init_dirty_bitmap(mem);
err = kvm_set_user_memory_region(kml, mem, true);
if (err) {
fprintf(stderr, "%s: error registering slot: %s\n", __func__,
@ -1196,12 +1431,58 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
abort();
}
start_addr += slot_size;
ram_start_offset += slot_size;
ram += slot_size;
size -= slot_size;
} while (size);
out:
kvm_slots_unlock(kml);
kvm_slots_unlock();
}
static void *kvm_dirty_ring_reaper_thread(void *data)
{
KVMState *s = data;
struct KVMDirtyRingReaper *r = &s->reaper;
rcu_register_thread();
trace_kvm_dirty_ring_reaper("init");
while (true) {
r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
trace_kvm_dirty_ring_reaper("wait");
/*
* TODO: provide a smarter timeout rather than a constant?
*/
sleep(1);
trace_kvm_dirty_ring_reaper("wakeup");
r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
qemu_mutex_lock_iothread();
kvm_dirty_ring_reap(s);
qemu_mutex_unlock_iothread();
r->reaper_iteration++;
}
trace_kvm_dirty_ring_reaper("exit");
rcu_unregister_thread();
return NULL;
}
static int kvm_dirty_ring_reaper_init(KVMState *s)
{
struct KVMDirtyRingReaper *r = &s->reaper;
qemu_thread_create(&r->reaper_thr, "kvm-reaper",
kvm_dirty_ring_reaper_thread,
s, QEMU_THREAD_JOINABLE);
return 0;
}
static void kvm_region_add(MemoryListener *listener,
@ -1226,14 +1507,40 @@ static void kvm_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
kvm_slots_lock(kml);
r = kvm_physical_sync_dirty_bitmap(kml, section);
kvm_slots_unlock(kml);
if (r < 0) {
abort();
kvm_slots_lock();
kvm_physical_sync_dirty_bitmap(kml, section);
kvm_slots_unlock();
}
static void kvm_log_sync_global(MemoryListener *l)
{
KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
KVMState *s = kvm_state;
KVMSlot *mem;
int i;
/* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
kvm_dirty_ring_flush();
/*
* TODO: make this faster when nr_slots is big while there are
* only a few used slots (small VMs).
*/
kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
mem = &kml->slots[i];
if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_slot_sync_dirty_pages(mem);
/*
* This is not needed by KVM_GET_DIRTY_LOG because the
* ioctl will unconditionally overwrite the whole region.
* However kvm dirty ring has no such side effect.
*/
kvm_slot_reset_dirty_pages(mem);
}
}
kvm_slots_unlock();
}
static void kvm_log_clear(MemoryListener *listener,
@ -1330,7 +1637,6 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
int i;
qemu_mutex_init(&kml->slots_lock);
kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
kml->as_id = as_id;
@ -1342,10 +1648,15 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.region_del = kvm_region_del;
kml->listener.log_start = kvm_log_start;
kml->listener.log_stop = kvm_log_stop;
kml->listener.log_sync = kvm_log_sync;
kml->listener.log_clear = kvm_log_clear;
kml->listener.priority = 10;
if (s->kvm_dirty_ring_size) {
kml->listener.log_sync_global = kvm_log_sync_global;
} else {
kml->listener.log_sync = kvm_log_sync;
kml->listener.log_clear = kvm_log_clear;
}
memory_listener_register(&kml->listener, as);
for (i = 0; i < s->nr_as; ++i) {
@ -2003,6 +2314,8 @@ static int kvm_init(MachineState *ms)
int type = 0;
uint64_t dirty_log_manual_caps;
qemu_mutex_init(&kml_slots_lock);
s = KVM_STATE(ms->accelerator);
/*
@ -2019,7 +2332,6 @@ static int kvm_init(MachineState *ms)
QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
QLIST_INIT(&s->kvm_parked_vcpus);
s->vmfd = -1;
s->fd = qemu_open_old("/dev/kvm", O_RDWR);
if (s->fd == -1) {
fprintf(stderr, "Could not access KVM kernel module: %m\n");
@ -2085,6 +2397,12 @@ static int kvm_init(MachineState *ms)
"- for kernels supporting the vm.allocate_pgste sysctl, "
"whether it is enabled\n");
}
#elif defined(TARGET_PPC)
if (ret == -EINVAL) {
fprintf(stderr,
"PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
(type == 2) ? "pr" : "hv");
}
#endif
goto err;
}
@ -2127,20 +2445,70 @@ static int kvm_init(MachineState *ms)
s->coalesced_pio = s->coalesced_mmio &&
kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
dirty_log_manual_caps =
kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
s->manual_dirty_log_protect = dirty_log_manual_caps;
if (dirty_log_manual_caps) {
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
dirty_log_manual_caps);
if (ret) {
warn_report("Trying to enable capability %"PRIu64" of "
"KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
"Falling back to the legacy mode. ",
dirty_log_manual_caps);
s->manual_dirty_log_protect = 0;
/*
* Enable KVM dirty ring if supported, otherwise fall back to
* dirty logging mode
*/
if (s->kvm_dirty_ring_size > 0) {
uint64_t ring_bytes;
ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* Read the max supported pages */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
if (ret > 0) {
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
s->kvm_dirty_ring_size,
(long)ret / sizeof(struct kvm_dirty_gfn));
ret = -EINVAL;
goto err;
}
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested mininum value is 1024.", strerror(-ret));
goto err;
}
s->kvm_dirty_ring_bytes = ring_bytes;
} else {
warn_report("KVM dirty ring not available, using bitmap method");
s->kvm_dirty_ring_size = 0;
}
}
/*
* KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
* enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
* page is wr-protected initially, which is against how the kvm dirty ring
* is used - the kvm dirty ring requires that all pages be wr-protected at
* the very beginning. Enabling this feature for the dirty ring causes data
* corruption.
*
* TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
* we may expect a higher stall time when starting the migration. In the
* future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
* instead of clearing dirty bit, it can be a way to explicitly wr-protect
* guest pages.
*/
if (!s->kvm_dirty_ring_size) {
dirty_log_manual_caps =
kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
s->manual_dirty_log_protect = dirty_log_manual_caps;
if (dirty_log_manual_caps) {
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
dirty_log_manual_caps);
if (ret) {
warn_report("Trying to enable capability %"PRIu64" of "
"KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
"Falling back to the legacy mode. ",
dirty_log_manual_caps);
s->manual_dirty_log_protect = 0;
}
}
}
@ -2226,6 +2594,14 @@ static int kvm_init(MachineState *ms)
ret = ram_block_discard_disable(true);
assert(!ret);
}
if (s->kvm_dirty_ring_size) {
ret = kvm_dirty_ring_reaper_init(s);
if (ret) {
goto err;
}
}
return 0;
err:
@ -2269,7 +2645,7 @@ static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
int i;
for (i = 0; i < run->internal.ndata; ++i) {
fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
i, (uint64_t)run->internal.data[i]);
}
}
@ -2538,6 +2914,17 @@ int kvm_cpu_exec(CPUState *cpu)
case KVM_EXIT_INTERNAL_ERROR:
ret = kvm_handle_internal_error(cpu, run);
break;
case KVM_EXIT_DIRTY_RING_FULL:
/*
* We shouldn't continue if the dirty ring of this vcpu is
* still full. Got kicked by KVM_RESET_DIRTY_RINGS.
*/
trace_kvm_dirty_ring_full(cpu->cpu_index);
qemu_mutex_lock_iothread();
kvm_dirty_ring_reap(kvm_state);
qemu_mutex_unlock_iothread();
ret = 0;
break;
case KVM_EXIT_SYSTEM_EVENT:
switch (run->system_event.type) {
case KVM_SYSTEM_EVENT_SHUTDOWN:
@ -3114,6 +3501,11 @@ static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
KVMState *s = KVM_STATE(obj);
int64_t value;
if (s->fd != -1) {
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
return;
}
if (!visit_type_int(v, name, &value, errp)) {
return;
}
@ -3128,6 +3520,11 @@ static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
KVMState *s = KVM_STATE(obj);
OnOffSplit mode;
if (s->fd != -1) {
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
return;
}
if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
return;
}
@ -3170,13 +3567,53 @@ bool kvm_kernel_irqchip_split(void)
return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}
static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
KVMState *s = KVM_STATE(obj);
uint32_t value = s->kvm_dirty_ring_size;
visit_type_uint32(v, name, &value, errp);
}
static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
KVMState *s = KVM_STATE(obj);
Error *error = NULL;
uint32_t value;
if (s->fd != -1) {
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
return;
}
visit_type_uint32(v, name, &value, &error);
if (error) {
error_propagate(errp, error);
return;
}
if (value & (value - 1)) {
error_setg(errp, "dirty-ring-size must be a power of two.");
return;
}
s->kvm_dirty_ring_size = value;
}
static void kvm_accel_instance_init(Object *obj)
{
KVMState *s = KVM_STATE(obj);
s->fd = -1;
s->vmfd = -1;
s->kvm_shadow_mem = -1;
s->kernel_irqchip_allowed = true;
s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
/* KVM dirty ring is by default off */
s->kvm_dirty_ring_size = 0;
}
static void kvm_accel_class_init(ObjectClass *oc, void *data)
@ -3198,6 +3635,12 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data)
NULL, NULL);
object_class_property_set_description(oc, "kvm-shadow-mem",
"KVM shadow MMU size");
object_class_property_add(oc, "dirty-ring-size", "uint32",
kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
NULL, NULL);
object_class_property_set_description(oc, "dirty-ring-size",
"Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
}
static const TypeInfo kvm_accel_type = {

View File

@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# See docs/devel/tracing.rst for syntax documentation.
# kvm-all.c
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
@ -18,4 +18,11 @@ kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
kvm_resample_fd_notify(int gsi) "gsi %d"
kvm_dirty_ring_full(int id) "vcpu %d"
kvm_dirty_ring_reap_vcpu(int id) "vcpu %d"
kvm_dirty_ring_page(int vcpu, uint32_t slot, uint64_t offset) "vcpu %d fetch %"PRIu32" offset 0x%"PRIx64
kvm_dirty_ring_reaper(const char *s) "%s"
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
kvm_dirty_ring_flush(int finished) "%d"

View File

@ -2,6 +2,7 @@ specific_ss.add(files('accel-common.c'))
softmmu_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c'))
subdir('hvf')
subdir('qtest')
subdir('kvm')
subdir('tcg')

View File

@ -1,6 +1,2 @@
qtest_ss = ss.source_set()
qtest_ss.add(files(
'qtest.c',
))
specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: qtest_ss)
qtest_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'],
if_true: files('qtest.c'))

View File

@ -45,6 +45,7 @@ static const TypeInfo qtest_accel_type = {
.parent = TYPE_ACCEL,
.class_init = qtest_accel_class_init,
};
module_obj(TYPE_QTEST_ACCEL);
static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
{
@ -61,6 +62,7 @@ static const TypeInfo qtest_accel_ops_type = {
.class_init = qtest_accel_ops_class_init,
.abstract = true,
};
module_obj(ACCEL_OPS_NAME("qtest"));
static void qtest_type_init(void)
{

View File

@ -11,7 +11,6 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY

View File

@ -11,7 +11,6 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
void tb_flush(CPUState *cpu)

View File

@ -13,42 +13,125 @@
* See the COPYING file in the top-level directory.
*/
static inline
void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi)
{
CPUState *cpu = env_cpu(env);
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
trace_guest_mem_before_exec(cpu, addr, info);
trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
return info;
}
static inline void
atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
}
static inline
void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info)
#if HAVE_ATOMIC128
static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi)
{
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
trace_guest_mem_before_exec(env_cpu(env), addr, info);
return info;
}
static inline
void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
static inline
void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi)
{
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
trace_guest_mem_before_exec(env_cpu(env), addr, info);
return info;
}
static inline
void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
#endif
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
* using the host return address from GETPC().
*/
#define CMPXCHG_HELPER(OP, TYPE) \
TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
CMPXCHG_HELPER(cmpxchgb, uint32_t)
CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
CMPXCHG_HELPER(cmpxchgl_le, uint32_t)
#ifdef CONFIG_ATOMIC64
CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
#undef CMPXCHG_HELPER
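For readability, this is roughly what a single instantiation of the macro above expands to after preprocessing (a sketch of the expansion, not additional code in the tree):
/* CMPXCHG_HELPER(cmpxchgb, uint32_t) expands to approximately: */
uint32_t HELPER(atomic_cmpxchgb)(CPUArchState *env, target_ulong addr,
                                 uint32_t oldv, uint32_t newv, uint32_t oi)
{
    return cpu_atomic_cmpxchgb_mmu(env, addr, oldv, newv, oi, GETPC());
}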
#define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(OP) \
ATOMIC_HELPER(glue(OP,b), uint32_t) \
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
ATOMIC_HELPER(glue(OP,l_le), uint32_t) \
ATOMIC_HELPER(glue(OP,q_be), uint64_t) \
ATOMIC_HELPER(glue(OP,q_le), uint64_t)
#else
#define GEN_ATOMIC_HELPERS(OP) \
ATOMIC_HELPER(glue(OP,b), uint32_t) \
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
ATOMIC_HELPER(glue(OP,l_le), uint32_t)
#endif
GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
GEN_ATOMIC_HELPERS(fetch_xor)
GEN_ATOMIC_HELPERS(fetch_smin)
GEN_ATOMIC_HELPERS(fetch_umin)
GEN_ATOMIC_HELPERS(fetch_smax)
GEN_ATOMIC_HELPERS(fetch_umax)
GEN_ATOMIC_HELPERS(add_fetch)
GEN_ATOMIC_HELPERS(and_fetch)
GEN_ATOMIC_HELPERS(or_fetch)
GEN_ATOMIC_HELPERS(xor_fetch)
GEN_ATOMIC_HELPERS(smin_fetch)
GEN_ATOMIC_HELPERS(umin_fetch)
GEN_ATOMIC_HELPERS(smax_fetch)
GEN_ATOMIC_HELPERS(umax_fetch)
GEN_ATOMIC_HELPERS(xchg)
#undef ATOMIC_HELPER
#undef GEN_ATOMIC_HELPERS

View File

@ -28,8 +28,8 @@
# define SHIFT 4
#elif DATA_SIZE == 8
# define SUFFIX q
# define DATA_TYPE uint64_t
# define SDATA_TYPE int64_t
# define DATA_TYPE aligned_uint64_t
# define SDATA_TYPE aligned_int64_t
# define BSWAP bswap64
# define SHIFT 3
#elif DATA_SIZE == 4
@ -71,15 +71,14 @@
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
ABI_TYPE cmpv, ABI_TYPE newv,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
@ -92,45 +91,41 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
uint16_t info = atomic_trace_ld_pre(env, addr, oi);
atomic_trace_ld_pre(env, addr, info);
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, info);
return val;
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
ATOMIC_MMU_IDX);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
uint16_t info = atomic_trace_st_pre(env, addr, oi);
atomic_trace_st_pre(env, addr, info);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, info);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, info);
ret = qatomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
@ -139,14 +134,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val EXTRA_ARGS) \
ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
@ -164,7 +157,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
/* These helpers are, as a whole, full barriers. Within the helper,
/*
* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
@ -173,14 +167,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval EXTRA_ARGS) \
ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@ -218,15 +210,14 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
ABI_TYPE cmpv, ABI_TYPE newv,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
@ -239,30 +230,27 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
uint16_t info = atomic_trace_ld_pre(env, addr, oi);
atomic_trace_ld_pre(env, addr, info);
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, info);
return BSWAP(val);
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
ATOMIC_MMU_IDX);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
uint16_t info = atomic_trace_st_pre(env, addr, oi);
val = BSWAP(val);
atomic_trace_st_pre(env, addr, info);
val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
@ -270,16 +258,14 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, info);
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
@ -288,14 +274,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val EXTRA_ARGS) \
ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
false, ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
@ -320,14 +304,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval EXTRA_ARGS) \
ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
false, ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \

View File

@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"

View File

@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
@ -30,8 +29,6 @@
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
@ -41,6 +38,9 @@
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "exec/helper-proto.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
/* -icount align implementation. */
@ -145,6 +145,190 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
}
#endif /* CONFIG USER ONLY */
uint32_t curr_cflags(CPUState *cpu)
{
uint32_t cflags = cpu->tcg_cflags;
/*
* Record gdb single-step. We should be exiting the TB by raising
* EXCP_DEBUG, but to simplify other tests, disable chaining too.
*
* For singlestep and -d nochain, suppress goto_tb so that
* we can log -d cpu,exec after every TB.
*/
if (unlikely(cpu->singlestep_enabled)) {
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
} else if (singlestep) {
cflags |= CF_NO_GOTO_TB | 1;
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
cflags |= CF_NO_GOTO_TB;
}
return cflags;
}
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base,
uint32_t flags, uint32_t cflags)
{
TranslationBlock *tb;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
qatomic_set(&cpu->tb_jmp_cache[hash], tb);
return tb;
}
static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
const TranslationBlock *tb)
{
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
&& qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [" TARGET_FMT_lx
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_lock();
int flags = 0;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
#if defined(TARGET_I386)
flags |= CPU_DUMP_CCOP;
#endif
log_cpu_state(cpu, flags);
qemu_log_unlock(logfile);
}
#endif /* DEBUG_DISAS */
}
}
static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
uint32_t *cflags)
{
CPUBreakpoint *bp;
bool match_page = false;
if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
return false;
}
/*
* Singlestep overrides breakpoints.
* This requirement is visible in the record-replay tests, where
* we would fail to make forward progress in reverse-continue.
*
* TODO: gdb singlestep should only override gdb breakpoints,
* so that one could (gdb) singlestep into the guest kernel's
* architectural breakpoint handler.
*/
if (cpu->singlestep_enabled) {
return false;
}
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
/*
* If we have an exact pc match, trigger the breakpoint.
* Otherwise, note matches within the page.
*/
if (pc == bp->pc) {
bool match_bp = false;
if (bp->flags & BP_GDB) {
match_bp = true;
} else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
g_assert_not_reached();
#else
CPUClass *cc = CPU_GET_CLASS(cpu);
assert(cc->tcg_ops->debug_check_breakpoint);
match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
}
if (match_bp) {
cpu->exception_index = EXCP_DEBUG;
return true;
}
} else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
match_page = true;
}
}
/*
* Within the same page as a breakpoint, single-step,
* returning to helper_lookup_tb_ptr after each insn looking
* for the actual breakpoint.
*
* TODO: Perhaps better to record all of the TBs associated
* with a given virtual page that contains a breakpoint, and
* then invalidate them when a new overlapping breakpoint is
* set on the page. Non-overlapping TBs would not be
* invalidated, nor would any TB need to be invalidated as
* breakpoints are removed.
*/
if (match_page) {
*cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
}
return false;
}
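
The XOR-and-mask test above is how the exact-match and page-proximity cases are told apart: XOR cancels the bits two addresses have in common, so masking with TARGET_PAGE_MASK leaves zero exactly when both addresses fall in the same guest page. A self-contained sketch of that test (not QEMU code; 4 KiB pages assumed for the demo) follows:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assume 4 KiB pages here; QEMU derives the real mask from TARGET_PAGE_BITS. */
#define DEMO_PAGE_MASK (~(uint64_t)0xfff)

/* True when pc and bp_pc lie in the same page: XOR clears the bits that
 * agree, and the mask keeps only the page-number bits. */
static bool same_page(uint64_t pc, uint64_t bp_pc)
{
    return ((pc ^ bp_pc) & DEMO_PAGE_MASK) == 0;
}

int main(void)
{
    printf("%d\n", same_page(0x40001008, 0x40001ffc)); /* 1: same page */
    printf("%d\n", same_page(0x40001008, 0x40002000)); /* 0: next page */
    return 0;
}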
/**
* helper_lookup_tb_ptr: quick check for next tb
* @env: current cpu state
*
* Look for an existing TB matching the current cpu state.
* If found, return the code pointer. If not found, return
* the tcg epilogue so that we return into cpu_tb_exec.
*/
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
cflags = curr_cflags(cpu);
if (check_for_breakpoints(cpu, pc, &cflags)) {
cpu_loop_exit(cpu);
}
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
log_cpu_exec(pc, cpu, tb);
return tb->tc.ptr;
}
/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
* Disable CFI checks.
@ -163,28 +347,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
"Trace %d: %p ["
TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
cpu->cpu_index, itb->tc.ptr,
itb->cs_base, itb->pc, itb->flags,
lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
&& qemu_log_in_addr_range(itb->pc)) {
FILE *logfile = qemu_log_lock();
int flags = 0;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
#if defined(TARGET_I386)
flags |= CPU_DUMP_CCOP;
#endif
log_cpu_state(cpu, flags);
qemu_log_unlock(logfile);
}
#endif /* DEBUG_DISAS */
log_cpu_exec(itb->pc, cpu, itb);
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
@ -247,8 +410,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@ -258,8 +420,20 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu->running = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
cflags = curr_cflags(cpu);
/* Execute in a serial context. */
cflags &= ~CF_PARALLEL;
/* After 1 insn, return and release the exclusive lock. */
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
/*
* No need to check_for_breakpoints here.
* We only arrive in cpu_exec_step_atomic after beginning execution
* of an insn that includes an atomic operation we can't handle.
* Any breakpoint for this insn will have been recognized earlier.
*/
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
@ -412,41 +586,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
return;
}
static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *last_tb,
int tb_exit, uint32_t cflags)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps when address mapping changes in
* system emulation. So it's not safe to make a direct jump to a TB
* spanning two pages because the mapping for the second page can change.
*/
if (tb->page_addr[1] != -1) {
last_tb = NULL;
}
#endif
/* See if we can patch the calling TB. */
if (last_tb) {
tb_add_jump(last_tb, tb_exit, tb);
}
return tb;
}
static inline bool cpu_handle_halt(CPUState *cpu)
{
if (cpu->halted) {
@ -695,7 +834,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
/* Ensure global icount has gone forward */
icount_update(cpu);
/* Refill decrementer and continue execution. */
insns_left = MIN(CF_COUNT_MASK, cpu->icount_budget);
insns_left = MIN(0xffff, cpu->icount_budget);
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
@ -704,7 +843,9 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
* execute we need to ensure we find/generate a TB with exactly
* insns_left instructions in it.
*/
if (!cpu->icount_extra && insns_left > 0 && insns_left < tb->icount) {
if (insns_left > 0 && insns_left < tb->icount) {
assert(insns_left <= CF_COUNT_MASK);
assert(cpu->icount_extra == 0);
cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
}
#endif
@ -714,7 +855,6 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
int cpu_exec(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int ret;
SyncClocks sc = { 0 };
@ -748,19 +888,14 @@ int cpu_exec(CPUState *cpu)
* that we support, but is still unfixed in clang:
* https://bugs.llvm.org/show_bug.cgi?id=21183
*
* Reload essential local variables here for those compilers.
* Reload an essential local variable here for those compilers.
* Newer versions of gcc would complain about this code (-Wclobbered),
* so we only perform the workaround for clang.
*/
cpu = current_cpu;
cc = CPU_GET_CLASS(cpu);
#else
/*
* Non-buggy compilers preserve these locals; assert that
* they have the correct value.
*/
/* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
#endif
#ifndef CONFIG_SOFTMMU
@ -780,22 +915,60 @@ int cpu_exec(CPUState *cpu)
int tb_exit = 0;
while (!cpu_handle_interrupt(cpu, &last_tb)) {
uint32_t cflags = cpu->cflags_next_tb;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags, cflags;
/* When requested, use an exact setting for cflags for the next
execution. This is used for icount, precise smc, and stop-
after-access watchpoints. Since this request should never
have CF_INVALID set, -1 is a convenient invalid value that
does not require tcg headers for cpu_common_reset. */
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
/*
* When requested, use an exact setting for cflags for the next
* execution. This is used for icount, precise smc, and stop-
* after-access watchpoints. Since this request should never
* have CF_INVALID set, -1 is a convenient invalid value that
* does not require tcg headers for cpu_common_reset.
*/
cflags = cpu->cflags_next_tb;
if (cflags == -1) {
cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
tb = tb_find(cpu, last_tb, tb_exit, cflags);
if (check_for_breakpoints(cpu, pc, &cflags)) {
break;
}
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/*
* We don't take care of direct jumps when address mapping
* changes in system emulation. So it's not safe to make a
* direct jump to a TB spanning two pages because the mapping
* for the second page can change.
*/
if (tb->page_addr[1] != -1) {
last_tb = NULL;
}
#endif
/* See if we can patch the calling TB. */
if (last_tb) {
tb_add_jump(last_tb, tb_exit, tb);
}
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);

View File

@ -19,14 +19,11 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
@ -38,6 +35,7 @@
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
@ -709,8 +707,9 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
target_ulong page, unsigned bits)
static void tlb_flush_range_locked(CPUArchState *env, int midx,
target_ulong addr, target_ulong len,
unsigned bits)
{
CPUTLBDesc *d = &env_tlb(env)->d[midx];
CPUTLBDescFast *f = &env_tlb(env)->f[midx];
@ -720,20 +719,26 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
* If @bits is smaller than the tlb size, there may be multiple entries
* within the TLB; otherwise all addresses that match under @mask hit
* the same TLB entry.
*
* TODO: Perhaps allow bits to be a few bits less than the size.
* For now, just flush the entire TLB.
*
* If @len is larger than the tlb size, then it will take longer to
* test all of the entries in the TLB than it will to flush it all.
*/
if (mask < f->mask) {
if (mask < f->mask || len > f->mask) {
tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, page, mask);
TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
midx, addr, mask, len);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return;
}
/* Check if we need to flush due to large pages. */
if ((page & d->large_page_mask) == d->large_page_addr) {
/*
* Check if we need to flush due to large pages.
* Because large_page_mask contains all 1's from the msb,
* we only need to test the end of the range.
*/
if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, d->large_page_addr, d->large_page_mask);
@ -741,85 +746,67 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
return;
}
if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
tlb_n_used_entries_dec(env, midx);
for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
target_ulong page = addr + i;
CPUTLBEntry *entry = tlb_entry(env, midx, page);
if (tlb_flush_entry_mask_locked(entry, page, mask)) {
tlb_n_used_entries_dec(env, midx);
}
tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}
tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}
typedef struct {
target_ulong addr;
target_ulong len;
uint16_t idxmap;
uint16_t bits;
} TLBFlushPageBitsByMMUIdxData;
} TLBFlushRangeData;
static void
tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
TLBFlushPageBitsByMMUIdxData d)
static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
TLBFlushRangeData d)
{
CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
d.addr, d.bits, d.idxmap);
tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
d.addr, d.bits, d.len, d.idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if ((d.idxmap >> mmu_idx) & 1) {
tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
}
}
qemu_spin_unlock(&env_tlb(env)->c.lock);
tb_flush_jmp_cache(cpu, d.addr);
}
static bool encode_pbm_to_runon(run_on_cpu_data *out,
TLBFlushPageBitsByMMUIdxData d)
{
/* We need 6 bits to hold @bits up to 63. */
if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
*out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
return true;
for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
tb_flush_jmp_cache(cpu, d.addr + i);
}
return false;
}
static TLBFlushPageBitsByMMUIdxData
decode_runon_to_pbm(run_on_cpu_data data)
static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
run_on_cpu_data data)
{
target_ulong addr_map_bits = (target_ulong) data.target_ptr;
return (TLBFlushPageBitsByMMUIdxData){
.addr = addr_map_bits & TARGET_PAGE_MASK,
.idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
.bits = addr_map_bits & 0x3f
};
}
static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
run_on_cpu_data runon)
{
tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
}
static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
run_on_cpu_data data)
{
TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
TLBFlushRangeData *d = data.host_ptr;
tlb_flush_range_by_mmuidx_async_0(cpu, *d);
g_free(d);
}
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
uint16_t idxmap, unsigned bits)
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
target_ulong len, uint16_t idxmap,
unsigned bits)
{
TLBFlushPageBitsByMMUIdxData d;
run_on_cpu_data runon;
TLBFlushRangeData d;
/* If all bits are significant, this devolves to tlb_flush_page. */
if (bits >= TARGET_LONG_BITS) {
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
return;
}
@ -831,34 +818,38 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
d.len = len;
d.idxmap = idxmap;
d.bits = bits;
if (qemu_cpu_is_self(cpu)) {
tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
} else if (encode_pbm_to_runon(&runon, d)) {
async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
tlb_flush_range_by_mmuidx_async_0(cpu, d);
} else {
TLBFlushPageBitsByMMUIdxData *p
= g_new(TLBFlushPageBitsByMMUIdxData, 1);
/* Otherwise allocate a structure, freed by the worker. */
*p = d;
async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
}
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
target_ulong addr,
uint16_t idxmap,
unsigned bits)
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
uint16_t idxmap, unsigned bits)
{
TLBFlushPageBitsByMMUIdxData d;
run_on_cpu_data runon;
tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
/* If all bits are significant, this devolves to tlb_flush_page. */
if (bits >= TARGET_LONG_BITS) {
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
target_ulong addr, target_ulong len,
uint16_t idxmap, unsigned bits)
{
TLBFlushRangeData d;
CPUState *dst_cpu;
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
return;
}
@ -870,40 +861,45 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
d.len = len;
d.idxmap = idxmap;
d.bits = bits;
if (encode_pbm_to_runon(&runon, d)) {
flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
} else {
CPUState *dst_cpu;
TLBFlushPageBitsByMMUIdxData *p;
/* Allocate a separate data block for each destination cpu. */
CPU_FOREACH(dst_cpu) {
if (dst_cpu != src_cpu) {
p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
*p = d;
async_run_on_cpu(dst_cpu,
tlb_flush_page_bits_by_mmuidx_async_2,
RUN_ON_CPU_HOST_PTR(p));
}
/* Allocate a separate data block for each destination cpu. */
CPU_FOREACH(dst_cpu) {
if (dst_cpu != src_cpu) {
TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
async_run_on_cpu(dst_cpu,
tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
}
tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr,
uint16_t idxmap,
unsigned bits)
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
target_ulong addr,
uint16_t idxmap, unsigned bits)
{
TLBFlushPageBitsByMMUIdxData d;
run_on_cpu_data runon;
tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
idxmap, bits);
}
/* If all bits are significant, this devolves to tlb_flush_page. */
if (bits >= TARGET_LONG_BITS) {
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr,
target_ulong len,
uint16_t idxmap,
unsigned bits)
{
TLBFlushRangeData d, *p;
CPUState *dst_cpu;
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
@ -915,32 +911,31 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
d.len = len;
d.idxmap = idxmap;
d.bits = bits;
if (encode_pbm_to_runon(&runon, d)) {
flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
runon);
} else {
CPUState *dst_cpu;
TLBFlushPageBitsByMMUIdxData *p;
/* Allocate a separate data block for each destination cpu. */
CPU_FOREACH(dst_cpu) {
if (dst_cpu != src_cpu) {
p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
*p = d;
async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
RUN_ON_CPU_HOST_PTR(p));
}
/* Allocate a separate data block for each destination cpu. */
CPU_FOREACH(dst_cpu) {
if (dst_cpu != src_cpu) {
p = g_memdup(&d, sizeof(d));
async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
*p = d;
async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
RUN_ON_CPU_HOST_PTR(p));
}
p = g_memdup(&d, sizeof(d));
async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr,
uint16_t idxmap,
unsigned bits)
{
tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
idxmap, bits);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
@ -1742,7 +1737,7 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
} else {
data->is_io = false;
data->v.ram.hostaddr = addr + tlbe->addend;
data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
}
return true;
} else {
@ -1756,18 +1751,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
#endif
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
/*
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
TCGMemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
size_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_addr_write(tlbe);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
int s_bits = mop & MO_SIZE;
uintptr_t index;
CPUTLBEntry *tlbe;
target_ulong tlb_addr;
void *hostaddr;
/* Adjust the given return address. */
@ -1781,7 +1780,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
/* Enforce qemu required alignment. */
if (unlikely(addr & ((1 << s_bits) - 1))) {
if (unlikely(addr & (size - 1))) {
/* We get here if guest alignment was not requested,
or was not enforced by cpu_unaligned_access above.
We might widen the access and emulate, but for now
@ -1789,15 +1788,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world;
}
index = tlb_index(env, mmu_idx, addr);
tlbe = tlb_entry(env, mmu_idx, addr);
/* Check TLB entry and enforce page permissions. */
if (!tlb_hit(tlb_addr, addr)) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
tlbe = tlb_entry(env, mmu_idx, addr);
if (prot & PAGE_WRITE) {
tlb_addr = tlb_addr_write(tlbe);
if (!tlb_hit(tlb_addr, addr)) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(env_cpu(env), addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
tlbe = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
/* Let the guest notice RMW on a write-only page. */
if ((prot & PAGE_READ) &&
unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
tlb_fill(env_cpu(env), addr, size,
MMU_DATA_LOAD, mmu_idx, retaddr);
/*
* Since we don't support reads and writes to different addresses,
* and we do have the proper page loaded for write, this shouldn't
* ever return. But just in case, handle via stop-the-world.
*/
goto stop_the_world;
}
} else /* if (prot & PAGE_READ) */ {
tlb_addr = tlbe->addr_read;
if (!tlb_hit(tlb_addr, addr)) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(env_cpu(env), addr, size,
MMU_DATA_LOAD, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
tlbe = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
}
tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
/* Notice an IO access or a needs-MMU-lookup access */
@ -1807,20 +1836,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world;
}
/* Let the guest notice RMW on a write-only page. */
if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
mmu_idx, retaddr);
/* Since we don't support reads and writes to different addresses,
and we do have the proper page loaded for write, this shouldn't
ever return. But just in case, handle via stop-the-world. */
goto stop_the_world;
}
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(env_cpu(env), addr, 1 << s_bits,
notdirty_write(env_cpu(env), addr, size,
&env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
}
@ -2703,14 +2722,14 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
cpu_stq_le_data_ra(env, ptr, val, 0);
}
/* First set of helpers allows passing in of OI and RETADDR. This makes
them callable from other helpers. */
/*
* First set of functions passes in OI and RETADDR.
* This makes them callable from other helpers.
*/
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)
@ -2735,30 +2754,6 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
#include "atomic_template.h"
#endif
/* Second set of helpers are directly callable from TCG as helpers. */
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
#define DATA_SIZE 1
#include "atomic_template.h"
#define DATA_SIZE 2
#include "atomic_template.h"
#define DATA_SIZE 4
#include "atomic_template.h"
#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX
/* Code access functions. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,

accel/tcg/hmp.c (new file, 29 lines)
View File

@ -0,0 +1,29 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "monitor/monitor.h"
#include "sysemu/tcg.h"
static void hmp_info_jit(Monitor *mon, const QDict *qdict)
{
if (!tcg_enabled()) {
error_report("JIT information is only available with accel=tcg");
return;
}
dump_exec_info();
dump_drift_info();
}
static void hmp_info_opcount(Monitor *mon, const QDict *qdict)
{
dump_opcount_info();
}
static void hmp_tcg_register(void)
{
monitor_register_hmp("jit", true, hmp_info_jit);
monitor_register_hmp("opcount", true, hmp_info_opcount);
}
type_init(hmp_tcg_register);
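
For orientation, the two handlers registered above back HMP monitor commands (not QMP); with a TCG guest running they are reached from the monitor prompt as, for example:

(qemu) info jit
(qemu) info opcount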

View File

@ -16,5 +16,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
int cflags);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void page_init(void);
void tb_htable_init(void);
#endif /* ACCEL_TCG_INTERNAL_H */

View File

@ -15,8 +15,12 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
'hmp.c',
))
tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'tcg-accel-ops.c',
'tcg-accel-ops-mttcg.c',
'tcg-accel-ops-icount.c',
'tcg-accel-ops-rr.c'
'tcg-accel-ops-rr.c',
))

View File

@ -43,7 +43,6 @@
* CPU's index into a TCG temp, since the first callback did it already.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
@ -161,9 +160,8 @@ static void gen_empty_mem_helper(void)
tcg_temp_free_ptr(ptr);
}
static inline
void gen_plugin_cb_start(enum plugin_gen_from from,
enum plugin_gen_cb type, unsigned wr)
static void gen_plugin_cb_start(enum plugin_gen_from from,
enum plugin_gen_cb type, unsigned wr)
{
TCGOp *op;
@ -180,7 +178,7 @@ static void gen_wrapped(enum plugin_gen_from from,
tcg_gen_plugin_cb_end();
}
static inline void plugin_gen_empty_callback(enum plugin_gen_from from)
static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
switch (from) {
case PLUGIN_GEN_AFTER_INSN:
@ -386,7 +384,7 @@ static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
}
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
void *func, unsigned tcg_flags, int *cb_idx)
void *func, int *cb_idx)
{
/* copy all ops until the call */
do {
@ -413,7 +411,7 @@ static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
}
op->args[*cb_idx] = (uintptr_t)func;
op->args[*cb_idx + 1] = tcg_flags;
op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1];
return op;
}
@ -440,7 +438,7 @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
/* call */
op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
cb->f.vcpu_udata, cb_idx);
return op;
}
@ -491,7 +489,7 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
if (type == PLUGIN_GEN_CB_MEM) {
/* call */
op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
cb->f.vcpu_udata, cb_idx);
}
return op;
@ -514,9 +512,8 @@ static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
return !!(cb->rw & (w + 1));
}
static inline
void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject,
op_ok_fn ok)
static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
inject_fn inject, op_ok_fn ok)
{
TCGOp *end_op;
TCGOp *op;

View File

@ -1,5 +1,4 @@
#ifdef CONFIG_PLUGIN
/* Note: no TCG flags because those are overwritten later */
DEF_HELPER_2(plugin_vcpu_udata_cb, void, i32, ptr)
DEF_HELPER_4(plugin_vcpu_mem_cb, void, i32, i32, i64, ptr)
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG, void, i32, ptr)
DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG, void, i32, i32, i64, ptr)
#endif

View File

@ -34,6 +34,7 @@ struct TBContext {
/* statistics */
unsigned tb_flush_count;
unsigned tb_phys_invalidate_count;
};
extern TBContext tb_ctx;

View File

@ -30,7 +30,6 @@
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-icount.h"

View File

@ -30,7 +30,6 @@
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"

View File

@ -32,7 +32,6 @@
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
@ -125,6 +124,7 @@ static const TypeInfo tcg_accel_ops_type = {
.class_init = tcg_accel_ops_class_init,
.abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));
static void tcg_accel_ops_register_types(void)
{

View File

@ -32,6 +32,11 @@
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "internal.h"
struct TCGState {
AccelState parent_obj;
@ -105,22 +110,29 @@ static void tcg_accel_instance_init(Object *obj)
bool mttcg_enabled;
static int tcg_init(MachineState *ms)
static int tcg_init_machine(MachineState *ms)
{
TCGState *s = TCG_STATE(current_accel());
#ifdef CONFIG_USER_ONLY
unsigned max_cpus = 1;
#else
unsigned max_cpus = ms->smp.max_cpus;
#endif
tcg_exec_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
tcg_allowed = true;
mttcg_enabled = s->mttcg_enabled;
page_init();
tb_htable_init();
tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);
#if defined(CONFIG_SOFTMMU)
/*
* Initialize TCG regions only for softmmu.
*
* This needs to be done later for user mode, because the prologue
* generation needs to be delayed so that GUEST_BASE is already set.
* There's no guest base to take into account, so go ahead and
* initialize the prologue now.
*/
#ifndef CONFIG_USER_ONLY
tcg_region_init();
#endif /* !CONFIG_USER_ONLY */
tcg_prologue_init(tcg_ctx);
#endif
return 0;
}
@ -200,7 +212,7 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg";
ac->init_machine = tcg_init;
ac->init_machine = tcg_init_machine;
ac->allowed = &tcg_allowed;
object_class_property_add_str(oc, "thread",
@ -226,6 +238,7 @@ static const TypeInfo tcg_accel_type = {
.class_init = tcg_accel_class_init,
.instance_size = sizeof(TCGState),
};
module_obj(TYPE_TCG_ACCEL);
static void register_accel_types(void)
{

View File

@ -1073,9 +1073,8 @@ void HELPER(gvec_ssadd32)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(int32_t)) {
int32_t ai = *(int32_t *)(a + i);
int32_t bi = *(int32_t *)(b + i);
int32_t di = ai + bi;
if (((di ^ ai) &~ (ai ^ bi)) < 0) {
/* Signed overflow. */
int32_t di;
if (sadd32_overflow(ai, bi, &di)) {
di = (di < 0 ? INT32_MAX : INT32_MIN);
}
*(int32_t *)(d + i) = di;
@ -1091,9 +1090,8 @@ void HELPER(gvec_ssadd64)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(int64_t)) {
int64_t ai = *(int64_t *)(a + i);
int64_t bi = *(int64_t *)(b + i);
int64_t di = ai + bi;
if (((di ^ ai) &~ (ai ^ bi)) < 0) {
/* Signed overflow. */
int64_t di;
if (sadd64_overflow(ai, bi, &di)) {
di = (di < 0 ? INT64_MAX : INT64_MIN);
}
*(int64_t *)(d + i) = di;
@ -1143,9 +1141,8 @@ void HELPER(gvec_sssub32)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(int32_t)) {
int32_t ai = *(int32_t *)(a + i);
int32_t bi = *(int32_t *)(b + i);
int32_t di = ai - bi;
if (((di ^ ai) & (ai ^ bi)) < 0) {
/* Signed overflow. */
int32_t di;
if (ssub32_overflow(ai, bi, &di)) {
di = (di < 0 ? INT32_MAX : INT32_MIN);
}
*(int32_t *)(d + i) = di;
@ -1161,9 +1158,8 @@ void HELPER(gvec_sssub64)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(int64_t)) {
int64_t ai = *(int64_t *)(a + i);
int64_t bi = *(int64_t *)(b + i);
int64_t di = ai - bi;
if (((di ^ ai) & (ai ^ bi)) < 0) {
/* Signed overflow. */
int64_t di;
if (ssub64_overflow(ai, bi, &di)) {
di = (di < 0 ? INT64_MAX : INT64_MIN);
}
*(int64_t *)(d + i) = di;
@ -1209,8 +1205,8 @@ void HELPER(gvec_usadd32)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
uint32_t ai = *(uint32_t *)(a + i);
uint32_t bi = *(uint32_t *)(b + i);
uint32_t di = ai + bi;
if (di < ai) {
uint32_t di;
if (uadd32_overflow(ai, bi, &di)) {
di = UINT32_MAX;
}
*(uint32_t *)(d + i) = di;
@ -1226,8 +1222,8 @@ void HELPER(gvec_usadd64)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
uint64_t ai = *(uint64_t *)(a + i);
uint64_t bi = *(uint64_t *)(b + i);
uint64_t di = ai + bi;
if (di < ai) {
uint64_t di;
if (uadd64_overflow(ai, bi, &di)) {
di = UINT64_MAX;
}
*(uint64_t *)(d + i) = di;
@ -1273,8 +1269,8 @@ void HELPER(gvec_ussub32)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
uint32_t ai = *(uint32_t *)(a + i);
uint32_t bi = *(uint32_t *)(b + i);
uint32_t di = ai - bi;
if (ai < bi) {
uint32_t di;
if (usub32_overflow(ai, bi, &di)) {
di = 0;
}
*(uint32_t *)(d + i) = di;
@ -1290,8 +1286,8 @@ void HELPER(gvec_ussub64)(void *d, void *a, void *b, uint32_t desc)
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
uint64_t ai = *(uint64_t *)(a + i);
uint64_t bi = *(uint64_t *)(b + i);
uint64_t di = ai - bi;
if (ai < bi) {
uint64_t di;
if (usub64_overflow(ai, bi, &di)) {
di = 0;
}
*(uint64_t *)(d + i) = di;
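
The rewritten saturating helpers above lean on the checked-arithmetic helpers (sadd32_overflow, uadd64_overflow, and friends) instead of open-coded sign tests. As a rough illustration of what such a helper boils down to (a sketch, not the QEMU implementation), the GCC/Clang checked-add builtin reports overflow while still storing the wrapped result, which is what the saturation step then inspects:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of a sadd32_overflow-style helper on top of __builtin_add_overflow
 * (GCC/Clang): returns true when the mathematical sum does not fit. */
static bool demo_sadd32_overflow(int32_t a, int32_t b, int32_t *res)
{
    return __builtin_add_overflow(a, b, res);
}

int main(void)
{
    int32_t di;
    if (demo_sadd32_overflow(INT32_MAX, 1, &di)) {
        /* Wrapped result is negative, so the true sum overflowed upward:
         * saturate to INT32_MAX, mirroring gvec_ssadd32 above. */
        di = (di < 0 ? INT32_MAX : INT32_MIN);
    }
    printf("%d\n", di); /* prints 2147483647 */
    return 0;
}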

View File

@ -30,7 +30,6 @@
#include "disas/disas.h"
#include "exec/log.h"
#include "tcg/tcg.h"
#include "exec/tb-lookup.h"
/* 32-bit helpers */
@ -145,27 +144,6 @@ uint64_t HELPER(ctpop_i64)(uint64_t arg)
return ctpop64(arg);
}
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
"Chain %d: %p ["
TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
cpu->cpu_index, tb->tc.ptr, cs_base, pc, flags,
lookup_symbol(pc));
return tb->tc.ptr;
}
void HELPER(exit_atomic)(CPUArchState *env)
{
cpu_loop_exit_atomic(env_cpu(env), GETPC());

View File

@ -39,8 +39,6 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
#endif /* IN_HELPER_PROTO */
#ifdef CONFIG_SOFTMMU
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
@ -88,50 +86,6 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
TCG_CALL_NO_WG, i32, env, tl, i32, i32)
#endif /* CONFIG_ATOMIC64 */
#else
DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
#ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
#endif
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le), \
TCG_CALL_NO_WG, i64, env, tl, i64) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, tl, i64)
#else
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, tl, i32)
#endif /* CONFIG_ATOMIC64 */
#endif /* CONFIG_SOFTMMU */
GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)

View File

@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# See docs/devel/tracing.rst for syntax documentation.
# TCG related tracing
# cpu-exec.c

View File

@ -18,11 +18,9 @@
*/
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
@ -48,10 +46,8 @@
#endif
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
@ -61,6 +57,8 @@
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
/* #define DEBUG_TB_INVALIDATE */
@ -220,9 +218,6 @@ static int v_l2_levels;
static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
static void page_table_config_init(void)
@ -245,11 +240,6 @@ static void page_table_config_init(void)
assert(v_l2_levels >= 0);
}
static void cpu_gen_init(void)
{
tcg_context_init(&tcg_init_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
@ -388,11 +378,6 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
return 0;
}
void tb_destroy(TranslationBlock *tb)
{
qemu_spin_destroy(&tb->jmp_lock);
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
/*
@ -415,7 +400,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
return false;
}
static void page_init(void)
void page_init(void)
{
page_size_init();
page_table_config_init();
@ -900,408 +885,6 @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
}
}
/* Minimum size of the code gen buffer. This number is randomly chosen,
but not so small that we can't have a fair number of TB's live. */
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
indicated, this is constrained by the range of direct branches on the
host cpu, as used by the TCG implementation of goto_tb. */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__s390x__)
/* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
#elif defined(__mips__)
/* We have a 256MB branch region, but leave room to make sure the
main executable is also within that region. */
# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif
#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
* For user mode on smaller 32 bit systems we may run into trouble
* allocating big chunks of data in the right place. On these systems
* we utilise a static code generation buffer directly in the binary.
*/
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
* As user-mode emulation typically means running multiple instances
* of the translator, don't go too nuts with our default code gen
* buffer lest we make things too hard for the OS.
*/
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
* We expect most system emulation to run one or two guests per host.
* Users running large scale system emulation may want to tweak their
* runtime setup via the tb-size control on the command line.
*/
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static size_t size_code_gen_buffer(size_t tb_size)
{
/* Size the buffer. */
if (tb_size == 0) {
size_t phys_mem = qemu_get_host_physmem();
if (phys_mem == 0) {
tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
} else {
tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
}
}
if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
tb_size = MIN_CODE_GEN_BUFFER_SIZE;
}
if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
tb_size = MAX_CODE_GEN_BUFFER_SIZE;
}
return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
that the buffer not cross a 256MB boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
/* We weren't able to allocate a buffer without crossing that boundary,
so make do with the larger portion of the buffer that doesn't cross.
Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
size_t size2 = buf1 + size1 - buf2;
size1 = buf2 - buf1;
if (size1 < size2) {
size1 = size2;
buf1 = buf2;
}
tcg_ctx->code_gen_buffer_size = size1;
return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
void *buf, *end;
size_t size;
if (splitwx > 0) {
error_setg(errp, "jit split-wx not supported");
return false;
}
/* page-align the beginning and end of the buffer */
buf = static_code_gen_buffer;
end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
size = end - buf;
/* Honor a command-line option limiting the size of the buffer. */
if (size > tb_size) {
size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
}
tcg_ctx->code_gen_buffer_size = size;
#ifdef __mips__
if (cross_256mb(buf, size)) {
buf = split_cross_256mb(buf, size);
size = tcg_ctx->code_gen_buffer_size;
}
#endif
if (qemu_mprotect_rwx(buf, size)) {
error_setg_errno(errp, errno, "mprotect of jit buffer");
return false;
}
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
tcg_ctx->code_gen_buffer = buf;
return true;
}
#elif defined(_WIN32)
static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
void *buf;
if (splitwx > 0) {
error_setg(errp, "jit split-wx not supported");
return false;
}
buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
if (buf == NULL) {
error_setg_win32(errp, GetLastError(),
"allocate %zu bytes for jit buffer", size);
return false;
}
tcg_ctx->code_gen_buffer = buf;
tcg_ctx->code_gen_buffer_size = size;
return true;
}
#else
static bool alloc_code_gen_buffer_anon(size_t size, int prot,
int flags, Error **errp)
{
void *buf;
buf = mmap(NULL, size, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
error_setg_errno(errp, errno,
"allocate %zu bytes for jit buffer", size);
return false;
}
tcg_ctx->code_gen_buffer_size = size;
#ifdef __mips__
if (cross_256mb(buf, size)) {
/*
* Try again, with the original still mapped, to avoid re-acquiring
* the same 256mb crossing.
*/
size_t size2;
void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
switch ((int)(buf2 != MAP_FAILED)) {
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
munmap(buf, size);
break;
}
/* Failure. Work with what we had. */
munmap(buf2, size);
/* fallthru */
default:
/* Split the original buffer. Free the smaller half. */
buf2 = split_cross_256mb(buf, size);
size2 = tcg_ctx->code_gen_buffer_size;
if (buf == buf2) {
munmap(buf + size2, size - size2);
} else {
munmap(buf, size - size2);
}
size = size2;
break;
}
buf = buf2;
}
#endif
/* Request large pages for the buffer. */
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
tcg_ctx->code_gen_buffer = buf;
return true;
}
#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"
static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
void *buf_rw = NULL, *buf_rx = MAP_FAILED;
int fd = -1;
#ifdef __mips__
/* Find space for the RX mapping, vs the 256MiB regions. */
if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS |
MAP_NORESERVE, errp)) {
return false;
}
/* The size of the mapping may have been adjusted. */
size = tcg_ctx->code_gen_buffer_size;
buf_rx = tcg_ctx->code_gen_buffer;
#endif
buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
if (buf_rw == NULL) {
goto fail;
}
#ifdef __mips__
void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
MAP_SHARED | MAP_FIXED, fd, 0);
if (tmp != buf_rx) {
goto fail_rx;
}
#else
buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
if (buf_rx == MAP_FAILED) {
goto fail_rx;
}
#endif
close(fd);
tcg_ctx->code_gen_buffer = buf_rw;
tcg_ctx->code_gen_buffer_size = size;
tcg_splitwx_diff = buf_rx - buf_rw;
/* Request large pages for the buffer and the splitwx. */
qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
return true;
fail_rx:
error_setg_errno(errp, errno, "failed to map shared memory for execute");
fail:
if (buf_rx != MAP_FAILED) {
munmap(buf_rx, size);
}
if (buf_rw) {
munmap(buf_rw, size);
}
if (fd >= 0) {
close(fd);
}
return false;
}
#endif /* CONFIG_POSIX */
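
The memfd-based split-wx path above keeps one buffer with two views: the translator writes through a read-write mapping while generated code executes through a read-execute mapping of the same pages. A stripped-down Linux-only sketch of that dual-mapping idea (hypothetical demo names, error handling abbreviated, not QEMU code) looks like this:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t size = 4096;

    /* One anonymous memfd backs both mappings. */
    int fd = memfd_create("jit-demo", 0);
    if (fd < 0 || ftruncate(fd, size) < 0) {
        return 1;
    }

    /* Writable view for emitting code, executable view for running it. */
    void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (rw == MAP_FAILED || rx == MAP_FAILED) {
        return 1;
    }
    close(fd);

    /* Bytes stored through the RW view become visible through the RX view;
     * a real JIT records the offset between the two, much like
     * tcg_splitwx_diff above. */
    memcpy(rw, "\xc3", 1); /* x86-64 'ret', never actually executed here */
    printf("rx - rw = %td\n", (char *)rx - (char *)rw);

    munmap(rw, size);
    munmap(rx, size);
    return 0;
}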
#ifdef CONFIG_DARWIN
#include <mach/mach.h>
extern kern_return_t mach_vm_remap(vm_map_t target_task,
mach_vm_address_t *target_address,
mach_vm_size_t size,
mach_vm_offset_t mask,
int flags,
vm_map_t src_task,
mach_vm_address_t src_address,
boolean_t copy,
vm_prot_t *cur_protection,
vm_prot_t *max_protection,
vm_inherit_t inheritance);
static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
kern_return_t ret;
mach_vm_address_t buf_rw, buf_rx;
vm_prot_t cur_prot, max_prot;
/* Map the read-write portion via normal anon memory. */
if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
return false;
}
buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
buf_rx = 0;
ret = mach_vm_remap(mach_task_self(),
&buf_rx,
size,
0,
VM_FLAGS_ANYWHERE,
mach_task_self(),
buf_rw,
false,
&cur_prot,
&max_prot,
VM_INHERIT_NONE);
if (ret != KERN_SUCCESS) {
/* TODO: Convert "ret" to a human readable error message. */
error_setg(errp, "vm_remap for jit splitwx failed");
munmap((void *)buf_rw, size);
return false;
}
if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
error_setg_errno(errp, errno, "mprotect for jit splitwx");
munmap((void *)buf_rx, size);
munmap((void *)buf_rw, size);
return false;
}
tcg_splitwx_diff = buf_rx - buf_rw;
return true;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */
static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
error_setg(errp, "jit split-wx not supported");
return false;
}
static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
ERRP_GUARD();
int prot, flags;
if (splitwx) {
if (alloc_code_gen_buffer_splitwx(size, errp)) {
return true;
}
/*
* If splitwx force-on (1), fail;
* if splitwx default-on (-1), fall through to splitwx off.
*/
if (splitwx > 0) {
return false;
}
error_free_or_abort(errp);
}
prot = PROT_READ | PROT_WRITE | PROT_EXEC;
flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
/* The tcg interpreter does not need execute permission. */
prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
/* Applicable to both iOS and macOS (Apple Silicon). */
if (!splitwx) {
flags |= MAP_JIT;
}
#endif
return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static bool tb_cmp(const void *ap, const void *bp)
{
const TranslationBlock *a = ap;
@ -1316,36 +899,13 @@ static bool tb_cmp(const void *ap, const void *bp)
a->page_addr[1] == b->page_addr[1];
}
static void tb_htable_init(void)
void tb_htable_init(void)
{
unsigned int mode = QHT_MODE_AUTO_RESIZE;
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
(in bytes) allocated to the translation buffer. Zero means default
size. */
void tcg_exec_init(unsigned long tb_size, int splitwx)
{
bool ok;
tcg_allowed = true;
cpu_gen_init();
page_init();
tb_htable_init();
ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
splitwx, &error_fatal);
assert(ok);
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
tcg_prologue_init(tcg_ctx);
#endif
}
/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
@ -1659,8 +1219,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* suppress any remaining jumps to this TB */
tb_jmp_unlink(tb);
qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
tcg_ctx->tb_phys_invalidate_count + 1);
qatomic_set(&tb_ctx.tb_phys_invalidate_count,
tb_ctx.tb_phys_invalidate_count + 1);
}
static void tb_phys_invalidate__locked(TranslationBlock *tb)
@ -1868,14 +1428,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
max_insns = cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
if (cpu->singlestep_enabled || singlestep) {
max_insns = 1;
}
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
buffer_overflow:
tb = tcg_tb_alloc(tcg_ctx);
@ -1913,6 +1468,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_ctx->cpu = env_cpu(env);
gen_intermediate_code(cpu, tb, max_insns);
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
max_insns = tb->icount;
@ -2043,8 +1599,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
int i;
qemu_log(" data: [size=%d]\n", data_size);
for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
qemu_log("0x%08" PRIxPTR ": .quad 0x%" TCG_PRIlx "\n",
(uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
if (sizeof(tcg_target_ulong) == 8) {
qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
(uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
} else if (sizeof(tcg_target_ulong) == 4) {
qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
(uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
} else {
qemu_build_not_reached();
}
}
}
qemu_log("\n");
@ -2084,6 +1647,13 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
return tb;
}
/*
* Insert TB into the corresponding region tree before publishing it
* through QHT. Otherwise, a rewind that happens inside the TB might fail to
* look the TB up by host PC.
*/
tcg_tb_insert(tb);
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
@ -2101,10 +1671,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
tb_destroy(tb);
tcg_tb_remove(tb);
return existing_tb;
}
tcg_tb_insert(tb);
return tb;
}
@ -2554,8 +2123,8 @@ void dump_exec_info(void)
qemu_printf("\nStatistics:\n");
qemu_printf("TB flush count %u\n",
qatomic_read(&tb_ctx.tb_flush_count));
qemu_printf("TB invalidate count %zu\n",
tcg_tb_phys_invalidate_count());
qemu_printf("TB invalidate count %u\n",
qatomic_read(&tb_ctx.tb_phys_invalidate_count));
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
qemu_printf("TLB full flushes %zu\n", flush_full);

View File

@ -9,7 +9,6 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
@ -32,10 +31,21 @@ void translator_loop_temp_check(DisasContextBase *db)
}
}
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
{
/* Suppress goto_tb if requested. */
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
return false;
}
/* Check for the dest on the same page as the start of the TB. */
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{
int bp_insn = 0;
uint32_t cflags = tb_cflags(tb);
bool plugin_enabled;
/* Initialize DisasContext */
@ -45,7 +55,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = max_insns;
db->singlestep_enabled = cpu->singlestep_enabled;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@ -58,8 +68,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
ops->tb_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
plugin_enabled = plugin_gen_tb_start(cpu, tb,
tb_cflags(db->tb) & CF_MEMI_ONLY);
plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY);
while (true) {
db->num_insns++;
@ -70,39 +79,17 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
plugin_gen_insn_start(cpu, db);
}
/* Pass breakpoint hits to target for further processing */
if (!db->singlestep_enabled
&& unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == db->pc_next) {
if (ops->breakpoint_check(db, cpu, bp)) {
bp_insn = 1;
break;
}
}
}
/* The breakpoint_check hook may use DISAS_TOO_MANY to indicate
that only one more instruction is to be executed. Otherwise
it should use DISAS_NORETURN when generating an exception,
but may use a DISAS_TARGET_* value for Something Else. */
if (db->is_jmp > DISAS_TOO_MANY) {
break;
}
}
/* Disassemble one instruction. The translate_insn hook should
update db->pc_next and db->is_jmp to indicate what should be
done next -- either exiting this loop or locating the start of
the next instruction. */
if (db->num_insns == db->max_insns
&& (tb_cflags(db->tb) & CF_LAST_IO)) {
if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
/* Accept I/O on the last instruction. */
gen_io_start();
ops->translate_insn(db, cpu);
} else {
/* we should only see CF_MEMI_ONLY for io_recompile */
tcg_debug_assert(!(tb_cflags(db->tb) & CF_MEMI_ONLY));
tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
ops->translate_insn(db, cpu);
}
@ -129,7 +116,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
/* Emit code to exit the TB, as indicated by db->is_jmp. */
ops->tb_stop(db, cpu);
gen_tb_end(db->tb, db->num_insns - bp_insn);
gen_tb_end(db->tb, db->num_insns);
if (plugin_enabled) {
plugin_gen_tb_end(cpu);


@ -1,7 +1,6 @@
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "sysemu/replay.h"
#include "sysemu/sysemu.h"
bool enable_cpu_pm = false;


@ -17,7 +17,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
@ -255,28 +254,35 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
#if defined(__NetBSD__)
#include <ucontext.h>
#include <machine/trap.h>
#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>
#include <machine/trap.h>
#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__OpenBSD__)
#include <machine/trap.h>
#define EIP_sig(context) ((context)->sc_eip)
#define TRAP_sig(context) ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context) ((context)->sc_mask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#else
#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP 0xe
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
@ -302,34 +308,42 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = EIP_sig(uc);
trapno = TRAP_sig(uc);
return handle_cpu_signal(pc, info,
trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
trapno == PAGE_FAULT_TRAP ?
(ERROR_sig(uc) >> 1) & 1 : 0,
&MASK_sig(uc));
}
#elif defined(__x86_64__)
#ifdef __NetBSD__
#include <machine/trap.h>
#define PC_sig(context) _UC_MACHINE_PC(context)
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__OpenBSD__)
#include <machine/trap.h>
#define PC_sig(context) ((context)->sc_rip)
#define TRAP_sig(context) ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context) ((context)->sc_mask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>
#include <machine/trap.h>
#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#else
#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP 0xe
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
@ -347,7 +361,8 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = PC_sig(uc);
return handle_cpu_signal(pc, info,
TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
TRAP_sig(uc) == PAGE_FAULT_TRAP ?
(ERROR_sig(uc) >> 1) & 1 : 0,
&MASK_sig(uc));
}
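/*
 * Note on the expression used by both handlers above: bit 1 of the x86
 * page-fault error code is the write/read flag, so "(ERROR_sig(uc) >> 1) & 1"
 * evaluates to 1 only when the faulting access was a write.
 */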
@ -1206,9 +1221,14 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret;
}
/* Do not allow unaligned operations to proceed. Return the host address. */
/*
* Do not allow unaligned operations to proceed. Return the host address.
*
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
int size, uintptr_t retaddr)
TCGMemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
@ -1219,17 +1239,18 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
return ret;
}
/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#include "atomic_common.c.inc"
/*
* First set of functions passes in OI and RETADDR.
* This makes them callable from other helpers.
*/
#define ATOMIC_NAME(X) \
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
#include "atomic_common.c.inc"
#define DATA_SIZE 1
#include "atomic_template.h"
@ -1244,20 +1265,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif
/* The following is only callable from other helpers, and matches up
with the softmmu version. */
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
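For readers tracing the macro changes above, a rough sketch of what the renaming expands to (assuming the usual SUFFIX/END definitions in atomic_template.h, e.g. DATA_SIZE == 4 on a little-endian host):

/*
 *   old:  ATOMIC_NAME(cmpxchg)  ->  helper_atomic_cmpxchgl_le
 *   new:  ATOMIC_NAME(cmpxchg)  ->  cpu_atomic_cmpxchgl_le_mmu
 *
 * The new names line up with the softmmu helpers and take the TCGMemOpIdx
 * and return address as explicit arguments, so they can be called directly
 * from other helpers.
 */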


@ -34,6 +34,8 @@
#define AUDIO_CAP "alsa"
#include "audio_int.h"
#define DEBUG_ALSA 0
struct pollhlp {
snd_pcm_t *handle;
struct pollfd *pfds;
@ -587,16 +589,12 @@ static int alsa_open(bool in, struct alsa_params_req *req,
*handlep = handle;
if (obtfmt != req->fmt ||
obt->nchannels != req->nchannels ||
obt->freq != req->freq) {
if (DEBUG_ALSA || obtfmt != req->fmt ||
obt->nchannels != req->nchannels || obt->freq != req->freq) {
dolog ("Audio parameters for %s\n", typ);
alsa_dump_info(req, obt, obtfmt, apdo);
}
#ifdef DEBUG
alsa_dump_info(req, obt, obtfmt, apdo);
#endif
return 0;
err:


@ -32,6 +32,7 @@
#include "qapi/qapi-visit-audio.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu-common.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "ui/qemu-spice.h"
@ -704,7 +705,7 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
if (live == hwsamples) {
#ifdef DEBUG_OUT
dolog ("%s is full %d\n", sw->name, live);
dolog ("%s is full %zu\n", sw->name, live);
#endif
return 0;
}
@ -994,7 +995,7 @@ static size_t audio_get_avail (SWVoiceIn *sw)
}
ldebug (
"%s: get_avail live %d ret %" PRId64 "\n",
"%s: get_avail live %zu ret %" PRId64 "\n",
SW_NAME (sw),
live, (((int64_t) live << 32) / sw->ratio) * sw->info.bytes_per_frame
);
@ -1021,7 +1022,7 @@ static size_t audio_get_free(SWVoiceOut *sw)
dead = sw->hw->mix_buf->size - live;
#ifdef DEBUG_OUT
dolog ("%s: get_free live %d dead %d ret %" PRId64 "\n",
dolog ("%s: get_free live %zu dead %zu ret %" PRId64 "\n",
SW_NAME (sw),
live, dead, (((int64_t) dead << 32) / sw->ratio) *
sw->info.bytes_per_frame);
@ -1621,10 +1622,20 @@ void audio_cleanup(void)
}
}
static bool vmstate_audio_needed(void *opaque)
{
/*
* Never needed, this vmstate only exists in case
* an old qemu sends it to us.
*/
return false;
}
static const VMStateDescription vmstate_audio = {
.name = "audio",
.version_id = 1,
.minimum_version_id = 1,
.needed = vmstate_audio_needed,
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
}
@ -2172,6 +2183,14 @@ const char *audio_get_id(QEMUSoundCard *card)
}
}
const char *audio_application_name(void)
{
const char *vm_name;
vm_name = qemu_get_vm_name();
return vm_name ? vm_name : "qemu";
}
void audio_rate_start(RateCtl *rate)
{
memset(rate, 0, sizeof(RateCtl));


@ -243,6 +243,8 @@ void *audio_calloc (const char *funcname, int nmemb, size_t size);
void audio_run(AudioState *s, const char *msg);
const char *audio_application_name(void);
typedef struct RateCtl {
int64_t start_ticks;
int64_t bytes_sent;


@ -26,6 +26,7 @@
#include <CoreAudio/CoreAudio.h>
#include <pthread.h> /* pthread_X */
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "audio.h"
@ -34,12 +35,11 @@
typedef struct coreaudioVoiceOut {
HWVoiceOut hw;
pthread_mutex_t mutex;
pthread_mutex_t buf_mutex;
AudioDeviceID outputDeviceID;
int frameSizeSetting;
uint32_t bufferCount;
UInt32 audioDevicePropertyBufferFrameSize;
AudioStreamBasicDescription outputStreamBasicDescription;
AudioDeviceIOProcID ioprocid;
bool enabled;
} coreaudioVoiceOut;
@ -114,24 +114,6 @@ static OSStatus coreaudio_set_framesize(AudioDeviceID id, UInt32 *framesize)
framesize);
}
static OSStatus coreaudio_get_streamformat(AudioDeviceID id,
AudioStreamBasicDescription *d)
{
UInt32 size = sizeof(*d);
AudioObjectPropertyAddress addr = {
kAudioDevicePropertyStreamFormat,
kAudioDevicePropertyScopeOutput,
kAudioObjectPropertyElementMaster
};
return AudioObjectGetPropertyData(id,
&addr,
0,
NULL,
&size,
d);
}
static OSStatus coreaudio_set_streamformat(AudioDeviceID id,
AudioStreamBasicDescription *d)
{
@ -260,11 +242,11 @@ static void GCC_FMT_ATTR (3, 4) coreaudio_logerr2 (
#define coreaudio_playback_logerr(status, ...) \
coreaudio_logerr2(status, "playback", __VA_ARGS__)
static int coreaudio_lock (coreaudioVoiceOut *core, const char *fn_name)
static int coreaudio_buf_lock (coreaudioVoiceOut *core, const char *fn_name)
{
int err;
err = pthread_mutex_lock (&core->mutex);
err = pthread_mutex_lock (&core->buf_mutex);
if (err) {
dolog ("Could not lock voice for %s\nReason: %s\n",
fn_name, strerror (err));
@ -273,11 +255,11 @@ static int coreaudio_lock (coreaudioVoiceOut *core, const char *fn_name)
return 0;
}
static int coreaudio_unlock (coreaudioVoiceOut *core, const char *fn_name)
static int coreaudio_buf_unlock (coreaudioVoiceOut *core, const char *fn_name)
{
int err;
err = pthread_mutex_unlock (&core->mutex);
err = pthread_mutex_unlock (&core->buf_mutex);
if (err) {
dolog ("Could not unlock voice for %s\nReason: %s\n",
fn_name, strerror (err));
@ -292,13 +274,13 @@ static int coreaudio_unlock (coreaudioVoiceOut *core, const char *fn_name)
coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw; \
ret_type ret; \
\
if (coreaudio_lock(core, "coreaudio_" #name)) { \
if (coreaudio_buf_lock(core, "coreaudio_" #name)) { \
return 0; \
} \
\
ret = glue(audio_generic_, name)args; \
\
coreaudio_unlock(core, "coreaudio_" #name); \
coreaudio_buf_unlock(core, "coreaudio_" #name); \
return ret; \
}
COREAUDIO_WRAPPER_FUNC(get_buffer_out, void *, (HWVoiceOut *hw, size_t *size),
@ -310,7 +292,10 @@ COREAUDIO_WRAPPER_FUNC(write, size_t, (HWVoiceOut *hw, void *buf, size_t size),
(hw, buf, size))
#undef COREAUDIO_WRAPPER_FUNC
/* callback to feed audiooutput buffer */
/*
* Callback to feed the audio output buffer. Called without the iothread lock.
* It may lock "buf_mutex", but must not take any other locks.
*/
static OSStatus audioDeviceIOProc(
AudioDeviceID inDevice,
const AudioTimeStamp *inNow,
@ -326,13 +311,13 @@ static OSStatus audioDeviceIOProc(
coreaudioVoiceOut *core = (coreaudioVoiceOut *) hwptr;
size_t len;
if (coreaudio_lock (core, "audioDeviceIOProc")) {
if (coreaudio_buf_lock (core, "audioDeviceIOProc")) {
inInputTime = 0;
return 0;
}
if (inDevice != core->outputDeviceID) {
coreaudio_unlock (core, "audioDeviceIOProc(old device)");
coreaudio_buf_unlock (core, "audioDeviceIOProc(old device)");
return 0;
}
@ -342,7 +327,7 @@ static OSStatus audioDeviceIOProc(
/* if there are not enough samples, set signal and return */
if (pending_frames < frameCount) {
inInputTime = 0;
coreaudio_unlock (core, "audioDeviceIOProc(empty)");
coreaudio_buf_unlock (core, "audioDeviceIOProc(empty)");
return 0;
}
@ -364,7 +349,7 @@ static OSStatus audioDeviceIOProc(
out += write_len;
}
coreaudio_unlock (core, "audioDeviceIOProc");
coreaudio_buf_unlock (core, "audioDeviceIOProc");
return 0;
}
@ -373,6 +358,17 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
OSStatus status;
AudioValueRange frameRange;
AudioStreamBasicDescription streamBasicDescription = {
.mBitsPerChannel = core->hw.info.bits,
.mBytesPerFrame = core->hw.info.bytes_per_frame,
.mBytesPerPacket = core->hw.info.bytes_per_frame,
.mChannelsPerFrame = core->hw.info.nchannels,
.mFormatFlags = kLinearPCMFormatFlagIsFloat,
.mFormatID = kAudioFormatLinearPCM,
.mFramesPerPacket = 1,
.mSampleRate = core->hw.info.freq
};
status = coreaudio_get_voice(&core->outputDeviceID);
if (status != kAudioHardwareNoError) {
coreaudio_playback_logerr (status,
@ -432,34 +428,30 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
}
core->hw.samples = core->bufferCount * core->audioDevicePropertyBufferFrameSize;
/* get StreamFormat */
status = coreaudio_get_streamformat(core->outputDeviceID,
&core->outputStreamBasicDescription);
if (status == kAudioHardwareBadObjectError) {
return 0;
}
if (status != kAudioHardwareNoError) {
coreaudio_playback_logerr (status,
"Could not get Device Stream properties\n");
core->outputDeviceID = kAudioDeviceUnknown;
return status;
}
/* set Samplerate */
status = coreaudio_set_streamformat(core->outputDeviceID,
&core->outputStreamBasicDescription);
&streamBasicDescription);
if (status == kAudioHardwareBadObjectError) {
return 0;
}
if (status != kAudioHardwareNoError) {
coreaudio_playback_logerr (status,
"Could not set samplerate %lf\n",
core->outputStreamBasicDescription.mSampleRate);
streamBasicDescription.mSampleRate);
core->outputDeviceID = kAudioDeviceUnknown;
return status;
}
/* set Callback */
/*
* set Callback.
*
* On macOS 11.3.1, Core Audio calls AudioDeviceIOProc after calling an
* internal function named HALB_Mutex::Lock(), which locks a mutex in
* HALB_IOThread::Entry(void*). HALB_Mutex::Lock() is also called in
* AudioObjectGetPropertyData, which is called by the coreaudio driver.
* Therefore, the specified callback must be designed to avoid a deadlock
* with the callers of AudioObjectGetPropertyData.
*/
core->ioprocid = NULL;
status = AudioDeviceCreateIOProcID(core->outputDeviceID,
audioDeviceIOProc,
@ -542,6 +534,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
}
}
/* called without iothread lock. */
static OSStatus handle_voice_change(
AudioObjectID in_object_id,
UInt32 in_number_addresses,
@ -551,9 +544,7 @@ static OSStatus handle_voice_change(
OSStatus status;
coreaudioVoiceOut *core = in_client_data;
if (coreaudio_lock(core, __func__)) {
abort();
}
qemu_mutex_lock_iothread();
if (core->outputDeviceID) {
fini_out_device(core);
@ -564,7 +555,7 @@ static OSStatus handle_voice_change(
update_device_playback_state(core);
}
coreaudio_unlock (core, __func__);
qemu_mutex_unlock_iothread();
return status;
}
@ -579,14 +570,10 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
struct audsettings obt_as;
/* create mutex */
err = pthread_mutex_init(&core->mutex, NULL);
err = pthread_mutex_init(&core->buf_mutex, NULL);
if (err) {
dolog("Could not create mutex\nReason: %s\n", strerror (err));
goto mutex_error;
}
if (coreaudio_lock(core, __func__)) {
goto lock_error;
return -1;
}
obt_as = *as;
@ -598,7 +585,6 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
qapi_AudiodevCoreaudioPerDirectionOptions_base(cpdo), as, 11610);
core->bufferCount = cpdo->has_buffer_count ? cpdo->buffer_count : 4;
core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
status = AudioObjectAddPropertyListener(kAudioObjectSystemObject,
&voice_addr, handle_voice_change,
@ -606,37 +592,21 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
if (status != kAudioHardwareNoError) {
coreaudio_playback_logerr (status,
"Could not listen to voice property change\n");
goto listener_error;
return -1;
}
if (init_out_device(core)) {
goto device_error;
status = AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
&voice_addr,
handle_voice_change,
core);
if (status != kAudioHardwareNoError) {
coreaudio_playback_logerr(status,
"Could not remove voice property change listener\n");
}
}
coreaudio_unlock(core, __func__);
return 0;
device_error:
status = AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
&voice_addr,
handle_voice_change,
core);
if (status != kAudioHardwareNoError) {
coreaudio_playback_logerr(status,
"Could not remove voice property change listener\n");
}
listener_error:
coreaudio_unlock(core, __func__);
lock_error:
err = pthread_mutex_destroy(&core->mutex);
if (err) {
dolog("Could not destroy mutex\nReason: %s\n", strerror (err));
}
mutex_error:
return -1;
}
static void coreaudio_fini_out (HWVoiceOut *hw)
@ -645,10 +615,6 @@ static void coreaudio_fini_out (HWVoiceOut *hw)
int err;
coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
if (coreaudio_lock(core, __func__)) {
abort();
}
status = AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
&voice_addr,
handle_voice_change,
@ -659,10 +625,8 @@ static void coreaudio_fini_out (HWVoiceOut *hw)
fini_out_device(core);
coreaudio_unlock(core, __func__);
/* destroy mutex */
err = pthread_mutex_destroy(&core->mutex);
err = pthread_mutex_destroy(&core->buf_mutex);
if (err) {
dolog("Could not destroy mutex\nReason: %s\n", strerror (err));
}
@ -672,14 +636,8 @@ static void coreaudio_enable_out(HWVoiceOut *hw, bool enable)
{
coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
if (coreaudio_lock(core, __func__)) {
abort();
}
core->enabled = enable;
update_device_playback_state(core);
coreaudio_unlock(core, __func__);
}
static void *coreaudio_audio_init(Audiodev *dev)


@ -26,7 +26,6 @@
#include "qemu/module.h"
#include "qemu/atomic.h"
#include "qemu/main-loop.h"
#include "qemu-common.h"
#include "audio.h"
#define AUDIO_CAP "jack"
@ -412,7 +411,7 @@ static int qjack_client_init(QJackClient *c)
snprintf(client_name, sizeof(client_name), "%s-%s",
c->out ? "out" : "in",
c->opt->client_name ? c->opt->client_name : qemu_get_vm_name());
c->opt->client_name ? c->opt->client_name : audio_application_name());
if (c->opt->exact_name) {
options |= JackUseExactName;


@ -2,7 +2,6 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu-common.h"
#include "audio.h"
#include "qapi/opts-visitor.h"
@ -463,10 +462,7 @@ static pa_stream *qpa_simple_new (
pa_stream_set_state_callback(stream, stream_state_cb, c);
flags =
PA_STREAM_INTERPOLATE_TIMING
| PA_STREAM_AUTO_TIMING_UPDATE
| PA_STREAM_EARLY_REQUESTS;
flags = PA_STREAM_EARLY_REQUESTS;
if (dev) {
/* don't move the stream if the user specified a sink/source */
@ -756,7 +752,6 @@ static int qpa_validate_per_direction_opts(Audiodev *dev,
/* common */
static void *qpa_conn_init(const char *server)
{
const char *vm_name;
PAConnection *c = g_malloc0(sizeof(PAConnection));
QTAILQ_INSERT_TAIL(&pa_conns, c, list);
@ -765,9 +760,8 @@ static void *qpa_conn_init(const char *server)
goto fail;
}
vm_name = qemu_get_vm_name();
c->context = pa_context_new(pa_threaded_mainloop_get_api(c->mainloop),
vm_name ? vm_name : "qemu");
audio_application_name());
if (!c->context) {
goto fail;
}


@ -317,3 +317,5 @@ static void register_audio_spice(void)
audio_driver_register(&spice_audio_driver);
}
type_init(register_audio_spice);
module_dep("ui-spice-core");


@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# See docs/devel/tracing.rst for syntax documentation.
# alsaaudio.c
alsa_revents(int revents) "revents = %d"


@ -6,4 +6,4 @@ authz_ss.add(files(
'simple.c',
))
authz_ss.add(when: ['CONFIG_AUTH_PAM', pam], if_true: files('pamacct.c'))
authz_ss.add(when: pam, if_true: files('pamacct.c'))


@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# See docs/devel/tracing.rst for syntax documentation.
# base.c
qauthz_is_allowed(void *authz, const char *identity, bool allowed) "AuthZ %p check identity=%s allowed=%d"


@ -52,6 +52,7 @@ cryptodev_vhost_init(
{
int r;
CryptoDevBackendVhost *crypto;
Error *local_err = NULL;
crypto = g_new(CryptoDevBackendVhost, 1);
crypto->dev.max_queues = 1;
@ -66,8 +67,10 @@ cryptodev_vhost_init(
/* vhost-user needs vq_index to initiate a specific queue pair */
crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs;
r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0);
r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0,
&local_err);
if (r < 0) {
error_report_err(local_err);
goto fail;
}


@ -15,7 +15,6 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "qom/object_interfaces.h"
#include "qom/object.h"
@ -40,6 +39,7 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
object_get_typename(OBJECT(backend)));
#else
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);
uint32_t ram_flags;
gchar *name;
if (!backend->size) {
@ -52,11 +52,11 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
}
name = host_memory_backend_get_name(backend);
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend),
name,
backend->size, fb->align,
(backend->share ? RAM_SHARED : 0) |
(fb->is_pmem ? RAM_PMEM : 0),
ram_flags = backend->share ? RAM_SHARED : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
backend->size, fb->align, ram_flags,
fb->mem_path, fb->readonly, errp);
g_free(name);
#endif


@ -12,7 +12,6 @@
#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "qom/object_interfaces.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
@ -36,6 +35,7 @@ static void
memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend);
uint32_t ram_flags;
char *name;
int fd;
@ -53,9 +53,10 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
}
name = host_memory_backend_get_name(backend);
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
name, backend->size,
backend->share, fd, 0, errp);
ram_flags = backend->share ? RAM_SHARED : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, fd, 0, errp);
g_free(name);
}


@ -19,6 +19,7 @@
static void
ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
uint32_t ram_flags;
char *name;
if (!backend->size) {
@ -27,8 +28,10 @@ ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
}
name = host_memory_backend_get_name(backend);
memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), name,
backend->size, backend->share, errp);
ram_flags = backend->share ? RAM_SHARED : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, errp);
g_free(name);
}


@ -12,7 +12,6 @@
#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
@ -217,6 +216,11 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value,
Error *local_err = NULL;
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
if (!backend->reserve && value) {
error_setg(errp, "'prealloc=on' and 'reserve=off' are incompatible");
return;
}
if (!host_memory_backend_mr_inited(backend)) {
backend->prealloc = value;
return;
@ -268,6 +272,7 @@ static void host_memory_backend_init(Object *obj)
/* TODO: convert access to globals to compat properties */
backend->merge = machine_mem_merge(machine);
backend->dump = machine_dump_guest_core(machine);
backend->reserve = true;
backend->prealloc_threads = 1;
}
@ -426,6 +431,30 @@ static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
backend->share = value;
}
#ifdef CONFIG_LINUX
static bool host_memory_backend_get_reserve(Object *o, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
return backend->reserve;
}
static void host_memory_backend_set_reserve(Object *o, bool value, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
if (host_memory_backend_mr_inited(backend)) {
error_setg(errp, "cannot change property value");
return;
}
if (backend->prealloc && !value) {
error_setg(errp, "'prealloc=on' and 'reserve=off' are incompatible");
return;
}
backend->reserve = value;
}
#endif /* CONFIG_LINUX */
static bool
host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
{
@ -494,6 +523,12 @@ host_memory_backend_class_init(ObjectClass *oc, void *data)
host_memory_backend_get_share, host_memory_backend_set_share);
object_class_property_set_description(oc, "share",
"Mark the memory as private to QEMU or shared");
#ifdef CONFIG_LINUX
object_class_property_add_bool(oc, "reserve",
host_memory_backend_get_reserve, host_memory_backend_set_reserve);
object_class_property_set_description(oc, "reserve",
"Reserve swap space (or huge pages) if applicable");
#endif /* CONFIG_LINUX */
/*
* Do not delete/rename option. This option must be considered stable
* (as if it didn't have the 'x-' prefix including deprecation period) as


@ -30,6 +30,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/sockets.h"
#include "qemu/lockable.h"
#include "io/channel-socket.h"
#include "sysemu/tpm_backend.h"
#include "sysemu/tpm_util.h"
@ -124,31 +125,26 @@ static int tpm_emulator_ctrlcmd(TPMEmulator *tpm, unsigned long cmd, void *msg,
uint32_t cmd_no = cpu_to_be32(cmd);
ssize_t n = sizeof(uint32_t) + msg_len_in;
uint8_t *buf = NULL;
int ret = -1;
qemu_mutex_lock(&tpm->mutex);
WITH_QEMU_LOCK_GUARD(&tpm->mutex) {
buf = g_alloca(n);
memcpy(buf, &cmd_no, sizeof(cmd_no));
memcpy(buf + sizeof(cmd_no), msg, msg_len_in);
buf = g_alloca(n);
memcpy(buf, &cmd_no, sizeof(cmd_no));
memcpy(buf + sizeof(cmd_no), msg, msg_len_in);
n = qemu_chr_fe_write_all(dev, buf, n);
if (n <= 0) {
goto end;
}
if (msg_len_out != 0) {
n = qemu_chr_fe_read_all(dev, msg, msg_len_out);
n = qemu_chr_fe_write_all(dev, buf, n);
if (n <= 0) {
goto end;
return -1;
}
if (msg_len_out != 0) {
n = qemu_chr_fe_read_all(dev, msg, msg_len_out);
if (n <= 0) {
return -1;
}
}
}
ret = 0;
end:
qemu_mutex_unlock(&tpm->mutex);
return ret;
return 0;
}
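The rewrite above relies on the lock-guard macros from "qemu/lockable.h". As a minimal illustrative sketch (the function and its arguments are made up for the example), the guarded block drops the mutex on every exit path, including early returns:

static int example_locked_op(QemuMutex *lock, bool fail)
{
    WITH_QEMU_LOCK_GUARD(lock) {
        if (fail) {
            return -1;      /* mutex released automatically here */
        }
        /* ... work performed while holding the mutex ... */
    }
    return 0;               /* mutex already released when the block ended */
}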
static int tpm_emulator_unix_tx_bufs(TPMEmulator *tpm_emu,


@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# See docs/devel/tracing.rst for syntax documentation.
# tpm_passthrough.c
tpm_passthrough_handle_request(void *cmd) "processing command %p"


@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# See docs/devel/tracing.rst for syntax documentation.
# dbus-vmstate.c
dbus_vmstate_pre_save(void)


@ -48,9 +48,9 @@ vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
b->dev.nvqs = nvqs;
b->dev.vqs = g_new0(struct vhost_virtqueue, nvqs);
ret = vhost_dev_init(&b->dev, &b->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
ret = vhost_dev_init(&b->dev, &b->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
errp);
if (ret < 0) {
error_setg_errno(errp, -ret, "vhost initialization failed");
return -1;
}

block.c — 1739 changes (file diff suppressed because it is too large)


@ -37,7 +37,6 @@
typedef struct BDRVBackupTopState {
BlockCopyState *bcs;
BdrvChild *target;
bool active;
int64_t cluster_size;
} BDRVBackupTopState;
@ -45,12 +44,6 @@ static coroutine_fn int backup_top_co_preadv(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
BDRVBackupTopState *s = bs->opaque;
if (!s->active) {
return -EIO;
}
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
@ -60,10 +53,6 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
BDRVBackupTopState *s = bs->opaque;
uint64_t off, end;
if (!s->active) {
return -EIO;
}
if (flags & BDRV_REQ_WRITE_UNCHANGED) {
return 0;
}
@ -137,21 +126,6 @@ static void backup_top_child_perm(BlockDriverState *bs, BdrvChild *c,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
BDRVBackupTopState *s = bs->opaque;
if (!s->active) {
/*
* The filter node may be in process of bdrv_append(), which firstly do
* bdrv_set_backing_hd() and then bdrv_replace_node(). This means that
* we can't unshare BLK_PERM_WRITE during bdrv_append() operation. So,
* let's require nothing during bdrv_append() and refresh permissions
* after it (see bdrv_backup_top_append()).
*/
*nperm = 0;
*nshared = BLK_PERM_ALL;
return;
}
if (!(role & BDRV_CHILD_FILTERED)) {
/*
* Target child
@ -234,7 +208,6 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
bdrv_drained_begin(source);
bdrv_ref(top);
ret = bdrv_append(top, source, errp);
if (ret < 0) {
error_prepend(errp, "Cannot append backup-top filter: ");
@ -242,17 +215,6 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
}
appended = true;
/*
* bdrv_append() finished successfully, now we can require permissions
* we want.
*/
state->active = true;
ret = bdrv_child_refresh_perms(top, top->backing, errp);
if (ret < 0) {
error_prepend(errp, "Cannot set permissions for backup-top filter: ");
goto fail;
}
state->cluster_size = cluster_size;
state->bcs = block_copy_state_new(top->backing, state->target,
cluster_size, perf->use_copy_range,
@ -269,7 +231,6 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
fail:
if (appended) {
state->active = false;
bdrv_backup_top_drop(top);
} else {
bdrv_unref(top);
@ -284,16 +245,9 @@ void bdrv_backup_top_drop(BlockDriverState *bs)
{
BDRVBackupTopState *s = bs->opaque;
bdrv_drained_begin(bs);
bdrv_drop_filter(bs, &error_abort);
block_copy_state_free(s->bcs);
s->active = false;
bdrv_child_refresh_perms(bs, bs->backing, &error_abort);
bdrv_replace_node(bs, bs->backing->bs, &error_abort);
bdrv_set_backing_hd(bs, NULL, &error_abort);
bdrv_drained_end(bs);
bdrv_unref(bs);
}


@ -331,7 +331,7 @@ static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed)
}
}
static void backup_cancel(Job *job)
static void backup_cancel(Job *job, bool force)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);


@ -38,25 +38,27 @@
#include "qapi/qobject-input-visitor.h"
#include "sysemu/qtest.h"
/* All APIs are thread-safe */
typedef struct BDRVBlkdebugState {
int state;
int new_state;
/* IN: initialized in blkdebug_open() and never changed */
uint64_t align;
uint64_t max_transfer;
uint64_t opt_write_zero;
uint64_t max_write_zero;
uint64_t opt_discard;
uint64_t max_discard;
char *config_file; /* For blkdebug_refresh_filename() */
/* initialized in blkdebug_parse_perms() */
uint64_t take_child_perms;
uint64_t unshare_child_perms;
/* For blkdebug_refresh_filename() */
char *config_file;
/* State. Protected by lock */
int state;
QLIST_HEAD(, BlkdebugRule) rules[BLKDBG__MAX];
QSIMPLEQ_HEAD(, BlkdebugRule) active_rules;
QLIST_HEAD(, BlkdebugSuspendedReq) suspended_reqs;
QemuMutex lock;
} BDRVBlkdebugState;
typedef struct BlkdebugAIOCB {
@ -65,8 +67,11 @@ typedef struct BlkdebugAIOCB {
} BlkdebugAIOCB;
typedef struct BlkdebugSuspendedReq {
/* IN: initialized in suspend_request() */
Coroutine *co;
char *tag;
/* List entry protected by BDRVBlkdebugState's lock */
QLIST_ENTRY(BlkdebugSuspendedReq) next;
} BlkdebugSuspendedReq;
@ -74,9 +79,11 @@ enum {
ACTION_INJECT_ERROR,
ACTION_SET_STATE,
ACTION_SUSPEND,
ACTION__MAX,
};
typedef struct BlkdebugRule {
/* IN: initialized in add_rule() or blkdebug_debug_breakpoint() */
BlkdebugEvent event;
int action;
int state;
@ -95,6 +102,8 @@ typedef struct BlkdebugRule {
char *tag;
} suspend;
} options;
/* List entries protected by BDRVBlkdebugState's lock */
QLIST_ENTRY(BlkdebugRule) next;
QSIMPLEQ_ENTRY(BlkdebugRule) active_next;
} BlkdebugRule;
@ -244,11 +253,14 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
};
/* Add the rule */
qemu_mutex_lock(&s->lock);
QLIST_INSERT_HEAD(&s->rules[event], rule, next);
qemu_mutex_unlock(&s->lock);
return 0;
}
/* Called with lock held or from .bdrv_close */
static void remove_rule(BlkdebugRule *rule)
{
switch (rule->action) {
@ -467,6 +479,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
uint64_t align;
qemu_mutex_init(&s->lock);
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
ret = -EINVAL;
@ -567,6 +580,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
ret = 0;
out:
if (ret < 0) {
qemu_mutex_destroy(&s->lock);
g_free(s->config_file);
}
qemu_opts_del(opts);
@ -581,6 +595,7 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
int error;
bool immediately;
qemu_mutex_lock(&s->lock);
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
uint64_t inject_offset = rule->options.inject.offset;
@ -594,6 +609,7 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
if (!rule || !rule->options.inject.error) {
qemu_mutex_unlock(&s->lock);
return 0;
}
@ -605,6 +621,7 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
remove_rule(rule);
}
qemu_mutex_unlock(&s->lock);
if (!immediately) {
aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self());
qemu_coroutine_yield();
@ -770,78 +787,80 @@ static void blkdebug_close(BlockDriverState *bs)
}
g_free(s->config_file);
qemu_mutex_destroy(&s->lock);
}
/* Called with lock held. */
static void suspend_request(BlockDriverState *bs, BlkdebugRule *rule)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugSuspendedReq r;
BlkdebugSuspendedReq *r;
r = (BlkdebugSuspendedReq) {
.co = qemu_coroutine_self(),
.tag = g_strdup(rule->options.suspend.tag),
};
r = g_new(BlkdebugSuspendedReq, 1);
r->co = qemu_coroutine_self();
r->tag = g_strdup(rule->options.suspend.tag);
remove_rule(rule);
QLIST_INSERT_HEAD(&s->suspended_reqs, &r, next);
QLIST_INSERT_HEAD(&s->suspended_reqs, r, next);
if (!qtest_enabled()) {
printf("blkdebug: Suspended request '%s'\n", r.tag);
printf("blkdebug: Suspended request '%s'\n", r->tag);
}
qemu_coroutine_yield();
if (!qtest_enabled()) {
printf("blkdebug: Resuming request '%s'\n", r.tag);
}
QLIST_REMOVE(&r, next);
g_free(r.tag);
}
static bool process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
bool injected)
/* Called with lock held. */
static void process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
int *action_count, int *new_state)
{
BDRVBlkdebugState *s = bs->opaque;
/* Only process rules for the current state */
if (rule->state && rule->state != s->state) {
return injected;
return;
}
/* Take the action */
action_count[rule->action]++;
switch (rule->action) {
case ACTION_INJECT_ERROR:
if (!injected) {
if (action_count[ACTION_INJECT_ERROR] == 1) {
QSIMPLEQ_INIT(&s->active_rules);
injected = true;
}
QSIMPLEQ_INSERT_HEAD(&s->active_rules, rule, active_next);
break;
case ACTION_SET_STATE:
s->new_state = rule->options.set_state.new_state;
*new_state = rule->options.set_state.new_state;
break;
case ACTION_SUSPEND:
suspend_request(bs, rule);
break;
}
return injected;
}
static void blkdebug_debug_event(BlockDriverState *bs, BlkdebugEvent event)
{
BDRVBlkdebugState *s = bs->opaque;
struct BlkdebugRule *rule, *next;
bool injected;
int new_state;
int actions_count[ACTION__MAX] = { 0 };
assert((int)event >= 0 && event < BLKDBG__MAX);
injected = false;
s->new_state = s->state;
QLIST_FOREACH_SAFE(rule, &s->rules[event], next, next) {
injected = process_rule(bs, rule, injected);
WITH_QEMU_LOCK_GUARD(&s->lock) {
new_state = s->state;
QLIST_FOREACH_SAFE(rule, &s->rules[event], next, next) {
process_rule(bs, rule, actions_count, &new_state);
}
s->state = new_state;
}
while (actions_count[ACTION_SUSPEND] > 0) {
qemu_coroutine_yield();
actions_count[ACTION_SUSPEND]--;
}
s->state = s->new_state;
}
static int blkdebug_debug_breakpoint(BlockDriverState *bs, const char *event,
@ -864,33 +883,64 @@ static int blkdebug_debug_breakpoint(BlockDriverState *bs, const char *event,
.options.suspend.tag = g_strdup(tag),
};
qemu_mutex_lock(&s->lock);
QLIST_INSERT_HEAD(&s->rules[blkdebug_event], rule, next);
qemu_mutex_unlock(&s->lock);
return 0;
}
static int blkdebug_debug_resume(BlockDriverState *bs, const char *tag)
/* Called with lock held. May temporarily release lock. */
static int resume_req_by_tag(BDRVBlkdebugState *s, const char *tag, bool all)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugSuspendedReq *r, *next;
BlkdebugSuspendedReq *r;
QLIST_FOREACH_SAFE(r, &s->suspended_reqs, next, next) {
retry:
/*
* No need for _SAFE, since a different coroutine can remove another node
* (not the current one) in this list, and when the current one is removed
* the iteration starts back from the beginning anyway.
*/
QLIST_FOREACH(r, &s->suspended_reqs, next) {
if (!strcmp(r->tag, tag)) {
qemu_coroutine_enter(r->co);
Coroutine *co = r->co;
if (!qtest_enabled()) {
printf("blkdebug: Resuming request '%s'\n", r->tag);
}
QLIST_REMOVE(r, next);
g_free(r->tag);
g_free(r);
qemu_mutex_unlock(&s->lock);
qemu_coroutine_enter(co);
qemu_mutex_lock(&s->lock);
if (all) {
goto retry;
}
return 0;
}
}
return -ENOENT;
}
static int blkdebug_debug_resume(BlockDriverState *bs, const char *tag)
{
BDRVBlkdebugState *s = bs->opaque;
QEMU_LOCK_GUARD(&s->lock);
return resume_req_by_tag(s, tag, false);
}
static int blkdebug_debug_remove_breakpoint(BlockDriverState *bs,
const char *tag)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugSuspendedReq *r, *r_next;
BlkdebugRule *rule, *next;
int i, ret = -ENOENT;
QEMU_LOCK_GUARD(&s->lock);
for (i = 0; i < BLKDBG__MAX; i++) {
QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
if (rule->action == ACTION_SUSPEND &&
@ -900,11 +950,8 @@ static int blkdebug_debug_remove_breakpoint(BlockDriverState *bs,
}
}
}
QLIST_FOREACH_SAFE(r, &s->suspended_reqs, next, r_next) {
if (!strcmp(r->tag, tag)) {
qemu_coroutine_enter(r->co);
ret = 0;
}
if (resume_req_by_tag(s, tag, true) == 0) {
ret = 0;
}
return ret;
}
@ -914,6 +961,7 @@ static bool blkdebug_debug_is_suspended(BlockDriverState *bs, const char *tag)
BDRVBlkdebugState *s = bs->opaque;
BlkdebugSuspendedReq *r;
QEMU_LOCK_GUARD(&s->lock);
QLIST_FOREACH(r, &s->suspended_reqs, next) {
if (!strcmp(r->tag, tag)) {
return true;


@ -18,7 +18,6 @@
#include "hw/qdev-core.h"
#include "sysemu/blockdev.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/replay.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
@ -142,19 +141,18 @@ static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
static char *blk_root_get_parent_desc(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
char *dev_id;
g_autofree char *dev_id = NULL;
if (blk->name) {
return g_strdup(blk->name);
return g_strdup_printf("block device '%s'", blk->name);
}
dev_id = blk_get_attached_dev_id(blk);
if (*dev_id) {
return dev_id;
return g_strdup_printf("block device '%s'", dev_id);
} else {
/* TODO Callback into the BB owner for something more detailed */
g_free(dev_id);
return g_strdup("a block device");
return g_strdup("an unnamed block device");
}
}
@ -298,6 +296,13 @@ static void blk_root_detach(BdrvChild *child)
}
}
static AioContext *blk_root_get_parent_aio_context(BdrvChild *c)
{
BlockBackend *blk = c->opaque;
return blk_get_aio_context(blk);
}
static const BdrvChildClass child_root = {
.inherit_options = blk_root_inherit_options,
@ -318,6 +323,8 @@ static const BdrvChildClass child_root = {
.can_set_aio_ctx = blk_root_can_set_aio_ctx,
.set_aio_ctx = blk_root_set_aio_ctx,
.get_parent_aio_context = blk_root_get_parent_aio_context,
};
/*
@ -398,15 +405,19 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
BlockBackend *blk;
BlockDriverState *bs;
uint64_t perm = 0;
uint64_t shared = BLK_PERM_ALL;
/* blk_new_open() is mainly used in .bdrv_create implementations and the
* tools where sharing isn't a concern because the BDS stays private, so we
* just request permission according to the flags.
/*
* blk_new_open() is mainly used in .bdrv_create implementations and the
* tools where sharing isn't a major concern because the BDS stays private
* and the file is generally not supposed to be used by a second process,
* so we just request permission according to the flags.
*
* The exceptions are xen_disk and blockdev_init(); in these cases, the
* caller of blk_new_open() doesn't make use of the permissions, but they
* shouldn't hurt either. We can still share everything here because the
* guest devices will add their own blockers if they can't share. */
* guest devices will add their own blockers if they can't share.
*/
if ((flags & BDRV_O_NO_IO) == 0) {
perm |= BLK_PERM_CONSISTENT_READ;
if (flags & BDRV_O_RDWR) {
@ -416,8 +427,11 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
if (flags & BDRV_O_RESIZE) {
perm |= BLK_PERM_RESIZE;
}
if (flags & BDRV_O_NO_SHARE) {
shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
}
blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL);
blk = blk_new(qemu_get_aio_context(), perm, shared);
bs = bdrv_open(filename, reference, options, flags, errp);
if (!bs) {
blk_unref(blk);
@ -426,7 +440,7 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
blk->ctx, perm, BLK_PERM_ALL, blk, errp);
perm, shared, blk, errp);
if (!blk->root) {
blk_unref(blk);
return NULL;
@ -840,7 +854,7 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
bdrv_ref(bs);
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
blk->ctx, blk->perm, blk->shared_perm,
blk->perm, blk->shared_perm,
blk, errp);
if (blk->root == NULL) {
return -EPERM;
@ -1837,7 +1851,7 @@ bool blk_supports_write_perm(BlockBackend *blk)
if (bs) {
return !bdrv_is_read_only(bs);
} else {
return !blk->root_state.read_only;
return blk->root_state.open_flags & BDRV_O_RDWR;
}
}
@ -1939,16 +1953,29 @@ uint32_t blk_get_request_alignment(BlockBackend *blk)
return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}
/* Returns the maximum hardware transfer length, in bytes; guaranteed nonzero */
uint64_t blk_get_max_hw_transfer(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
uint64_t max = INT_MAX;
if (bs) {
max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer);
max = MIN_NON_ZERO(max, bs->bl.max_transfer);
}
return ROUND_DOWN(max, blk_get_request_alignment(blk));
}
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
uint32_t max = 0;
uint32_t max = INT_MAX;
if (bs) {
max = bs->bl.max_transfer;
max = MIN_NON_ZERO(max, bs->bl.max_transfer);
}
return MIN_NON_ZERO(max, INT_MAX);
return ROUND_DOWN(max, blk_get_request_alignment(blk));
}
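A small worked example for the rewritten limit (values are illustrative only):

/*
 * If bs->bl.max_transfer is unset (0) and the request alignment is 4096
 * bytes, max stays INT_MAX and the result is
 * ROUND_DOWN(2147483647, 4096) = 2147479552,
 * i.e. the old INT_MAX cap, rounded down to an aligned value.
 */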
int blk_get_max_iov(BlockBackend *blk)
@ -2254,7 +2281,6 @@ void blk_update_root_state(BlockBackend *blk)
assert(blk->root);
blk->root_state.open_flags = blk->root->bs->open_flags;
blk->root_state.read_only = blk->root->bs->read_only;
blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}
@ -2273,12 +2299,7 @@ bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
*/
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
int bs_flags;
bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;
return bs_flags;
return blk->root_state.open_flags;
}
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
@ -2378,8 +2399,13 @@ static void blk_root_drained_begin(BdrvChild *child)
static bool blk_root_drained_poll(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
bool busy = false;
assert(blk->quiesce_counter);
return !!blk->in_flight;
if (blk->dev_ops && blk->dev_ops->drained_poll) {
busy = blk->dev_ops->drained_poll(blk->dev_opaque);
}
return busy || !!blk->in_flight;
}
static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)


@ -28,10 +28,18 @@
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
typedef enum {
COPY_READ_WRITE_CLUSTER,
COPY_READ_WRITE,
COPY_WRITE_ZEROES,
COPY_RANGE_SMALL,
COPY_RANGE_FULL
} BlockCopyMethod;
static coroutine_fn int block_copy_task_entry(AioTask *task);
typedef struct BlockCopyCallState {
/* IN parameters. Initialized in block_copy_async() and never changed. */
/* Fields initialized in block_copy_async() and never changed. */
BlockCopyState *s;
int64_t offset;
int64_t bytes;
@ -40,33 +48,60 @@ typedef struct BlockCopyCallState {
bool ignore_ratelimit;
BlockCopyAsyncCallbackFunc cb;
void *cb_opaque;
/* Coroutine where async block-copy is running */
Coroutine *co;
/* Fields whose state changes throughout the execution */
bool finished; /* atomic */
QemuCoSleep sleep; /* TODO: protect API with a lock */
bool cancelled; /* atomic */
/* To reference all call states from BlockCopyState */
QLIST_ENTRY(BlockCopyCallState) list;
/* State */
int ret;
bool finished;
QemuCoSleepState *sleep_state;
bool cancelled;
/* OUT parameters */
/*
* Fields that report information about return values and errors.
* Protected by lock in BlockCopyState.
*/
bool error_is_read;
/*
* @ret is set concurrently by tasks under mutex. Only set once by first
* failed task (and untouched if no task failed).
* After finishing (call_state->finished is true), it is not modified
* anymore and may be safely read without mutex.
*/
int ret;
} BlockCopyCallState;
typedef struct BlockCopyTask {
AioTask task;
/*
* Fields initialized in block_copy_task_create()
* and never changed.
*/
BlockCopyState *s;
BlockCopyCallState *call_state;
int64_t offset;
int64_t bytes;
bool zeroes;
QLIST_ENTRY(BlockCopyTask) list;
/*
* @method can also be set again in the while loop of
* block_copy_dirty_clusters(), but it is never accessed concurrently
* because the only other function that reads it is
* block_copy_task_entry() and it is invoked afterwards in the same
* iteration.
*/
BlockCopyMethod method;
/*
* Fields whose state changes throughout the execution
* Protected by lock in BlockCopyState.
*/
CoQueue wait_queue; /* coroutines blocked on this task */
/*
* Only protect the case of parallel read while updating @bytes
* value in block_copy_task_shrink().
*/
int64_t bytes;
QLIST_ENTRY(BlockCopyTask) list;
} BlockCopyTask;
static int64_t task_end(BlockCopyTask *task)
@ -82,17 +117,25 @@ typedef struct BlockCopyState {
*/
BdrvChild *source;
BdrvChild *target;
BdrvDirtyBitmap *copy_bitmap;
int64_t in_flight_bytes;
int64_t cluster_size;
bool use_copy_range;
int64_t copy_size;
uint64_t len;
QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
QLIST_HEAD(, BlockCopyCallState) calls;
/*
* Fields initialized in block_copy_state_new()
* and never changed.
*/
int64_t cluster_size;
int64_t max_transfer;
uint64_t len;
BdrvRequestFlags write_flags;
/*
* Fields whose state changes throughout the execution
* Protected by lock.
*/
CoMutex lock;
int64_t in_flight_bytes;
BlockCopyMethod method;
QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
QLIST_HEAD(, BlockCopyCallState) calls;
/*
* skip_unallocated:
*
@ -107,16 +150,15 @@ typedef struct BlockCopyState {
* skip unallocated regions, clear them in the copy_bitmap, and invoke
* block_copy_reset_unallocated() every time it does.
*/
bool skip_unallocated;
bool skip_unallocated; /* atomic */
/* State fields that use a thread-safe API */
BdrvDirtyBitmap *copy_bitmap;
ProgressMeter *progress;
SharedResource *mem;
uint64_t speed;
RateLimit rate_limit;
} BlockCopyState;
/* Called with lock held */
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
int64_t offset, int64_t bytes)
{
@ -134,6 +176,9 @@ static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
/*
* If there are no intersecting tasks, return false. Otherwise, wait for the
* first intersecting task found to finish and return true.
*
* Called with lock held. May temporarily release the lock.
* Return value of 0 proves that the lock was NOT released.
*/
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
int64_t bytes)
@ -144,22 +189,43 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
return false;
}
qemu_co_queue_wait(&task->wait_queue, NULL);
qemu_co_queue_wait(&task->wait_queue, &s->lock);
return true;
}
/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
switch (s->method) {
case COPY_READ_WRITE_CLUSTER:
return s->cluster_size;
case COPY_READ_WRITE:
case COPY_RANGE_SMALL:
return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
s->max_transfer);
case COPY_RANGE_FULL:
return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
s->max_transfer);
default:
/* Cannot have COPY_WRITE_ZEROES here. */
abort();
}
}
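As a hedged worked example of the chunk-size selection above (assuming the usual BLOCK_COPY_MAX_BUFFER of 1 MiB and BLOCK_COPY_MAX_COPY_RANGE of 16 MiB defined earlier in this file, plus illustrative values cluster_size = 64 KiB and max_transfer = 32 MiB):

/*
 *   COPY_READ_WRITE_CLUSTER              -> 64 KiB  (one cluster per request)
 *   COPY_READ_WRITE / COPY_RANGE_SMALL   -> MIN(MAX(64K, 1M), 32M)  = 1 MiB
 *   COPY_RANGE_FULL                      -> MIN(MAX(64K, 16M), 32M) = 16 MiB
 */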
/*
* Search for the first dirty area in offset/bytes range and create task at
* the beginning of it.
*/
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
BlockCopyCallState *call_state,
int64_t offset, int64_t bytes)
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
int64_t offset, int64_t bytes)
{
BlockCopyTask *task;
int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);
int64_t max_chunk;
QEMU_LOCK_GUARD(&s->lock);
max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
offset, offset + bytes,
max_chunk, &offset, &bytes))
@ -183,6 +249,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
.call_state = call_state,
.offset = offset,
.bytes = bytes,
.method = s->method,
};
qemu_co_queue_init(&task->wait_queue);
QLIST_INSERT_HEAD(&s->tasks, task, list);
@ -200,6 +267,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
int64_t new_bytes)
{
QEMU_LOCK_GUARD(&task->s->lock);
if (new_bytes == task->bytes) {
return;
}
@ -216,11 +284,15 @@ static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
QEMU_LOCK_GUARD(&task->s->lock);
task->s->in_flight_bytes -= task->bytes;
if (ret < 0) {
bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
}
QLIST_REMOVE(task, list);
progress_set_remaining(task->s->progress,
bdrv_get_dirty_count(task->s->copy_bitmap) +
task->s->in_flight_bytes);
qemu_co_queue_restart_all(&task->wait_queue);
}
@ -230,6 +302,7 @@ void block_copy_state_free(BlockCopyState *s)
return;
}
ratelimit_destroy(&s->rate_limit);
bdrv_release_dirty_bitmap(s->copy_bitmap);
shres_destroy(s->mem);
g_free(s);
@ -265,36 +338,39 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
.len = bdrv_dirty_bitmap_size(copy_bitmap),
.write_flags = write_flags,
.mem = shres_create(BLOCK_COPY_MAX_MEM),
.max_transfer = QEMU_ALIGN_DOWN(
block_copy_max_transfer(source, target),
cluster_size),
};
if (block_copy_max_transfer(source, target) < cluster_size) {
if (s->max_transfer < cluster_size) {
/*
* copy_range does not respect max_transfer. We don't want to bother
* with requests smaller than block-copy cluster size, so fallback to
* buffered copying (read and write respect max_transfer on their
* behalf).
*/
s->use_copy_range = false;
s->copy_size = cluster_size;
s->method = COPY_READ_WRITE_CLUSTER;
} else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
/* Compression supports only cluster-size writes and no copy-range. */
s->use_copy_range = false;
s->copy_size = cluster_size;
s->method = COPY_READ_WRITE_CLUSTER;
} else {
/*
* We enable copy-range, but keep small copy_size, until first
* If copy range enabled, start with COPY_RANGE_SMALL, until first
* successful copy_range (look at block_copy_do_copy).
*/
s->use_copy_range = use_copy_range;
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
}
ratelimit_init(&s->rate_limit);
qemu_co_mutex_init(&s->lock);
QLIST_INIT(&s->tasks);
QLIST_INIT(&s->calls);
return s;
}
/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
s->progress = pm;
@ -340,11 +416,15 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
*
* No sync here: neither bitmap nor intersecting-request handling, only copy.
*
* @method is an in-out argument, so that copy_range can be either extended to
* a full-size buffer or disabled if the copy_range attempt fails. The output
* value of @method should be used for subsequent tasks.
* Returns 0 on success.
*/
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
int64_t offset, int64_t bytes,
bool zeroes, bool *error_is_read)
BlockCopyMethod *method,
bool *error_is_read)
{
int ret;
int64_t nbytes = MIN(offset + bytes, s->len) - offset;
@ -358,7 +438,8 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
assert(nbytes < INT_MAX);
if (zeroes) {
switch (*method) {
case COPY_WRITE_ZEROES:
ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
~BDRV_REQ_WRITE_COMPRESSED);
if (ret < 0) {
@ -366,84 +447,86 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
*error_is_read = false;
}
return ret;
}
if (s->use_copy_range) {
case COPY_RANGE_SMALL:
case COPY_RANGE_FULL:
ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
0, s->write_flags);
if (ret >= 0) {
/* Successful copy-range, increase chunk size. */
*method = COPY_RANGE_FULL;
return 0;
}
trace_block_copy_copy_range_fail(s, offset, ret);
*method = COPY_READ_WRITE;
/* Fall through to read+write with allocated buffer */
case COPY_READ_WRITE_CLUSTER:
case COPY_READ_WRITE:
/*
* In case of a failed copy_range request above, we may proceed with a
* buffered request larger than BLOCK_COPY_MAX_BUFFER.
* Still, further requests will be properly limited, so we don't care too
* much. Moreover, the most likely case (copy_range is unsupported for
* the configuration, so the very first copy_range request fails)
* is handled by setting large copy_size only after first successful
* copy_range.
*/
bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
if (ret < 0) {
trace_block_copy_copy_range_fail(s, offset, ret);
s->use_copy_range = false;
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
/* Fallback to read+write with allocated buffer */
} else {
if (s->use_copy_range) {
/*
* Successful copy-range. Now increase copy_size. copy_range
* does not respect max_transfer (it's a TODO), so we factor
* that in here.
*
* Note: we double-check s->use_copy_range for the case when
* parallel block-copy request unsets it during previous
* bdrv_co_copy_range call.
*/
s->copy_size =
MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
s->target),
s->cluster_size));
}
trace_block_copy_read_fail(s, offset, ret);
*error_is_read = true;
goto out;
}
ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
s->write_flags);
if (ret < 0) {
trace_block_copy_write_fail(s, offset, ret);
*error_is_read = false;
goto out;
}
out:
qemu_vfree(bounce_buffer);
break;
default:
abort();
}
/*
* In case of failed copy_range request above, we may proceed with buffered
* request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
* be properly limited, so don't care too much. Moreover the most likely
* case (copy_range is unsupported for the configuration, so the very first
* copy_range request fails) is handled by setting large copy_size only
* after first successful copy_range.
*/
bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
if (ret < 0) {
trace_block_copy_read_fail(s, offset, ret);
*error_is_read = true;
goto out;
}
ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
s->write_flags);
if (ret < 0) {
trace_block_copy_write_fail(s, offset, ret);
*error_is_read = false;
goto out;
}
out:
qemu_vfree(bounce_buffer);
return ret;
}
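The switch above replaces the old use_copy_range fallback handling with a single in-out method argument, so a failed copy_range attempt demotes all later chunks to read+write while a successful one promotes them to full-size copy_range. Purely as illustration, a stand-alone sketch of that fallback ladder, assuming nothing from QEMU (the enum names mirror this hunk; try_copy_range() is a hypothetical stand-in for bdrv_co_copy_range()):

/*
 * Sketch only, not part of the patch: a stand-alone model of the fallback
 * ladder that block_copy_do_copy() drives through *method.
 */
#include <stdbool.h>

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL,
} BlockCopyMethod;

static bool try_copy_range(void)
{
    return false; /* pretend offloaded copy is not supported */
}

static int do_one_chunk(BlockCopyMethod *method)
{
    switch (*method) {
    case COPY_WRITE_ZEROES:
        /* write-zeroes path, no data movement */
        return 0;
    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        if (try_copy_range()) {
            *method = COPY_RANGE_FULL; /* success: widen later chunks */
            return 0;
        }
        *method = COPY_READ_WRITE;     /* failure: read+write from now on */
        /* fall through */
    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /* bounce-buffer read followed by write would go here */
        return 0;
    default:
        return -1;
    }
}

The real function additionally handles zeroed regions, bounce-buffer allocation and error reporting, which the sketch leaves out.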
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
BlockCopyTask *t = container_of(task, BlockCopyTask, task);
BlockCopyState *s = t->s;
bool error_is_read = false;
BlockCopyMethod method = t->method;
int ret;
ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
&error_is_read);
if (ret < 0 && !t->call_state->ret) {
t->call_state->ret = ret;
t->call_state->error_is_read = error_is_read;
} else {
progress_work_done(t->s->progress, t->bytes);
ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);
WITH_QEMU_LOCK_GUARD(&s->lock) {
if (s->method == t->method) {
s->method = method;
}
if (ret < 0) {
if (!t->call_state->ret) {
t->call_state->ret = ret;
t->call_state->error_is_read = error_is_read;
}
} else {
progress_work_done(s->progress, t->bytes);
}
}
co_put_to_shres(t->s->mem, t->bytes);
co_put_to_shres(s->mem, t->bytes);
block_copy_task_end(t, ret);
return ret;
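block_copy_task_entry() now runs the copy with a local copy of the method and, under the shared lock, writes it back only if s->method still equals the value the task started with, so a concurrent task's decision is never overwritten. A reduced sketch of that compare-under-lock publish, assuming a plain pthread mutex in place of the CoMutex and WITH_QEMU_LOCK_GUARD():

/*
 * Sketch only, not part of the patch: "adopt the task's method unless
 * someone else already changed it", outside of QEMU.
 */
#include <pthread.h>

typedef int Method;

typedef struct {
    pthread_mutex_t lock;
    Method method;
} Shared;

static void publish_method(Shared *s, Method started_with, Method ended_with)
{
    pthread_mutex_lock(&s->lock);
    if (s->method == started_with) {
        /* no concurrent task raced us; keep what this task learned */
        s->method = ended_with;
    }
    pthread_mutex_unlock(&s->lock);
}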
@ -456,7 +539,7 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
BlockDriverState *base;
int ret;
if (s->skip_unallocated) {
if (qatomic_read(&s->skip_unallocated)) {
base = bdrv_backing_chain_next(s->source->bs);
} else {
base = NULL;
@ -543,10 +626,12 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
bytes = clusters * s->cluster_size;
if (!ret) {
qemu_co_mutex_lock(&s->lock);
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
qemu_co_mutex_unlock(&s->lock);
}
*count = bytes;
@ -582,7 +667,8 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
while (bytes && aio_task_pool_status(aio) == 0 &&
!qatomic_read(&call_state->cancelled)) {
BlockCopyTask *task;
int64_t status_bytes;
@ -604,34 +690,32 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
if (status_bytes < task->bytes) {
block_copy_task_shrink(task, status_bytes);
}
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
if (qatomic_read(&s->skip_unallocated) &&
!(ret & BDRV_BLOCK_ALLOCATED)) {
block_copy_task_end(task, 0);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
trace_block_copy_skip_range(s, task->offset, task->bytes);
offset = task_end(task);
bytes = end - offset;
g_free(task);
continue;
}
task->zeroes = ret & BDRV_BLOCK_ZERO;
if (s->speed) {
if (!call_state->ignore_ratelimit) {
uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
if (ns > 0) {
block_copy_task_end(task, -EAGAIN);
g_free(task);
qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, ns,
&call_state->sleep_state);
continue;
}
}
ratelimit_calculate_delay(&s->rate_limit, task->bytes);
if (ret & BDRV_BLOCK_ZERO) {
task->method = COPY_WRITE_ZEROES;
}
if (!call_state->ignore_ratelimit) {
uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
if (ns > 0) {
block_copy_task_end(task, -EAGAIN);
g_free(task);
qemu_co_sleep_ns_wakeable(&call_state->sleep,
QEMU_CLOCK_REALTIME, ns);
continue;
}
}
ratelimit_calculate_delay(&s->rate_limit, task->bytes);
trace_block_copy_process(s, task->offset);
co_get_from_shres(s->mem, task->bytes);
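The throttling above probes the rate limiter with a zero byte count, sleeps if a delay is pending, and charges task->bytes only once the task is actually dispatched. A deliberately simplified stand-alone model of that probe-then-account shape (not QEMU's RateLimit; slice rollover and clock handling are omitted):

/*
 * Sketch only, not part of the patch: "probe with 0, account real bytes
 * after dispatch".  A crude bucket, nothing more.
 */
#include <stdint.h>

typedef struct {
    uint64_t quota_bytes;   /* 0 means unlimited */
    uint64_t accounted;
    uint64_t slice_ns;
} Limit;

/* Returns how long the caller should sleep before dispatching, 0 if none. */
static uint64_t limit_delay(Limit *l, uint64_t n)
{
    l->accounted += n;
    if (l->quota_bytes == 0 || l->accounted <= l->quota_bytes) {
        return 0;
    }
    return l->slice_ns;
}

Probing with n == 0 leaves the quota untouched, which is why the code above can call the limiter twice per task.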
@ -672,9 +756,7 @@ out:
void block_copy_kick(BlockCopyCallState *call_state)
{
if (call_state->sleep_state) {
qemu_co_sleep_wake(call_state->sleep_state);
}
qemu_co_sleep_wake(&call_state->sleep);
}
/*
@ -689,15 +771,40 @@ void block_copy_kick(BlockCopyCallState *call_state)
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
int ret;
BlockCopyState *s = call_state->s;
QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);
qemu_co_mutex_lock(&s->lock);
QLIST_INSERT_HEAD(&s->calls, call_state, list);
qemu_co_mutex_unlock(&s->lock);
do {
ret = block_copy_dirty_clusters(call_state);
if (ret == 0 && !call_state->cancelled) {
ret = block_copy_wait_one(call_state->s, call_state->offset,
call_state->bytes);
if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
WITH_QEMU_LOCK_GUARD(&s->lock) {
/*
* Check that there is no outstanding task we still
* need to wait for
*/
ret = block_copy_wait_one(s, call_state->offset,
call_state->bytes);
if (ret == 0) {
/*
* No pending tasks, but check again the bitmap in this
* same critical section, since a task might have failed
* between this and the critical section in
* block_copy_dirty_clusters().
*
* A return value of 0 from block_copy_wait_one also means that it
* didn't release the lock. So, we are still in the same
* critical section, not interrupted by any concurrent
* access to state.
*/
ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
call_state->offset,
call_state->bytes) >= 0;
}
}
}
/*
@ -709,15 +816,17 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
* 2. We have waited for some intersecting block-copy request
* It may have failed and produced new dirty bits.
*/
} while (ret > 0 && !call_state->cancelled);
} while (ret > 0 && !qatomic_read(&call_state->cancelled));
call_state->finished = true;
qatomic_store_release(&call_state->finished, true);
if (call_state->cb) {
call_state->cb(call_state->cb_opaque);
}
qemu_co_mutex_lock(&s->lock);
QLIST_REMOVE(call_state, list);
qemu_co_mutex_unlock(&s->lock);
return ret;
}
@ -772,44 +881,50 @@ void block_copy_call_free(BlockCopyCallState *call_state)
return;
}
assert(call_state->finished);
assert(qatomic_read(&call_state->finished));
g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
return call_state->finished;
return qatomic_read(&call_state->finished);
}
bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
return call_state->finished && !call_state->cancelled &&
call_state->ret == 0;
return qatomic_load_acquire(&call_state->finished) &&
!qatomic_read(&call_state->cancelled) &&
call_state->ret == 0;
}
bool block_copy_call_failed(BlockCopyCallState *call_state)
{
return call_state->finished && !call_state->cancelled &&
call_state->ret < 0;
return qatomic_load_acquire(&call_state->finished) &&
!qatomic_read(&call_state->cancelled) &&
call_state->ret < 0;
}
bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
return call_state->cancelled;
return qatomic_read(&call_state->cancelled);
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
assert(call_state->finished);
assert(qatomic_load_acquire(&call_state->finished));
if (error_is_read) {
*error_is_read = call_state->error_is_read;
}
return call_state->ret;
}
/*
* Note that cancelling and finishing are racy.
* User can cancel a block-copy that is already finished.
*/
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
call_state->cancelled = true;
qatomic_set(&call_state->cancelled, true);
block_copy_kick(call_state);
}
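finished is now published with qatomic_store_release() and read back with qatomic_load_acquire(), so ret and error_is_read written before the flag are guaranteed visible to any reader that observes finished == true, while cancelled only needs plain atomic accesses. A stand-alone C11 sketch of that pairing, using generic atomics rather than the qatomic_* wrappers:

/*
 * Sketch only, not part of the patch: release/acquire pairing on a
 * "finished" flag, written with C11 atomics instead of QEMU's
 * qatomic_store_release()/qatomic_load_acquire().
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    int ret;                    /* plain data, ordered by the flag below */
    bool error_is_read;         /* plain data, ordered by the flag below */
    atomic_bool finished;
    atomic_bool cancelled;      /* independent flag, relaxed is enough */
} CallState;

static void finish(CallState *cs, int ret, bool error_is_read)
{
    cs->ret = ret;
    cs->error_is_read = error_is_read;
    /* make the writes above visible before the flag is seen as true */
    atomic_store_explicit(&cs->finished, true, memory_order_release);
}

static bool succeeded(CallState *cs)
{
    /* the acquire load pairs with the release store in finish() */
    if (!atomic_load_explicit(&cs->finished, memory_order_acquire)) {
        return false;
    }
    return !atomic_load_explicit(&cs->cancelled, memory_order_relaxed) &&
           cs->ret == 0;
}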
@ -820,15 +935,12 @@ BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
s->skip_unallocated = skip;
qatomic_set(&s->skip_unallocated, skip);
}
void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
s->speed = speed;
if (speed > 0) {
ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
}
ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
/*
* Note: it's good to kick all call states from here, but it should be done


@ -119,24 +119,24 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
uint64_t delay_ns = 0;
int ret = 0;
int64_t n = 0; /* bytes */
void *buf = NULL;
QEMU_AUTO_VFREE void *buf = NULL;
int64_t len, base_len;
ret = len = blk_getlength(s->top);
len = blk_getlength(s->top);
if (len < 0) {
goto out;
return len;
}
job_progress_set_remaining(&s->common.job, len);
ret = base_len = blk_getlength(s->base);
base_len = blk_getlength(s->base);
if (base_len < 0) {
goto out;
return base_len;
}
if (base_len < len) {
ret = blk_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL);
if (ret) {
goto out;
return ret;
}
}
@ -174,7 +174,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
block_job_error_action(&s->common, s->on_error,
error_in_source, -ret);
if (action == BLOCK_ERROR_ACTION_REPORT) {
goto out;
return ret;
} else {
n = 0;
continue;
@ -190,12 +190,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
}
}
ret = 0;
out:
qemu_vfree(buf);
return ret;
return 0;
}
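Declaring the buffers QEMU_AUTO_VFREE lets every early return in commit_run() and bdrv_commit() below drop the explicit qemu_vfree()/out: cleanup. The macro relies on the GCC/Clang cleanup attribute; a minimal stand-alone sketch of the same idea, with made-up names (MY_AUTO_FREE, my_buffer_free) rather than QEMU's:

/*
 * Sketch only, not part of the patch: an auto-freed pointer via
 * __attribute__((cleanup)), the mechanism QEMU_AUTO_VFREE is built on.
 */
#include <stdlib.h>

static void my_buffer_free(void *p)
{
    /* cleanup handlers get a pointer to the variable leaving scope */
    free(*(void **)p);
}

#define MY_AUTO_FREE __attribute__((cleanup(my_buffer_free)))

static int work(void)
{
    MY_AUTO_FREE char *buf = malloc(4096);

    if (!buf) {
        return -1;              /* cleanup runs free(NULL), a no-op */
    }
    /* ... use buf ... */
    return 0;                   /* buf is freed automatically here */
}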
static const BlockJobDriver commit_job_driver = {
@ -312,6 +307,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
commit_top_bs->total_sectors = top->total_sectors;
ret = bdrv_append(commit_top_bs, top, errp);
bdrv_unref(commit_top_bs); /* referenced by new parents or failed */
if (ret < 0) {
commit_top_bs = NULL;
goto fail;
@ -434,7 +430,7 @@ int bdrv_commit(BlockDriverState *bs)
int ro;
int64_t n;
int ret = 0;
uint8_t *buf = NULL;
QEMU_AUTO_VFREE uint8_t *buf = NULL;
Error *local_err = NULL;
if (!drv)
@ -452,7 +448,7 @@ int bdrv_commit(BlockDriverState *bs)
return -EBUSY;
}
ro = backing_file_bs->read_only;
ro = bdrv_is_read_only(backing_file_bs);
if (ro) {
if (bdrv_reopen_set_read_only(backing_file_bs, false, NULL)) {
@ -555,8 +551,6 @@ int bdrv_commit(BlockDriverState *bs)
ret = 0;
ro_cleanup:
qemu_vfree(buf);
blk_unref(backing);
if (bdrv_cow_bs(bs) != backing_file_bs) {
bdrv_set_backing_hd(bs, backing_file_bs, &error_abort);


@ -29,7 +29,6 @@
typedef struct BDRVStateCOR {
bool active;
BlockDriverState *bottom_bs;
bool chain_frozen;
} BDRVStateCOR;
@ -89,7 +88,6 @@ static int cor_open(BlockDriverState *bs, QDict *options, int flags,
*/
bdrv_ref(bottom_bs);
}
state->active = true;
state->bottom_bs = bottom_bs;
/*
@ -112,17 +110,6 @@ static void cor_child_perm(BlockDriverState *bs, BdrvChild *c,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
BDRVStateCOR *s = bs->opaque;
if (!s->active) {
/*
* While the filter is being removed
*/
*nperm = 0;
*nshared = BLK_PERM_ALL;
return;
}
*nperm = perm & PERM_PASSTHROUGH;
*nshared = (shared & PERM_PASSTHROUGH) | PERM_UNCHANGED;
@ -280,32 +267,14 @@ static BlockDriver bdrv_copy_on_read = {
void bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs)
{
BdrvChild *child;
BlockDriverState *bs;
BDRVStateCOR *s = cor_filter_bs->opaque;
child = bdrv_filter_child(cor_filter_bs);
if (!child) {
return;
}
bs = child->bs;
/* Retain the BDS until we complete the graph change. */
bdrv_ref(bs);
/* Hold a guest back from writing while permissions are being reset. */
bdrv_drained_begin(bs);
/* Drop permissions before the graph change. */
s->active = false;
/* unfreeze, as otherwise bdrv_replace_node() will fail */
if (s->chain_frozen) {
s->chain_frozen = false;
bdrv_unfreeze_backing_chain(cor_filter_bs, s->bottom_bs);
}
bdrv_child_refresh_perms(cor_filter_bs, child, &error_abort);
bdrv_replace_node(cor_filter_bs, bs, &error_abort);
bdrv_drained_end(bs);
bdrv_unref(bs);
bdrv_drop_filter(cor_filter_bs, &error_abort);
bdrv_unref(cor_filter_bs);
}


@ -66,4 +66,10 @@ int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs,
QEMUIOVector *qiov, int64_t pos);
int generated_co_wrapper
nbd_do_establish_connection(BlockDriverState *bs, Error **errp);
int coroutine_fn
nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp);
#endif /* BLOCK_COROUTINES_INT_H */


@ -193,7 +193,7 @@ int bdrv_dirty_bitmap_check(const BdrvDirtyBitmap *bitmap, uint32_t flags,
error_setg(errp, "Bitmap '%s' is inconsistent and cannot be used",
bitmap->name);
error_append_hint(errp, "Try block-dirty-bitmap-remove to delete"
" this bitmap from disk");
" this bitmap from disk\n");
return -1;
}
