mirror of https://github.com/xemu-project/xemu.git
Testing and misc build updates:

  - tests/vm support for aarch64 VMs
  - tests/tcg better cross-compiler detection
  - update docker tooling to support registries
  - update docker support for xtensa
  - gitlab build docker images and store in registry
  - gitlab use docker images for builds
  - a number of skipIf updates to support move
  - linux-user MAP_FIXED_NOREPLACE fix
  - qht-bench compiler tweaks
  - configure fix for secret keyring
  - tsan fiber annotation clean-up
  - doc updates for mttcg/icount/gdbstub
  - fix cirrus to use brew bash for iotests
  - revert virtio-gpu breakage
  - fix LC_ALL to avoid sorting changes in iotests

Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-and-misc-110720-2' into staging

# gpg: Signature made Sat 11 Jul 2020 15:56:42 BST
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8 DF35 FBD0 DB09 5A9E 2A44

* remotes/stsquad/tags/pull-testing-and-misc-110720-2: (50 commits)
  iotests: Set LC_ALL=C for sort
  Revert "vga: build virtio-gpu as module"
  tests: fix "make check-qtest" for modular builds
  .cirrus.yml: add bash to the brew packages
  tests/docker: update toolchain set in debian-xtensa-cross
  tests/docker: fall back more gracefully when pull fails
  docs: Add to gdbstub documentation the PhyMemMode
  docs/devel: add some notes on tcg-icount for developers
  docs/devel: convert and update MTTCG design document
  tests/qht-bench: Adjust threshold computation
  tests/qht-bench: Adjust testing rate by -1
  travis.yml: Test also the other targets on s390x
  shippable: pull images from registry instead of building
  testing: add check-build target
  containers.yml: build with docker.py tooling
  gitlab: limit re-builds of the containers
  tests: improve performance of device-introspect-test
  gitlab: add avocado asset caching
  gitlab: enable check-tcg for linux-user tests
  linux-user/elfload: use MAP_FIXED_NOREPLACE in pgb_reserved_va
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 9f526fce49
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -20,7 +20,7 @@ macos_task:
   osx_instance:
     image: mojave-base
   install_script:
-    - brew install pkg-config python gnu-sed glib pixman make sdl2
+    - brew install pkg-config python gnu-sed glib pixman make sdl2 bash
   script:
     - mkdir build
     - cd build
@@ -33,7 +33,7 @@ macos_xcode_task:
     # this is an alias for the latest Xcode
     image: mojave-xcode
   install_script:
-    - brew install pkg-config gnu-sed glib pixman make sdl2
+    - brew install pkg-config gnu-sed glib pixman make sdl2 bash
   script:
     - mkdir build
     - cd build
--- a/.gitignore
+++ b/.gitignore
@@ -93,6 +93,7 @@
 *.tp
 *.vr
 *.d
+!/.gitlab-ci.d
 !/scripts/qemu-guest-agent/fsfreeze-hook.d
 *.o
 .sdk
--- /dev/null
+++ b/.gitlab-ci.d/containers.yml (new file)
@@ -0,0 +1,263 @@
+.container_job_template: &container_job_definition
+  image: docker:stable
+  stage: containers
+  services:
+    - docker:dind
+  before_script:
+    - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
+    - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
+    - apk add python3
+    - docker info
+    - docker login registry.gitlab.com -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
+  script:
+    - echo "TAG:$TAG"
+    - echo "COMMON_TAG:$COMMON_TAG"
+    - docker pull "$TAG" || docker pull "$COMMON_TAG" || true
+    - ./tests/docker/docker.py --engine docker build
+      -t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
+      -r $CI_REGISTRY_IMAGE
+    - docker tag "qemu/$NAME" "$TAG"
+    - docker push "$TAG"
+  after_script:
+    - docker logout
+  rules:
+    - changes:
+      - .gitlab-ci.d/containers.yml
+      - tests/docker/*
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+    - if: '$CI_COMMIT_REF_NAME == "testing/next"'
+
+amd64-centos7-container:
+  <<: *container_job_definition
+  variables:
+    NAME: centos7
+
+amd64-centos8-container:
+  <<: *container_job_definition
+  variables:
+    NAME: centos8
+
+amd64-debian10-container:
+  <<: *container_job_definition
+  variables:
+    NAME: debian10
+
+amd64-debian11-container:
+  <<: *container_job_definition
+  variables:
+    NAME: debian11
+
+amd64-debian9-container:
+  <<: *container_job_definition
+  variables:
+    NAME: debian9
+
+amd64-debian9-mxe-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian9-container']
+  variables:
+    NAME: debian9-mxe
+
+alpha-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-alpha-cross
+
+amd64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-amd64-cross
+
+amd64-debian-user-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-all-test-cross
+
+amd64-debian-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-amd64
+
+arm64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-arm64-cross
+
+arm64-test-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian11-container']
+  variables:
+    NAME: debian-arm64-test-cross
+
+armel-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-armel-cross
+
+armhf-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-armhf-cross
+
+hppa-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-hppa-cross
+
+m68k-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-m68k-cross
+
+mips64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-mips64-cross
+
+mips64el-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-mips64el-cross
+
+mips-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-mips-cross
+
+mipsel-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-mipsel-cross
+
+powerpc-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-powerpc-cross
+
+ppc64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-ppc64-cross
+
+ppc64el-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-ppc64el-cross
+
+riscv64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-riscv64-cross
+
+s390x-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-s390x-cross
+
+sh4-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-sh4-cross
+
+sparc64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian10-container']
+  variables:
+    NAME: debian-sparc64-cross
+
+tricore-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer2
+  needs: ['amd64-debian9-container']
+  variables:
+    NAME: debian-tricore-cross
+
+win32-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer3
+  needs: ['amd64-debian9-mxe-container']
+  variables:
+    NAME: debian-win32-cross
+
+win64-debian-cross-container:
+  <<: *container_job_definition
+  stage: containers-layer3
+  needs: ['amd64-debian9-mxe-container']
+  variables:
+    NAME: debian-win64-cross
+
+xtensa-debian-cross-container:
+  <<: *container_job_definition
+  variables:
+    NAME: debian-xtensa-cross
+
+cris-fedora-cross-container:
+  <<: *container_job_definition
+  variables:
+    NAME: fedora-cris-cross
+
+amd64-fedora-container:
+  <<: *container_job_definition
+  variables:
+    NAME: fedora
+
+i386-fedora-cross-container:
+  <<: *container_job_definition
+  variables:
+    NAME: fedora-i386-cross
+
+amd64-ubuntu1804-container:
+  <<: *container_job_definition
+  variables:
+    NAME: ubuntu1804
+
+amd64-ubuntu2004-container:
+  <<: *container_job_definition
+  variables:
+    NAME: ubuntu2004
+
+amd64-ubuntu-container:
+  <<: *container_job_definition
+  variables:
+    NAME: ubuntu
--- a/.gitlab-ci.d/edk2.yml
+++ b/.gitlab-ci.d/edk2.yml
@@ -1,8 +1,8 @@
 docker-edk2:
-  stage: build
+  stage: containers
   rules: # Only run this job when the Dockerfile is modified
   - changes:
-    - .gitlab-ci-edk2.yml
+    - .gitlab-ci.d/edk2.yml
     - .gitlab-ci.d/edk2/Dockerfile
     when: always
   image: docker:19.03.1
@@ -24,6 +24,7 @@ docker-edk2:
     - docker push $IMAGE_TAG
 
 build-edk2:
+  stage: build
   rules: # Only run this job when ...
   - changes: # ... roms/edk2/ is modified (submodule updated)
     - roms/edk2/*
--- a/.gitlab-ci.d/opensbi.yml
+++ b/.gitlab-ci.d/opensbi.yml
@@ -1,8 +1,8 @@
 docker-opensbi:
-  stage: build
+  stage: containers
   rules: # Only run this job when the Dockerfile is modified
   - changes:
-    - .gitlab-ci-opensbi.yml
+    - .gitlab-ci.d/opensbi.yml
     - .gitlab-ci.d/opensbi/Dockerfile
     when: always
   image: docker:19.03.1
@@ -24,6 +24,7 @@ docker-opensbi:
    - docker push $IMAGE_TAG
 
 build-opensbi:
+  stage: build
   rules: # Only run this job when ...
   - changes: # ... roms/opensbi/ is modified (submodule updated)
     - roms/opensbi/*
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,127 +1,186 @@
+# Currently we have two build stages after our containers are built:
+#  - build (for traditional build and test or first stage build)
+#  - test (for test stages, using build artefacts from a build stage)
+stages:
+  - containers
+  - containers-layer2
+  - containers-layer3
+  - build
+  - test
+
+# We assume GitLab has it's own caching set up for RPM/APT repositories so we
+# just take care of avocado assets here.
+cache:
+  paths:
+    - $HOME/avocado/data/cache
+
 include:
   - local: '/.gitlab-ci.d/edk2.yml'
   - local: '/.gitlab-ci.d/opensbi.yml'
+  - local: '/.gitlab-ci.d/containers.yml'
 
-.update_apt_template: &before_script_apt
-  before_script:
-    - apt-get update -qq
-    - apt-get install -y -qq git gcc libglib2.0-dev libpixman-1-dev make
-      genisoimage
-    - JOBS=$(expr $(nproc) + 1)
+.native_build_job_template: &native_build_job_definition
+  stage: build
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  before_script:
+    - JOBS=$(expr $(nproc) + 1)
+  script:
+    - mkdir build
+    - cd build
+    - if test -n "$TARGETS";
+      then
+        ../configure --enable-werror $CONFIGURE_ARGS --target-list="$TARGETS" ;
+      else
+        ../configure --enable-werror $CONFIGURE_ARGS ;
+      fi
+    - make -j"$JOBS"
+    - if test -n "$MAKE_CHECK_ARGS";
+      then
+        make -j"$JOBS" $MAKE_CHECK_ARGS ;
+      fi
 
-.update_dnf_template: &before_script_dnf
-  before_script:
-    - dnf update -y
-    - dnf install -y bzip2 diffutils gcc git genisoimage findutils glib2-devel
-      make python3 perl-podlators perl-Test-Harness pixman-devel zlib-devel
-    - JOBS=$(expr $(nproc) + 1)
+.native_test_job_template: &native_test_job_definition
+  stage: test
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  script:
+    - cd build
+    - find . -type f -exec touch {} +
+    - make $MAKE_CHECK_ARGS
 
-build-system1:
-  image: ubuntu:19.10
-  <<: *before_script_apt
-  script:
-    - apt-get install -y -qq libgtk-3-dev libvte-dev nettle-dev libcacard-dev
-      libusb-dev libvde-dev libspice-protocol-dev libgl1-mesa-dev libvdeplug-dev
-    - mkdir build
-    - cd build
-    - ../configure --enable-werror --target-list="aarch64-softmmu alpha-softmmu
-      cris-softmmu hppa-softmmu lm32-softmmu moxie-softmmu microblazeel-softmmu
-      mips64el-softmmu m68k-softmmu ppc-softmmu riscv64-softmmu sparc-softmmu"
-    - make -j"$JOBS"
-    - make -j"$JOBS" check
+.post_acceptance_template: &post_acceptance
+  after_script:
+    - cd build
+    - python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP")]' | xargs cat
+    - du -chs $HOME/avocado/data/cache
 
-build-system2:
-  image: fedora:latest
-  <<: *before_script_dnf
-  script:
-    - yum install -y SDL2-devel libgcrypt-devel brlapi-devel libaio-devel
-      libfdt-devel lzo-devel librdmacm-devel libibverbs-devel libibumad-devel
-      libzstd-devel
-    - mkdir build
-    - cd build
-    - ../configure --enable-werror --target-list="tricore-softmmu unicore32-softmmu
-      microblaze-softmmu mips-softmmu riscv32-softmmu s390x-softmmu sh4-softmmu
-      sparc64-softmmu x86_64-softmmu xtensa-softmmu nios2-softmmu or1k-softmmu"
-    - make -j"$JOBS"
-    - make -j"$JOBS" check
+build-system-ubuntu-main:
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: ubuntu2004
+    TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu lm32-softmmu
+      moxie-softmmu microblazeel-softmmu mips64el-softmmu m68k-softmmu ppc-softmmu
+      riscv64-softmmu sparc-softmmu
+    MAKE_CHECK_ARGS: check-build
+  artifacts:
+    paths:
+      - build
+
+check-system-ubuntu-main:
+  <<: *native_test_job_definition
+  needs:
+    - job: build-system-ubuntu-main
+      artifacts: true
+  variables:
+    IMAGE: ubuntu2004
+    MAKE_CHECK_ARGS: check
+
+acceptance-system-ubuntu-main:
+  <<: *native_test_job_definition
+  needs:
+    - job: build-system-ubuntu-main
+      artifacts: true
+  variables:
+    IMAGE: ubuntu2004
+    MAKE_CHECK_ARGS: check-acceptance
+  <<: *post_acceptance
+
+build-system-fedora-alt:
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: fedora
+    TARGETS: tricore-softmmu unicore32-softmmu microblaze-softmmu mips-softmmu
+      riscv32-softmmu s390x-softmmu sh4-softmmu sparc64-softmmu x86_64-softmmu
+      xtensa-softmmu nios2-softmmu or1k-softmmu
+    MAKE_CHECK_ARGS: check-build
+  artifacts:
+    paths:
+      - build
+
+check-system-fedora-alt:
+  <<: *native_test_job_definition
+  needs:
+    - job: build-system-fedora-alt
+      artifacts: true
+  variables:
+    IMAGE: fedora
+    MAKE_CHECK_ARGS: check
+
+acceptance-system-fedora-alt:
+  <<: *native_test_job_definition
+  needs:
+    - job: build-system-fedora-alt
+      artifacts: true
+  variables:
+    IMAGE: fedora
+    MAKE_CHECK_ARGS: check-acceptance
+  <<: *post_acceptance
 
 build-disabled:
-  image: fedora:latest
-  <<: *before_script_dnf
-  script:
-    - mkdir build
-    - cd build
-    - ../configure --enable-werror --disable-rdma --disable-slirp --disable-curl
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: fedora
+    CONFIGURE_ARGS: --disable-rdma --disable-slirp --disable-curl
       --disable-capstone --disable-live-block-migration --disable-glusterfs
       --disable-replication --disable-coroutine-pool --disable-smartcard
       --disable-guest-agent --disable-curses --disable-libxml2 --disable-tpm
      --disable-qom-cast-debug --disable-spice --disable-vhost-vsock
      --disable-vhost-net --disable-vhost-crypto --disable-vhost-user
-      --target-list="i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user"
-    - make -j"$JOBS"
-    - make -j"$JOBS" check-qtest SPEED=slow
+    TARGETS: i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user
+    MAKE_CHECK_ARGS: check-qtest SPEED=slow
 
 build-tcg-disabled:
-  image: centos:8
-  <<: *before_script_dnf
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: centos8
   script:
-    - dnf install -y clang gtk3-devel libusbx-devel libgcrypt-devel
     - mkdir build
     - cd build
-    - ../configure --cc=clang --enable-werror --disable-tcg --audio-drv-list=""
+    - ../configure --disable-tcg --audio-drv-list=""
     - make -j"$JOBS"
     - make check-unit
     - make check-qapi-schema
     - cd tests/qemu-iotests/
     - ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
             052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
             170 171 183 184 192 194 197 208 215 221 222 226 227 236 253 277
     - ./check -qcow2 028 051 056 057 058 065 067 068 082 085 091 095 096 102 122
             124 132 139 142 144 145 151 152 155 157 165 194 196 197 200 202
             208 209 215 216 218 222 227 234 246 247 248 250 254 255 257 258
             260 261 262 263 264 270 272 273 277 279
 
 build-user:
-  <<: *before_script_apt
-  script:
-    - mkdir build
-    - cd build
-    - ../configure --enable-werror --disable-system --disable-guest-agent
-      --disable-capstone --disable-slirp --disable-fdt
-    - make -j"$JOBS"
-    - make run-tcg-tests-i386-linux-user run-tcg-tests-x86_64-linux-user
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: debian-all-test-cross
+    CONFIGURE_ARGS: --disable-tools --disable-system
+    MAKE_CHECK_ARGS: check-tcg
 
 build-clang:
-  image: fedora:latest
-  <<: *before_script_dnf
-  script:
-    - yum install -y clang SDL2-devel libattr-devel libcap-ng-devel xfsprogs-devel
-      libiscsi-devel libnfs-devel libseccomp-devel gnutls-devel librbd-devel
-    - mkdir build
-    - cd build
-    - ../configure --cc=clang --cxx=clang++ --enable-werror
-      --target-list="alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
-      ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user"
-    - make -j"$JOBS"
-    - make -j"$JOBS" check
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: fedora
+    CONFIGURE_ARGS: --cc=clang --cxx=clang++
+    TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
+      ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user
+    MAKE_CHECK_ARGS: check
 
 build-tci:
-  image: centos:8
-  <<: *before_script_dnf
+  <<: *native_build_job_definition
+  variables:
+    IMAGE: fedora
   script:
     - TARGETS="aarch64 alpha arm hppa m68k microblaze moxie ppc64 s390x x86_64"
     - mkdir build
     - cd build
     - ../configure --enable-tcg-interpreter
        --target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
     - make -j"$JOBS"
     - make run-tcg-tests-x86_64-softmmu
     - make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
     - for tg in $TARGETS ; do
        export QTEST_QEMU_BINARY="${tg}-softmmu/qemu-system-${tg}" ;
        ./tests/qtest/boot-serial-test || exit 1 ;
        ./tests/qtest/cdrom-test || exit 1 ;
      done
     - QTEST_QEMU_BINARY="x86_64-softmmu/qemu-system-x86_64" ./tests/qtest/pxe-test
-    - QTEST_QEMU_BINARY="s390x-softmmu/qemu-system-s390x"
-      ./tests/qtest/pxe-test -m slow
+    - QTEST_QEMU_BINARY="s390x-softmmu/qemu-system-s390x" ./tests/qtest/pxe-test -m slow
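The post_acceptance after_script above folds a small reporting step into a shell one-liner. As a rough, stand-alone Python sketch of what that one-liner does (the results.json path and field names come from the one-liner itself; everything else is illustrative):

    #!/usr/bin/env python3
    # Sketch of the post_acceptance one-liner: print the log files of any
    # avocado test that did not PASS or SKIP, so the CI log shows failures.
    import json

    # Path used by the CI job after running check-acceptance inside build/
    results_path = "tests/results/latest/results.json"

    with open(results_path) as f:
        report = json.load(f)

    for test in report["tests"]:
        if test["status"] not in ("PASS", "SKIP"):
            # The one-liner pipes these paths to `xargs cat`; here we just
            # print the log contents directly.
            with open(test["logfile"]) as log:
                print(log.read())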
--- a/.shippable.yml
+++ b/.shippable.yml
@@ -26,12 +26,10 @@ env:
     - IMAGE=debian-ppc64el-cross
       TARGET_LIST=ppc64-softmmu,ppc64-linux-user,ppc64abi32-linux-user
 build:
-  pre_ci:
-    - make docker-image-${IMAGE} V=1
   pre_ci_boot:
-    image_name: qemu
-    image_tag: ${IMAGE}
-    pull: false
+    image_name: registry.gitlab.com/qemu-project/qemu/${IMAGE}
+    image_tag: latest
+    pull: true
     options: "-e HOME=/root"
   ci:
     - unset CC
--- a/.travis.yml
+++ b/.travis.yml
@@ -289,29 +289,6 @@ jobs:
       python: 3.6
 
 
-    # Acceptance (Functional) tests
-    - name: "GCC check-acceptance"
-      dist: bionic
-      env:
-        - CONFIG="--enable-tools --target-list=aarch64-softmmu,alpha-softmmu,arm-softmmu,m68k-softmmu,microblaze-softmmu,mips-softmmu,mips64el-softmmu,nios2-softmmu,or1k-softmmu,ppc-softmmu,ppc64-softmmu,s390x-softmmu,sh4-softmmu,sparc-softmmu,x86_64-softmmu,xtensa-softmmu"
-        - TEST_CMD="make check-acceptance"
-        - CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-acceptance"
-      after_script:
-        - python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP")]' | xargs cat
-        - du -chs $HOME/avocado/data/cache
-      addons:
-        apt:
-          packages:
-            - python3-pil
-            - python3-pip
-            - python3-numpy
-            - python3-opencv
-            - python3-venv
-            - rpm2cpio
-            - tesseract-ocr
-            - tesseract-ocr-eng
-
-
     # Using newer GCC with sanitizers
     - name: "GCC9 with sanitizers (softmmu)"
       addons:
@@ -505,6 +482,45 @@ jobs:
           $(exit $BUILD_RC);
         fi
 
+    - name: "[s390x] GCC (other-softmmu)"
+      arch: s390x
+      dist: bionic
+      addons:
+        apt_packages:
+          - libaio-dev
+          - libattr1-dev
+          - libcap-ng-dev
+          - libgnutls28-dev
+          - libiscsi-dev
+          - liblttng-ust-dev
+          - liblzo2-dev
+          - libncurses-dev
+          - libnfs-dev
+          - libnss3-dev
+          - libpixman-1-dev
+          - libsdl2-dev
+          - libsdl2-image-dev
+          - libseccomp-dev
+          - libsnappy-dev
+          - libzstd-dev
+          - nettle-dev
+          - xfslibs-dev
+          # Tests dependencies
+          - genisoimage
+      env:
+        - CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
+            --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
+
+    - name: "[s390x] GCC (user)"
+      arch: s390x
+      dist: bionic
+      addons:
+        apt_packages:
+          - libgcrypt20-dev
+          - libgnutls28-dev
+      env:
+        - CONFIG="--disable-containers --disable-system"
+
     - name: "[s390x] Clang (disable-tcg)"
       arch: s390x
       dist: bionic
--- a/configure
+++ b/configure
@@ -418,6 +418,7 @@ prefix="/usr/local"
 mandir="\${prefix}/share/man"
 datadir="\${prefix}/share"
 firmwarepath="\${prefix}/share/qemu-firmware"
+efi_aarch64=""
 qemu_docdir="\${prefix}/share/doc/qemu"
 bindir="\${prefix}/bin"
 libdir="\${prefix}/lib"
@@ -960,6 +961,13 @@ do
   fi
 done
 
+# Check for existence of python3 yaml, needed to
+# import yaml config files into vm-build.
+python_yaml="no"
+if $(python3 -c "import yaml" 2> /dev/null); then
+    python_yaml="yes"
+fi
+
 : ${smbd=${SMBD-/usr/sbin/smbd}}
 
 # Default objcc to clang if available, otherwise use CC
@@ -1102,6 +1110,8 @@ for opt do
   ;;
   --firmwarepath=*) firmwarepath="$optarg"
   ;;
+  --efi-aarch64=*) efi_aarch64="$optarg"
+  ;;
   --host=*|--build=*|\
   --disable-dependency-tracking|\
   --sbindir=*|--sharedstatedir=*|\
@@ -1784,6 +1794,7 @@ Advanced options (experts only):
   --sysconfdir=PATH        install config in PATH$confsuffix
   --localstatedir=PATH     install local state in PATH (set at runtime on win32)
   --firmwarepath=PATH      search PATH for firmware files
+  --efi-aarch64=PATH       PATH of efi file to use for aarch64 VMs.
   --with-confsuffix=SUFFIX suffix for QEMU data inside datadir/libdir/sysconfdir [$confsuffix]
   --with-pkgversion=VERS   use specified string as sub-version of the package
   --enable-debug           enable common debug build options
@@ -3620,6 +3631,20 @@ EOF
   fi
 fi
 
+############################################
+# efi-aarch64 probe
+# Check for efi files needed by aarch64 VMs.
+# By default we will use the efi included with QEMU.
+# Allow user to override the path for efi also.
+if ! test -f "$efi_aarch64"; then
+  if test -f $source_path/pc-bios/edk2-aarch64-code.fd.bz2; then
+    # valid after build
+    efi_aarch64=$PWD/pc-bios/edk2-aarch64-code.fd
+  else
+    efi_aarch64=""
+  fi
+fi
+
 ##########################################
 # libcap-ng library probe
 if test "$cap_ng" != "no" ; then
@@ -6486,7 +6511,7 @@ EOF
 fi
 if test "$secret_keyring" != "no"
 then
-    if test "$have_keyring" == "yes"
+    if test "$have_keyring" = "yes"
     then
       secret_keyring=yes
     else
@@ -6868,6 +6893,8 @@ if test "$docs" != "no"; then
     echo "sphinx-build      $sphinx_build"
 fi
 echo "genisoimage       $genisoimage"
+echo "efi_aarch64       $efi_aarch64"
+echo "python_yaml       $python_yaml"
 echo "slirp support     $slirp $(echo_version $slirp $slirp_version)"
 if test "$slirp" != "no" ; then
     echo "smbd              $smbd"
@@ -7966,6 +7993,8 @@ echo "PYTHON=$python" >> $config_host_mak
 echo "SPHINX_BUILD=$sphinx_build" >> $config_host_mak
 echo "SPHINX_WERROR=$sphinx_werror" >> $config_host_mak
 echo "GENISOIMAGE=$genisoimage" >> $config_host_mak
+echo "EFI_AARCH64=$efi_aarch64" >> $config_host_mak
+echo "PYTHON_YAML=$python_yaml" >> $config_host_mak
 echo "CC=$cc" >> $config_host_mak
 if $iasl -h > /dev/null 2>&1; then
   echo "IASL=$iasl" >> $config_host_mak
--- a/docs/devel/index.rst
+++ b/docs/devel/index.rst
@@ -23,6 +23,8 @@ Contents:
    decodetree
    secure-coding-practices
    tcg
+   tcg-icount
+   multi-thread-tcg
    tcg-plugins
    bitops
    reset
--- a/docs/devel/multi-thread-tcg.rst
+++ b/docs/devel/multi-thread-tcg.rst
@@ -1,15 +1,17 @@
-Copyright (c) 2015-2016 Linaro Ltd.
+..
+  Copyright (c) 2015-2020 Linaro Ltd.
+
   This work is licensed under the terms of the GNU GPL, version 2 or
   later. See the COPYING file in the top-level directory.
 
 Introduction
 ============
 
-This document outlines the design for multi-threaded TCG system-mode
-emulation. The current user-mode emulation mirrors the thread
-structure of the translated executable. Some of the work will be
-applicable to both system and linux-user emulation.
+This document outlines the design for multi-threaded TCG (a.k.a MTTCG)
+system-mode emulation. user-mode emulation has always mirrored the
+thread structure of the translated executable although some of the
+changes done for MTTCG system emulation have improved the stability of
+linux-user emulation.
 
 The original system-mode TCG implementation was single threaded and
 dealt with multiple CPUs with simple round-robin scheduling. This
@@ -21,9 +23,18 @@ vCPU Scheduling
 ===============
 
 We introduce a new running mode where each vCPU will run on its own
-user-space thread. This will be enabled by default for all FE/BE
-combinations that have had the required work done to support this
-safely.
+user-space thread. This is enabled by default for all FE/BE
+combinations where the host memory model is able to accommodate the
+guest (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO is zero) and the
+guest has had the required work done to support this safely
+(TARGET_SUPPORTS_MTTCG).
+
+System emulation will fall back to the original round robin approach
+if:
+
+* forced by --accel tcg,thread=single
+* enabling --icount mode
+* 64 bit guests on 32 bit hosts (TCG_OVERSIZED_GUEST)
 
 In the general case of running translated code there should be no
 inter-vCPU dependencies and all vCPUs should be able to run at full
@@ -61,7 +72,9 @@ have their block-to-block jumps patched.
 Global TCG State
 ----------------
 
-### User-mode emulation
+User-mode emulation
+~~~~~~~~~~~~~~~~~~~
+
 We need to protect the entire code generation cycle including any post
 generation patching of the translated code. This also implies a shared
 translation buffer which contains code running on all cores. Any
@@ -78,9 +91,11 @@ patching.
 
 Code generation is serialised with mmap_lock().
 
-### !User-mode emulation
+!User-mode emulation
+~~~~~~~~~~~~~~~~~~~~
+
 Each vCPU has its own TCG context and associated TCG region, thereby
-requiring no locking.
+requiring no locking during translation.
 
 Translation Blocks
 ------------------
@@ -92,6 +107,7 @@ including:
 
   - debugging operations (breakpoint insertion/removal)
   - some CPU helper functions
+  - linux-user spawning it's first thread
 
 This is done with the async_safe_run_on_cpu() mechanism to ensure all
 vCPUs are quiescent when changes are being made to shared global
@@ -250,8 +266,10 @@ to enforce a particular ordering of memory operations from the point
 of view of external observers (e.g. another processor core). They can
 apply to any memory operations as well as just loads or stores.
 
-The Linux kernel has an excellent write-up on the various forms of
-memory barrier and the guarantees they can provide [1].
+The Linux kernel has an excellent `write-up
+<https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/Documentation/memory-barriers.txt>`_
+on the various forms of memory barrier and the guarantees they can
+provide.
 
 Barriers are often wrapped around synchronisation primitives to
 provide explicit memory ordering semantics. However they can be used
@@ -352,7 +370,3 @@ an exclusive lock which ensures all emulation is serialised.
 While the atomic helpers look good enough for now there may be a need
 to look at solutions that can more closely model the guest
 architectures semantics.
-
-==========
-
-[1] https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/Documentation/memory-barriers.txt
--- /dev/null
+++ b/docs/devel/tcg-icount.rst (new file)
@@ -0,0 +1,97 @@
+..
+   Copyright (c) 2020, Linaro Limited
+   Written by Alex Bennée
+
+
+========================
+TCG Instruction Counting
+========================
+
+TCG has long supported a feature known as icount which allows for
+instruction counting during execution. This should not be confused
+with cycle accurate emulation - QEMU does not attempt to emulate how
+long an instruction would take on real hardware. That is a job for
+other more detailed (and slower) tools that simulate the rest of a
+micro-architecture.
+
+This feature is only available for system emulation and is
+incompatible with multi-threaded TCG. It can be used to better align
+execution time with wall-clock time so a "slow" device doesn't run too
+fast on modern hardware. It can also provides for a degree of
+deterministic execution and is an essential part of the record/replay
+support in QEMU.
+
+Core Concepts
+=============
+
+At its heart icount is simply a count of executed instructions which
+is stored in the TimersState of QEMU's timer sub-system. The number of
+executed instructions can then be used to calculate QEMU_CLOCK_VIRTUAL
+which represents the amount of elapsed time in the system since
+execution started. Depending on the icount mode this may either be a
+fixed number of ns per instruction or adjusted as execution continues
+to keep wall clock time and virtual time in sync.
+
+To be able to calculate the number of executed instructions the
+translator starts by allocating a budget of instructions to be
+executed. The budget of instructions is limited by how long it will be
+until the next timer will expire. We store this budget as part of a
+vCPU icount_decr field which shared with the machinery for handling
+cpu_exit(). The whole field is checked at the start of every
+translated block and will cause a return to the outer loop to deal
+with whatever caused the exit.
+
+In the case of icount, before the flag is checked we subtract the
+number of instructions the translation block would execute. If this
+would cause the instruction budget to go negative we exit the main
+loop and regenerate a new translation block with exactly the right
+number of instructions to take the budget to 0 meaning whatever timer
+was due to expire will expire exactly when we exit the main run loop.
+
+Dealing with MMIO
+-----------------
+
+While we can adjust the instruction budget for known events like timer
+expiry we cannot do the same for MMIO. Every load/store we execute
+might potentially trigger an I/O event, at which point we will need an
+up to date and accurate reading of the icount number.
+
+To deal with this case, when an I/O access is made we:
+
+- restore un-executed instructions to the icount budget
+- re-compile a single [1]_ instruction block for the current PC
+- exit the cpu loop and execute the re-compiled block
+
+The new block is created with the CF_LAST_IO compile flag which
+ensures the final instruction translation starts with a call to
+gen_io_start() so we don't enter a perpetual loop constantly
+recompiling a single instruction block. For translators using the
+common translator_loop this is done automatically.
+
+.. [1] sometimes two instructions if dealing with delay slots
+
+Other I/O operations
+--------------------
+
+MMIO isn't the only type of operation for which we might need a
+correct and accurate clock. IO port instructions and accesses to
+system registers are the common examples here. These instructions have
+to be handled by the individual translators which have the knowledge
+of which operations are I/O operations.
+
+When the translator is handling an instruction of this kind:
+
+* it must call gen_io_start() if icount is enabled, at some
+  point before the generation of the code which actually does
+  the I/O, using a code fragment similar to:
+
+.. code:: c
+
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+        gen_io_start();
+    }
+
+* it must end the TB immediately after this instruction
+
+Note that some older front-ends call a "gen_io_end()" function:
+this is obsolete and should not be used.
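As a rough illustration of the budget accounting described in the icount notes above, here is a small Python sketch. It is purely conceptual and not QEMU code: the executor is handed an instruction budget bounded by the next timer deadline, charges each translation block against it up front, and bails out to the outer loop when the budget would go negative.

    # Conceptual sketch of icount budgeting; all names are illustrative only.
    def run_until_next_timer(blocks, ns_per_insn, ns_until_timer):
        """Execute translation blocks until the instruction budget runs out.

        Returns the number of instructions actually executed, which the
        caller would fold into the virtual clock (QEMU_CLOCK_VIRTUAL).
        """
        budget = ns_until_timer // ns_per_insn   # instructions we may run
        executed = 0
        for block_len in blocks:                 # block_len = insns in the TB
            if budget - block_len < 0:
                # Like QEMU, stop here; the real implementation would
                # retranslate a block with exactly `budget` instructions.
                break
            budget -= block_len
            executed += block_len
        return executed

    # Example: 10 ns per instruction, timer due in 100 ns -> budget of 10.
    print(run_until_next_timer([4, 3, 6], ns_per_insn=10, ns_until_timer=100))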
--- a/docs/system/gdb.rst
+++ b/docs/system/gdb.rst
@@ -87,3 +87,23 @@ three commands you can query and set the single step behavior:
   (gdb) maintenance packet Qqemu.sstep=0x5
   sending: "qemu.sstep=0x5"
   received: "OK"
+
+Another feature that QEMU gdbstub provides is to toggle the memory GDB
+works with, by default GDB will show the current process memory respecting
+the virtual address translation.
+
+If you want to examine/change the physical memory you can set the gdbstub
+to work with the physical memory rather with the virtual one.
+
+The memory mode can be checked by sending the following command:
+
+``maintenance packet qqemu.PhyMemMode``
+    This will return either 0 or 1, 1 indicates you are currently in the
+    physical memory mode.
+
+``maintenance packet Qqemu.PhyMemMode:1``
+    This will change the memory mode to physical memory.
+
+``maintenance packet Qqemu.PhyMemMode:0``
+    This will change it back to normal memory mode.
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -49,19 +49,16 @@ common-obj-m += qxl.mo
 qxl.mo-objs = qxl.o qxl-logger.o qxl-render.o
 endif
 
-ifeq ($(CONFIG_VIRTIO_GPU),y)
-common-obj-m += virtio-gpu.mo
-virtio-gpu-obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
-virtio-gpu-obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
-virtio-gpu-obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
-virtio-gpu-obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
-virtio-gpu-obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
-virtio-gpu-obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
-virtio-gpu.mo-objs := $(virtio-gpu-obj-y)
-virtio-gpu.mo-cflags := $(VIRGL_CFLAGS)
-virtio-gpu.mo-libs := $(VIRGL_LIBS)
-endif
+common-obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
+common-obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
+common-obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
+common-obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
+common-obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
+common-obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
+virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
+virtio-gpu.o-libs += $(VIRGL_LIBS)
+virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS)
+virtio-gpu-3d.o-libs += $(VIRGL_LIBS)
 
 common-obj-$(CONFIG_DPCD) += dpcd.o
 common-obj-$(CONFIG_XLNX_ZYNQMP_ARM) += xlnx_dp.o
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -2294,7 +2294,7 @@ static void pgb_dynamic(const char *image_name, long align)
 static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
                             abi_ulong guest_hiaddr, long align)
 {
-    const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
+    int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
     void *addr, *test;
 
     if (guest_hiaddr > reserved_va) {
@@ -2307,15 +2307,19 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     /* Widen the "image" to the entire reserved address space. */
     pgb_static(image_name, 0, reserved_va, align);
 
+#ifdef MAP_FIXED_NOREPLACE
+    flags |= MAP_FIXED_NOREPLACE;
+#endif
+
     /* Reserve the memory on the host. */
     assert(guest_base != 0);
     test = g2h(0);
     addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
     if (addr == MAP_FAILED) {
         error_report("Unable to reserve 0x%lx bytes of virtual address "
-                     "space for use as guest address space (check your "
+                     "space (%s) for use as guest address space (check your "
                      "virtual memory ulimit setting or reserve less "
-                     "using -R option)", reserved_va);
+                     "using -R option)", reserved_va, strerror(errno));
         exit(EXIT_FAILURE);
     }
     assert(addr == test);
--- /dev/null
+++ b/python/qemu/console_socket.py (new file)
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+#
+# This python module implements a ConsoleSocket object which is
+# designed always drain the socket itself, and place
+# the bytes into a in memory buffer for later processing.
+#
+# Optionally a file path can be passed in and we will also
+# dump the characters to this file for debug.
+#
+# Copyright 2020 Linaro
+#
+# Authors:
+#  Robert Foley <robert.foley@linaro.org>
+#
+# This code is licensed under the GPL version 2 or later.  See
+# the COPYING file in the top-level directory.
+#
+import asyncore
+import socket
+import threading
+import io
+import os
+import sys
+from collections import deque
+import time
+import traceback
+
+class ConsoleSocket(asyncore.dispatcher):
+
+    def __init__(self, address, file=None):
+        self._recv_timeout_sec = 300
+        self._buffer = deque()
+        self._asyncore_thread = None
+        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self._sock.connect(address)
+        self._logfile = None
+        if file:
+            self._logfile = open(file, "w")
+        asyncore.dispatcher.__init__(self, sock=self._sock)
+        self._open = True
+        self._thread_start()
+
+    def _thread_start(self):
+        """Kick off a thread to wait on the asyncore.loop"""
+        if self._asyncore_thread is not None:
+            return
+        self._asyncore_thread = threading.Thread(target=asyncore.loop,
+                                                 kwargs={'timeout':1})
+        self._asyncore_thread.daemon = True
+        self._asyncore_thread.start()
+
+    def handle_close(self):
+        """redirect close to base class"""
+        # Call the base class close, but not self.close() since
+        # handle_close() occurs in the context of the thread which
+        # self.close() attempts to join.
+        asyncore.dispatcher.close(self)
+
+    def close(self):
+        """Close the base object and wait for the thread to terminate"""
+        if self._open:
+            self._open = False
+            asyncore.dispatcher.close(self)
+            if self._asyncore_thread is not None:
+                thread, self._asyncore_thread = self._asyncore_thread, None
+                thread.join()
+            if self._logfile:
+                self._logfile.close()
+                self._logfile = None
+
+    def handle_read(self):
+        """process arriving characters into in memory _buffer"""
+        try:
+            data = asyncore.dispatcher.recv(self, 1)
+            # latin1 is needed since there are some chars
+            # we are receiving that cannot be encoded to utf-8
+            # such as 0xe2, 0x80, 0xA6.
+            string = data.decode("latin1")
+        except:
+            print("Exception seen.")
+            traceback.print_exc()
+            return
+        if self._logfile:
+            self._logfile.write("{}".format(string))
+            self._logfile.flush()
+        for c in string:
+            self._buffer.extend(c)
+
+    def recv(self, n=1, sleep_delay_s=0.1):
+        """Return chars from in memory buffer"""
+        start_time = time.time()
+        while len(self._buffer) < n:
+            time.sleep(sleep_delay_s)
+            elapsed_sec = time.time() - start_time
+            if elapsed_sec > self._recv_timeout_sec:
+                raise socket.timeout
+        chars = ''.join([self._buffer.popleft() for i in range(n)])
+        # We choose to use latin1 to remain consistent with
+        # handle_read() and give back the same data as the user would
+        # receive if they were reading directly from the
+        # socket w/o our intervention.
+        return chars.encode("latin1")
+
+    def set_blocking(self):
+        """Maintain compatibility with socket API"""
+        pass
+
+    def settimeout(self, seconds):
+        """Set current timeout on recv"""
+        self._recv_timeout_sec = seconds
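A minimal usage sketch for the ConsoleSocket helper added above. The socket path, log path and timeout value are made up for illustration; in practice the class is normally instantiated by QEMUMachine when console draining is enabled.

    from qemu.console_socket import ConsoleSocket

    # Hypothetical UNIX socket path exported by a running QEMU console chardev.
    console = ConsoleSocket("/tmp/qemu-console.sock", file="/tmp/console.log")
    console.settimeout(30)          # raise socket.timeout after 30s of silence

    line = b""
    while not line.endswith(b"\n"):
        line += console.recv(1)     # bytes are served from the in-memory buffer
    print(line.decode("latin1"))

    console.close()                 # stops the background asyncore thread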
@@ -26,6 +26,7 @@ import socket
 import tempfile
 from typing import Optional, Type
 from types import TracebackType
+from qemu.console_socket import ConsoleSocket
 
 from . import qmp
 
@@ -75,7 +76,8 @@ class QEMUMachine:
 
     def __init__(self, binary, args=None, wrapper=None, name=None,
                  test_dir="/var/tmp", monitor_address=None,
-                 socket_scm_helper=None, sock_dir=None):
+                 socket_scm_helper=None, sock_dir=None,
+                 drain_console=False, console_log=None):
         '''
         Initialize a QEMUMachine
 
@@ -86,6 +88,9 @@ class QEMUMachine:
         @param test_dir: where to create socket and log file
         @param monitor_address: address for QMP monitor
         @param socket_scm_helper: helper program, required for send_fd_scm()
+        @param sock_dir: where to create socket (overrides test_dir for sock)
+        @param console_log: (optional) path to console log file
+        @param drain_console: (optional) True to drain console socket to buffer
         @note: Qemu process is not started until launch() is used.
         '''
         if args is None:
@@ -122,6 +127,12 @@ class QEMUMachine:
         self._console_address = None
         self._console_socket = None
         self._remove_files = []
+        self._console_log_path = console_log
+        if self._console_log_path:
+            # In order to log the console, buffering needs to be enabled.
+            self._drain_console = True
+        else:
+            self._drain_console = drain_console
 
     def __enter__(self):
         return self
@@ -580,7 +591,11 @@ class QEMUMachine:
         Returns a socket connected to the console
         """
         if self._console_socket is None:
-            self._console_socket = socket.socket(socket.AF_UNIX,
-                                                 socket.SOCK_STREAM)
-            self._console_socket.connect(self._console_address)
+            if self._drain_console:
+                self._console_socket = ConsoleSocket(self._console_address,
+                                                     file=self._console_log_path)
+            else:
+                self._console_socket = socket.socket(socket.AF_UNIX,
+                                                     socket.SOCK_STREAM)
+                self._console_socket.connect(self._console_address)
         return self._console_socket
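A rough sketch of how the new drain_console/console_log parameters added above might be used from a test script; the binary and log paths are placeholders, not part of the commit:

    from qemu.machine import QEMUMachine

    vm = QEMUMachine("/usr/local/bin/qemu-system-x86_64",
                     console_log="/tmp/guest-console.log")  # implies draining
    vm.set_console()               # request a serial console chardev
    vm.launch()
    # console_socket now returns a ConsoleSocket that buffers and logs output
    data = vm.console_socket.recv(32)
    vm.shutdown()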
@@ -22,6 +22,8 @@ endif
	@echo " $(MAKE) check-venv Creates a Python venv for tests"
	@echo " $(MAKE) check-clean Clean the tests and related data"
	@echo
+	@echo "The following are useful for CI builds"
+	@echo " $(MAKE) check-build Build most test binaris"
	@echo " $(MAKE) get-vm-images Downloads all images used by acceptance tests, according to configured targets (~350 MB each, 1.5 GB max)"
	@echo
	@echo
@@ -649,6 +651,10 @@ $(patsubst %, check-qtest-%, $(QTEST_TARGETS)): check-qtest-%: %-softmmu/all $(c
		QTEST_QEMU_BINARY=$*-softmmu/qemu-system-$* \
		QTEST_QEMU_IMG=qemu-img$(EXESUF))
 
+build-qtest: $(patsubst %, %-softmmu/all, $(QTEST_TARGETS)) $(check-qtest-y)
+
+build-unit: $(check-unit-y)
+
 check-unit: $(check-unit-y)
	$(call do_test_human, $^)
 
@@ -680,7 +686,6 @@ check-report.tap: $(patsubst %,check-report-qtest-%.tap, $(QTEST_TARGETS)) check
 FP_TEST_BIN=$(BUILD_DIR)/tests/fp/fp-test
 
 # the build dir is created by configure
-.PHONY: $(FP_TEST_BIN)
 $(FP_TEST_BIN): config-host.h $(test-util-obj-y)
	$(call quiet-command, \
		$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" $(notdir $@), \
@@ -814,9 +819,10 @@ check-softfloat-ops: $(SF_MATH_RULES)
 
 .PHONY: check-softfloat
 ifeq ($(CONFIG_TCG),y)
-check-softfloat: check-softfloat-conv check-softfloat-compare check-softfloat-ops
+build-softfloat: $(FP_TEST_BIN)
+check-softfloat: build-softfloat check-softfloat-conv check-softfloat-compare check-softfloat-ops
 else
-check-softfloat:
+build-softfloat check-softfloat:
	$(call quiet-command, /bin/true, "FLOAT TEST", \
		"SKIPPED for non-TCG builds")
 endif
@@ -944,7 +950,7 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images
	    --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
	    --filter-by-tags-include-empty --filter-by-tags-include-empty-key \
	    $(AVOCADO_TAGS) \
-	    --failfast=on tests/acceptance, \
+	    $(if $(GITLAB_CI),,--failfast=on) tests/acceptance, \
	    "AVOCADO", "tests/acceptance")
 
 # Consolidated targets
@@ -955,7 +961,8 @@ check-qtest: $(patsubst %,check-qtest-%, $(QTEST_TARGETS))
 ifeq ($(CONFIG_TOOLS),y)
 check-block: $(patsubst %,check-%, $(check-block-y))
 endif
-check: check-block check-qapi-schema check-unit check-softfloat check-qtest check-decodetree
+check-build: build-unit build-softfloat build-qtest
+
 check-clean:
	rm -rf $(check-unit-y) tests/*.o tests/*/*.o $(QEMU_IOTESTS_HELPERS-y)
	rm -rf $(sort $(foreach target,$(SYSEMU_TARGET_LIST), $(check-qtest-$(target)-y:%=tests/qtest/%$(EXESUF))) $(check-qtest-generic-y:%=tests/qtest/%$(EXESUF)))
@@ -963,6 +970,8 @@ check-clean:
	rm -f tests/qtest/dbus-vmstate1-gen-timestamp
	rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR)
 
+check: check-block check-qapi-schema check-unit check-softfloat check-qtest check-decodetree
+
 clean: check-clean
 
 # Build the help program automatically
@@ -20,6 +20,7 @@ from avocado.utils import network
 from avocado.utils import vmimage
 from avocado.utils import datadrainer
 from avocado.utils.path import find_command
+from avocado import skipIf
 
 ACCEL_NOT_AVAILABLE_FMT = "%s accelerator does not seem to be available"
 KVM_NOT_AVAILABLE = ACCEL_NOT_AVAILABLE_FMT % "KVM"
@@ -220,6 +221,7 @@ class BootLinuxS390X(BootLinux):
 
     chksum = '4caaab5a434fd4d1079149a072fdc7891e354f834d355069ca982fdcaf5a122d'
 
+    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
     def test_s390_ccw_virtio_tcg(self):
         """
         :avocado: tags=machine:s390-ccw-virtio

@@ -8,10 +8,12 @@
 # This work is licensed under the terms of the GNU GPL, version 2 or
 # later. See the COPYING file in the top-level directory.
 
+import os
 import logging
 import tempfile
 
 from avocado_qemu import Test
+from avocado import skipIf
 
 
 class LinuxInitrd(Test):
@@ -51,6 +53,7 @@ class LinuxInitrd(Test):
                          max_size + 1)
         self.assertRegex(self.vm.get_log(), expected_msg)
 
+    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
     def test_with_2gib_file_should_work_with_linux_v4_16(self):
         """
         QEMU has supported up to 4 GiB initrd for recent kernel

@@ -15,6 +15,7 @@ from avocado import skipUnless
 from avocado_qemu import Test
 from avocado_qemu import wait_for_console_pattern
 from avocado.utils import archive
+from avocado import skipIf
 
 
 NUMPY_AVAILABLE = True
@@ -99,6 +100,7 @@ class MaltaMachineFramebuffer(Test):
         """
         self.do_test_i6400_framebuffer_logo(1)
 
+    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
     def test_mips_malta_i6400_framebuffer_logo_7cores(self):
         """
         :avocado: tags=arch:mips64el
@@ -108,6 +110,7 @@ class MaltaMachineFramebuffer(Test):
         """
         self.do_test_i6400_framebuffer_logo(7)
 
+    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
     def test_mips_malta_i6400_framebuffer_logo_8cores(self):
         """
         :avocado: tags=arch:mips64el

@@ -50,7 +50,7 @@ class RxGdbSimMachine(Test):
         :avocado: tags=machine:gdbsim-r5f562n7
         :avocado: tags=endian:little
         """
-        dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-qemu.dtb')
+        dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb')
         dtb_hash = '7b4e4e2c71905da44e86ce47adee2210b026ac18'
         dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash)
         kernel_url = ('http://acc.dl.osdn.jp/users/23/23845/zImage')

@@ -73,7 +73,7 @@ class ReplayKernel(LinuxKernelTest):
         logger = logging.getLogger('replay')
         logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1))
 
-    @skipIf(os.getenv('CONTINUOUS_INTEGRATION'), 'Running on Travis-CI')
+    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
     def test_x86_64_pc(self):
         """
         :avocado: tags=arch:x86_64
@@ -13,6 +13,7 @@ DOCKER_IMAGES := $(sort $(notdir $(basename $(wildcard $(DOCKER_FILES_DIR)/*.doc
 DOCKER_TARGETS := $(patsubst %,docker-image-%,$(DOCKER_IMAGES))
 # Use a global constant ccache directory to speed up repetitive builds
 DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache
+DOCKER_REGISTRY := $(if $(REGISTRY),$(REGISTRY),registry.gitlab.com/qemu-project/qemu)
 
 DOCKER_TESTS := $(notdir $(shell \
	find $(SRC_PATH)/tests/docker/ -name 'test-*' -type f))
@@ -50,13 +51,15 @@ docker-image: ${DOCKER_TARGETS}
 ifdef SKIP_DOCKER_BUILD
 docker-image-%: $(DOCKER_FILES_DIR)/%.docker
	$(call quiet-command, \
-		$(DOCKER_SCRIPT) check --quiet qemu:$* $<, \
+		$(DOCKER_SCRIPT) check --quiet qemu/$* $<, \
		"CHECK", "$*")
 else
 docker-image-%: $(DOCKER_FILES_DIR)/%.docker
	$(call quiet-command,\
-		$(DOCKER_SCRIPT) build -t qemu:$* -f $< \
-		$(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \
+		$(DOCKER_SCRIPT) build -t qemu/$* -f $< \
+		$(if $V,,--quiet) \
+		$(if $(NOCACHE),--no-cache, \
+			$(if $(DOCKER_REGISTRY),--registry $(DOCKER_REGISTRY))) \
		$(if $(NOUSER),,--add-current-user) \
		$(if $(EXTRA_FILES),--extra-files $(EXTRA_FILES))\
		$(if $(EXECUTABLE),--include-executable=$(EXECUTABLE)),\
@@ -75,14 +78,14 @@ docker-binfmt-image-debian-%: $(DOCKER_FILES_DIR)/debian-bootstrap.docker
		DEB_ARCH=$(DEB_ARCH) \
		DEB_TYPE=$(DEB_TYPE) \
		$(if $(DEB_URL),DEB_URL=$(DEB_URL),) \
-		$(DOCKER_SCRIPT) build qemu:debian-$* $< \
+		$(DOCKER_SCRIPT) build qemu/debian-$* $< \
		$(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \
		$(if $(NOUSER),,--add-current-user) \
		$(if $(EXTRA_FILES),--extra-files $(EXTRA_FILES)) \
		$(if $(EXECUTABLE),--include-executable=$(EXECUTABLE)), \
		"BUILD","binfmt debian-$* (debootstrapped)"), \
	$(call quiet-command, \
-		$(DOCKER_SCRIPT) check --quiet qemu:debian-$* $< || \
+		$(DOCKER_SCRIPT) check --quiet qemu/debian-$* $< || \
		{ echo "You will need to build $(EXECUTABLE)"; exit 1;},\
		"CHECK", "debian-$* exists"))
 
@@ -131,6 +134,7 @@ docker-image-travis: NOUSER=1
 
 # Specialist build images, sometimes very limited tools
 docker-image-debian-tricore-cross: docker-image-debian9
+docker-image-debian-all-test-cross: docker-image-debian10
 docker-image-debian-arm64-test-cross: docker-image-debian11
 
 # These images may be good enough for building tests but not for test builds
@@ -213,6 +217,7 @@ endif
	@echo ' Include extra files in image.'
	@echo ' ENGINE=auto/docker/podman'
	@echo ' Specify which container engine to run.'
+	@echo ' REGISTRY=url Cache builds from registry (default:$(DOCKER_REGISTRY))'
 
 # This rule if for directly running against an arbitrary docker target.
 # It is called by the expanded docker targets (e.g. make
@@ -258,7 +263,7 @@ docker-run: docker-qemu-src
 docker-run-%: CMD = $(shell echo '$@' | sed -e 's/docker-run-\([^@]*\)@\(.*\)/\1/')
 docker-run-%: IMAGE = $(shell echo '$@' | sed -e 's/docker-run-\([^@]*\)@\(.*\)/\2/')
 docker-run-%:
-	@$(MAKE) docker-run TEST=$(CMD) IMAGE=qemu:$(IMAGE)
+	@$(MAKE) docker-run TEST=$(CMD) IMAGE=qemu/$(IMAGE)
 
 docker-clean:
	$(call quiet-command, $(DOCKER_SCRIPT) clean)

@@ -47,7 +47,7 @@ build_qemu()
 check_qemu()
 {
     # default to make check unless the caller specifies
-    if test -z "$@"; then
+    if [ $# = 0 ]; then
         INVOCATION="check"
     else
         INVOCATION="$@"
@@ -204,7 +204,7 @@ def _dockerfile_preprocess(df):
     for l in df.splitlines():
         if len(l.strip()) == 0 or l.startswith("#"):
             continue
-        from_pref = "FROM qemu:"
+        from_pref = "FROM qemu/"
         if l.startswith(from_pref):
             # TODO: Alternatively we could replace this line with "FROM $ID"
             # where $ID is the image's hex id obtained with
@@ -221,6 +221,13 @@ class Docker(object):
    """ Running Docker commands """
    def __init__(self):
        self._command = _guess_engine_command()
+
+        if "docker" in self._command and "TRAVIS" not in os.environ:
+            os.environ["DOCKER_BUILDKIT"] = "1"
+            self._buildkit = True
+        else:
+            self._buildkit = False
+
        self._instance = None
        atexit.register(self._kill_instances)
        signal.signal(signal.SIGTERM, self._kill_instances)
@@ -289,10 +296,25 @@ class Docker(object):
        return labels.get("com.qemu.dockerfile-checksum", "")

    def build_image(self, tag, docker_dir, dockerfile,
-                    quiet=True, user=False, argv=None, extra_files_cksum=[]):
+                    quiet=True, user=False, argv=None, registry=None,
+                    extra_files_cksum=[]):
        if argv is None:
            argv = []

+        # pre-calculate the docker checksum before any
+        # substitutions we make for caching
+        checksum = _text_checksum(_dockerfile_preprocess(dockerfile))
+
+        if registry is not None:
+            # see if we can fetch a cache copy, may fail...
+            pull_args = ["pull", "%s/%s" % (registry, tag)]
+            if self._do(pull_args, quiet=quiet) == 0:
+                dockerfile = dockerfile.replace("FROM qemu/",
+                                                "FROM %s/qemu/" %
+                                                (registry))
+            else:
+                registry = None
+
        tmp_df = tempfile.NamedTemporaryFile(mode="w+t",
                                             encoding='utf-8',
                                             dir=docker_dir, suffix=".docker")
@@ -306,15 +328,23 @@ class Docker(object):
                         (uname, uid, uname))

        tmp_df.write("\n")
-        tmp_df.write("LABEL com.qemu.dockerfile-checksum=%s" %
-                     _text_checksum(_dockerfile_preprocess(dockerfile)))
+        tmp_df.write("LABEL com.qemu.dockerfile-checksum=%s" % (checksum))
        for f, c in extra_files_cksum:
            tmp_df.write("LABEL com.qemu.%s-checksum=%s" % (f, c))

        tmp_df.flush()

-        self._do_check(["build", "-t", tag, "-f", tmp_df.name] + argv +
-                       [docker_dir],
+        build_args = ["build", "-t", tag, "-f", tmp_df.name]
+        if self._buildkit:
+            build_args += ["--build-arg", "BUILDKIT_INLINE_CACHE=1"]
+
+        if registry is not None:
+            cache = "%s/%s" % (registry, tag)
+            build_args += ["--cache-from", cache]
+        build_args += argv
+        build_args += [docker_dir]
+
+        self._do_check(build_args,
                       quiet=quiet)

    def update_image(self, tag, tarball, quiet=True):
@@ -403,6 +433,8 @@ class BuildCommand(SubCommand):
        parser.add_argument("--add-current-user", "-u", dest="user",
                            action="store_true",
                            help="Add the current user to image's passwd")
+        parser.add_argument("--registry", "-r",
+                            help="cache from docker registry")
        parser.add_argument("-t", dest="tag",
                            help="Image Tag")
        parser.add_argument("-f", dest="dockerfile",
@@ -458,7 +490,8 @@ class BuildCommand(SubCommand):
                    for k, v in os.environ.items()
                    if k.lower() in FILTERED_ENV_NAMES]
        dkr.build_image(tag, docker_dir, dockerfile,
-                        quiet=args.quiet, user=args.user, argv=argv,
+                        quiet=args.quiet, user=args.user,
+                        argv=argv, registry=args.registry,
                        extra_files_cksum=cksum)

        rmtree(docker_dir)
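For illustration, one plausible way to drive the extended build_image() signature above directly, assuming tests/docker is on sys.path; the dockerfile path is a placeholder and the registry is the default named earlier in this series:

    from docker import Docker   # tests/docker/docker.py

    dkr = Docker()
    with open("tests/docker/dockerfiles/debian10.docker") as f:
        dockerfile = f.read()
    # Try to reuse cached layers from the registry before building locally;
    # if the pull fails, the build falls back to a plain local build.
    dkr.build_image("qemu/debian10", "tests/docker", dockerfile,
                    registry="registry.gitlab.com/qemu-project/qemu")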
@@ -0,0 +1,53 @@
+#
+# Docker all cross-compiler target (tests only)
+#
+# While the normal cross builds take care to setup proper multiarch
+# build environments which can cross build QEMU this just installs the
+# basic compilers for as many targets as possible. We shall use this
+# to build and run linux-user tests on GitLab
+#
+FROM qemu/debian10
+
+# What we need to build QEMU itself
+RUN apt update && \
+    DEBIAN_FRONTEND=noninteractive eatmydata \
+    apt build-dep -yy qemu
+
+# Add the foreign architecture we want and install dependencies
+RUN DEBIAN_FRONTEND=noninteractive eatmydata \
+    apt install -y --no-install-recommends \
+        gcc-aarch64-linux-gnu \
+        libc6-dev-arm64-cross \
+        gcc-alpha-linux-gnu \
+        libc6.1-dev-alpha-cross \
+        gcc-arm-linux-gnueabihf \
+        libc6-dev-armhf-cross \
+        gcc-hppa-linux-gnu \
+        libc6-dev-hppa-cross \
+        gcc-m68k-linux-gnu \
+        libc6-dev-m68k-cross \
+        gcc-mips-linux-gnu \
+        libc6-dev-mips-cross \
+        gcc-mips64-linux-gnuabi64 \
+        libc6-dev-mips64-cross \
+        gcc-mips64el-linux-gnuabi64 \
+        libc6-dev-mips64el-cross \
+        gcc-mipsel-linux-gnu \
+        libc6-dev-mipsel-cross \
+        gcc-powerpc-linux-gnu \
+        libc6-dev-powerpc-cross \
+        gcc-powerpc64-linux-gnu \
+        libc6-dev-ppc64-cross \
+        gcc-powerpc64le-linux-gnu \
+        libc6-dev-ppc64el-cross \
+        gcc-riscv64-linux-gnu \
+        libc6-dev-riscv64-cross \
+        gcc-s390x-linux-gnu \
+        libc6-dev-s390x-cross \
+        gcc-sh4-linux-gnu \
+        libc6-dev-sh4-cross \
+        gcc-sparc64-linux-gnu \
+        libc6-dev-sparc64-cross
+
+ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools
+ENV DEF_TARGET_LIST aarch64-linux-user,alpha-linux-user,arm-linux-user,hppa-linux-user,i386-linux-user,m68k-linux-user,mips-linux-user,mips64-linux-user,mips64el-linux-user,mipsel-linux-user,ppc-linux-user,ppc64-linux-user,ppc64le-linux-user,riscv64-linux-user,s390x-linux-user,sh4-linux-user,sparc64-linux-user
@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -4,7 +4,7 @@
 # This docker target is used on non-x86_64 machines which need the
 # x86_64 cross compilers installed.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 MAINTAINER Alex Bennée <alex.bennee@linaro.org>
 
 # Add the foreign architecture we want and install dependencies

@@ -4,7 +4,7 @@
 # This docker target builds on the debian Stretch base image. Further
 # libraries which are not widely available are installed by hand.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 
 RUN apt update && \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 # Add the foreign architecture we want and install dependencies
 RUN dpkg --add-architecture arm64

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Bullseye base image.
 #
-FROM qemu:debian11
+FROM qemu/debian11
 
 # Add the foreign architecture we want and install dependencies
 RUN dpkg --add-architecture arm64

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 
 # Add the foreign architecture we want and install dependencies

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 # Add the foreign architecture we want and install dependencies
 RUN dpkg --add-architecture armhf

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -4,7 +4,7 @@
 # This docker target builds on the debian Stretch base image.
 #
 
-FROM qemu:debian10
+FROM qemu/debian10
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -2,7 +2,7 @@
 # Docker ppc64 cross-compiler target
 #
 # This docker target builds on the debian Buster base image.
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 # Add the foreign architecture we want and install dependencies
 RUN dpkg --add-architecture ppc64el && \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 # Add the s390x architecture
 RUN dpkg --add-architecture s390x

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Buster base image.
 #
-FROM qemu:debian10
+FROM qemu/debian10
 
 RUN apt update && \
     DEBIAN_FRONTEND=noninteractive eatmydata \

@@ -7,7 +7,7 @@
 #
 # SPDX-License-Identifier: GPL-2.0-or-later
 #
-FROM qemu:debian9
+FROM qemu/debian9
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch MXE base image.
 #
-FROM qemu:debian9-mxe
+FROM qemu/debian9-mxe
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch MXE base image.
 #
-FROM qemu:debian9-mxe
+FROM qemu/debian9-mxe
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 

@@ -18,12 +18,12 @@ RUN apt-get update && \
         git \
         python3-minimal
 
-ENV CPU_LIST csp dc232b dc233c
-ENV TOOLCHAIN_RELEASE 2018.02
+ENV CPU_LIST dc232b dc233c de233_fpu dsp3400
+ENV TOOLCHAIN_RELEASE 2020.07
 
 RUN for cpu in $CPU_LIST; do \
         curl -#SL http://github.com/foss-xtensa/toolchain/releases/download/$TOOLCHAIN_RELEASE/x86_64-$TOOLCHAIN_RELEASE-xtensa-$cpu-elf.tar.gz \
         | tar -xzC /opt; \
     done
 
-ENV PATH $PATH:/opt/$TOOLCHAIN_RELEASE/xtensa-dc232b-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dc233c-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-csp-elf/bin
+ENV PATH $PATH:/opt/$TOOLCHAIN_RELEASE/xtensa-dc232b-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dc233c-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-de233_fpu-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dsp3400-elf/bin

@@ -3,7 +3,7 @@
 #
 # This docker target builds on the debian Stretch base image.
 #
-FROM qemu:debian9
+FROM qemu/debian9
 
 MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
 
@@ -80,7 +80,12 @@ ENV PACKAGES \
     pixman-devel \
     python3 \
     python3-PyYAML \
+    python3-numpy \
+    python3-opencv \
+    python3-pillow \
+    python3-pip \
     python3-sphinx \
+    python3-virtualenv \
     rdma-core-devel \
     SDL2-devel \
     snappy-devel \
@@ -89,6 +94,8 @@ ENV PACKAGES \
     systemd-devel \
     systemtap-sdt-devel \
     tar \
+    tesseract \
+    tesseract-langpack-eng \
     texinfo \
     usbredir-devel \
     virglrenderer-devel \

@@ -46,9 +46,17 @@ ENV PACKAGES flex bison \
     libxen-dev \
     libzstd-dev \
     make \
-    python3-yaml \
+    python3-numpy \
+    python3-opencv \
+    python3-pil \
+    python3-pip \
     python3-sphinx \
+    python3-venv \
+    python3-yaml \
+    rpm2cpio \
     sparse \
+    tesseract-ocr \
+    tesseract-ocr-eng \
     texinfo \
     xfslibs-dev\
     vim
@@ -186,7 +186,7 @@ _filter_img_create()
         -e 's/^\(data_file\)/3-\1/' \
         -e 's/^\(encryption\)/4-\1/' \
         -e 's/^\(preallocation\)/8-\1/' \
-        | sort \
+        | LC_ALL=C sort \
         | $SED -e 's/^[0-9]-//' \
         | tr '\n\0' ' \n' \
         | $SED -e 's/^ *$//' -e 's/ *$//'
@@ -25,7 +25,13 @@ struct thread_stats {
 struct thread_info {
     void (*func)(struct thread_info *);
     struct thread_stats stats;
-    uint64_t r;
+    /*
+     * Seed is in the range [1..UINT64_MAX], because the RNG requires
+     * a non-zero seed. To use, subtract 1 and compare against the
+     * threshold with </>=. This lets threshold = 0 never match (0% hit),
+     * and threshold = UINT64_MAX always match (100% hit).
+     */
+    uint64_t seed;
     bool write_op; /* writes alternate between insertions and removals */
     bool resize_down;
 } QEMU_ALIGNED(64); /* avoid false sharing among threads */
@@ -131,8 +137,9 @@ static uint64_t xorshift64star(uint64_t x)
 static void do_rz(struct thread_info *info)
 {
     struct thread_stats *stats = &info->stats;
+    uint64_t r = info->seed - 1;
 
-    if (info->r < resize_threshold) {
+    if (r < resize_threshold) {
         size_t size = info->resize_down ? resize_min : resize_max;
         bool resized;
 
@@ -151,13 +158,14 @@ static void do_rz(struct thread_info *info)
 static void do_rw(struct thread_info *info)
 {
     struct thread_stats *stats = &info->stats;
+    uint64_t r = info->seed - 1;
     uint32_t hash;
     long *p;
 
-    if (info->r >= update_threshold) {
+    if (r >= update_threshold) {
         bool read;
 
-        p = &keys[info->r & (lookup_range - 1)];
+        p = &keys[r & (lookup_range - 1)];
         hash = hfunc(*p);
         read = qht_lookup(&ht, p, hash);
         if (read) {
@@ -166,7 +174,7 @@ static void do_rw(struct thread_info *info)
             stats->not_rd++;
         }
     } else {
-        p = &keys[info->r & (update_range - 1)];
+        p = &keys[r & (update_range - 1)];
         hash = hfunc(*p);
         if (info->write_op) {
             bool written = false;
@@ -208,7 +216,7 @@ static void *thread_func(void *p)
 
     rcu_read_lock();
     while (!atomic_read(&test_stop)) {
-        info->r = xorshift64star(info->r);
+        info->seed = xorshift64star(info->seed);
         info->func(info);
     }
     rcu_read_unlock();
@@ -221,7 +229,7 @@ static void *thread_func(void *p)
 static void prepare_thread_info(struct thread_info *info, int i)
 {
     /* seed for the RNG; each thread should have a different one */
-    info->r = (i + 1) ^ time(NULL);
+    info->seed = (i + 1) ^ time(NULL);
     /* the first update will be a write */
     info->write_op = true;
     /* the first resize will be down */
@@ -281,11 +289,25 @@ static void pr_params(void)
 
 static void do_threshold(double rate, uint64_t *threshold)
 {
+    /*
+     * For 0 <= rate <= 1, scale to fit in a uint64_t.
+     *
+     * Scale by 2**64, with a special case for 1.0.
+     * The remainder of the possible values are scattered between 0
+     * and 0xfffffffffffff800 (nextafter(0x1p64, 0)).
+     *
+     * Note that we cannot simply scale by UINT64_MAX, because that
+     * value is not representable as an IEEE double value.
+     *
+     * If we scale by the next largest value, nextafter(0x1p64, 0),
+     * then the remainder of the possible values are scattered between
+     * 0 and 0xfffffffffffff000. Which leaves us with a gap between
+     * the final two inputs that is twice as large as any other.
+     */
     if (rate == 1.0) {
         *threshold = UINT64_MAX;
     } else {
-        *threshold = (rate * 0xffff000000000000ull)
-                     + (rate * 0x0000ffffffffffffull);
+        *threshold = rate * 0x1p64;
     }
 }
 
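To make the new threshold arithmetic concrete, a small Python rendering (not part of the commit) of the same scaling and of the seed-minus-one comparison described in the comments above:

    UINT64_MAX = 2**64 - 1

    def do_threshold(rate):
        # rate 1.0 is special-cased so that a 100% rate always matches
        return UINT64_MAX if rate == 1.0 else int(rate * 2**64)

    seed = 1                    # RNG seed stays in [1..UINT64_MAX]
    r = seed - 1                # compare r, not seed, against the threshold
    assert not (r < do_threshold(0.0))   # 0% rate never matches
    assert r < do_threshold(1.0)         # 100% rate always matches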
@@ -279,6 +279,7 @@ tests/qtest/tco-test$(EXESUF): tests/qtest/tco-test.o $(libqos-pc-obj-y)
 tests/qtest/virtio-ccw-test$(EXESUF): tests/qtest/virtio-ccw-test.o
 tests/qtest/display-vga-test$(EXESUF): tests/qtest/display-vga-test.o
 tests/qtest/qom-test$(EXESUF): tests/qtest/qom-test.o
+tests/qtest/modules-test$(EXESUF): tests/qtest/modules-test.o
 tests/qtest/test-hmp$(EXESUF): tests/qtest/test-hmp.o
 tests/qtest/machine-none-test$(EXESUF): tests/qtest/machine-none-test.o
 tests/qtest/device-plug-test$(EXESUF): tests/qtest/device-plug-test.o
@@ -105,14 +105,9 @@ static void test_one_device(QTestState *qts, const char *type)
 {
     QDict *resp;
     char *help;
-    char *qom_tree_start, *qom_tree_end;
-    char *qtree_start, *qtree_end;
 
     g_test_message("Testing device '%s'", type);
 
-    qom_tree_start = qtest_hmp(qts, "info qom-tree");
-    qtree_start = qtest_hmp(qts, "info qtree");
-
     resp = qtest_qmp(qts, "{'execute': 'device-list-properties',"
                          " 'arguments': {'typename': %s}}",
                     type);
@@ -120,21 +115,6 @@ static void test_one_device(QTestState *qts, const char *type)
 
     help = qtest_hmp(qts, "device_add \"%s,help\"", type);
     g_free(help);
-
-    /*
-     * Some devices leave dangling pointers in QOM behind.
-     * "info qom-tree" or "info qtree" have a good chance at crashing then.
-     * Also make sure that the tree did not change.
-     */
-    qom_tree_end = qtest_hmp(qts, "info qom-tree");
-    g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
-    g_free(qom_tree_start);
-    g_free(qom_tree_end);
-
-    qtree_end = qtest_hmp(qts, "info qtree");
-    g_assert_cmpstr(qtree_start, ==, qtree_end);
-    g_free(qtree_start);
-    g_free(qtree_end);
 }
 
 static void test_device_intro_list(void)
@@ -213,16 +193,38 @@ static void test_qom_list_fields(void)
 static void test_device_intro_none(void)
 {
     QTestState *qts = qtest_init(common_args);
+    g_autofree char *qom_tree_start = qtest_hmp(qts, "info qom-tree");
+    g_autofree char *qom_tree_end = NULL;
+    g_autofree char *qtree_start = qtest_hmp(qts, "info qtree");
+    g_autofree char *qtree_end = NULL;
+
     test_one_device(qts, "nonexistent");
 
+    /* Make sure that really nothing changed in the trees */
+    qom_tree_end = qtest_hmp(qts, "info qom-tree");
+    g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
+    qtree_end = qtest_hmp(qts, "info qtree");
+    g_assert_cmpstr(qtree_start, ==, qtree_end);
+
     qtest_quit(qts);
 }
 
 static void test_device_intro_abstract(void)
 {
     QTestState *qts = qtest_init(common_args);
+    g_autofree char *qom_tree_start = qtest_hmp(qts, "info qom-tree");
+    g_autofree char *qom_tree_end = NULL;
+    g_autofree char *qtree_start = qtest_hmp(qts, "info qtree");
+    g_autofree char *qtree_end = NULL;
+
     test_one_device(qts, "device");
 
+    /* Make sure that really nothing changed in the trees */
+    qom_tree_end = qtest_hmp(qts, "info qom-tree");
+    g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
+    qtree_end = qtest_hmp(qts, "info qtree");
+    g_assert_cmpstr(qtree_start, ==, qtree_end);
+
     qtest_quit(qts);
 }
 
@@ -231,9 +233,12 @@ static void test_device_intro_concrete(const void *args)
     QList *types;
     QListEntry *entry;
     const char *type;
-    QTestState *qts;
+    QTestState *qts = qtest_init(args);
+    g_autofree char *qom_tree_start = qtest_hmp(qts, "info qom-tree");
+    g_autofree char *qom_tree_end = NULL;
+    g_autofree char *qtree_start = qtest_hmp(qts, "info qtree");
+    g_autofree char *qtree_end = NULL;
 
-    qts = qtest_init(args);
     types = device_type_list(qts, false);
 
     QLIST_FOREACH_ENTRY(types, entry) {
@@ -243,6 +248,17 @@ static void test_device_intro_concrete(const void *args)
         test_one_device(qts, type);
     }
 
+    /*
+     * Some devices leave dangling pointers in QOM behind.
+     * "info qom-tree" or "info qtree" have a good chance at crashing then.
+     * Also make sure that the tree did not change.
+     */
+    qom_tree_end = qtest_hmp(qts, "info qom-tree");
+    g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
+
+    qtree_end = qtest_hmp(qts, "info qtree");
+    g_assert_cmpstr(qtree_start, ==, qtree_end);
+
     qobject_unref(types);
     qtest_quit(qts);
     g_free((void *)args);
@@ -47,7 +47,7 @@ ifneq ($(DOCKER_IMAGE),)
 
 DOCKER_COMPILE_CMD="$(DOCKER_SCRIPT) cc \
	--cc $(DOCKER_CROSS_CC_GUEST) \
-	-i qemu:$(DOCKER_IMAGE) \
+	-i qemu/$(DOCKER_IMAGE) \
	-s $(SRC_PATH) -- "
 
 .PHONY: docker-build-guest-tests
@@ -57,7 +57,7 @@ docker-build-guest-tests: docker-image-$(DOCKER_IMAGE)
	$(MAKE) -f $(TCG_MAKE) TARGET="$(TARGET)" CC=$(DOCKER_COMPILE_CMD) \
		SRC_PATH="$(SRC_PATH)" BUILD_STATIC=y \
		EXTRA_CFLAGS="$(CROSS_CC_GUEST_CFLAGS)"), \
-		"BUILD","$(TARGET) guest-tests with docker qemu:$(DOCKER_IMAGE)")
+		"BUILD","$(TARGET) guest-tests with docker qemu/$(DOCKER_IMAGE)")
 
 GUEST_BUILD=docker-build-guest-tests
 
@@ -46,20 +46,29 @@ fi
 : ${cross_cc_aarch64="aarch64-linux-gnu-gcc"}
 : ${cross_cc_aarch64_be="$cross_cc_aarch64"}
 : ${cross_cc_cflags_aarch64_be="-mbig-endian"}
+: $(cross_cc_alpha="alpha-linux-gnu-gcc")
 : ${cross_cc_arm="arm-linux-gnueabihf-gcc"}
 : ${cross_cc_cflags_armeb="-mbig-endian"}
+: ${cross_cc_hppa="hppa-linux-gnu-gcc"}
 : ${cross_cc_i386="i386-pc-linux-gnu-gcc"}
 : ${cross_cc_cflags_i386="-m32"}
-: ${cross_cc_x86_64="x86_64-pc-linux-gnu-gcc"}
-: ${cross_cc_cflags_x86_64="-m64"}
+: ${cross_cc_m68k="m68k-linux-gnu-gcc"}
+: $(cross_cc_mips64el="mips64el-linux-gnuabi64-gcc")
+: $(cross_cc_mips64="mips64-linux-gnuabi64-gcc")
+: $(cross_cc_mipsel="mipsel-linux-gnu-gcc")
+: $(cross_cc_mips="mips-linux-gnu-gcc")
 : ${cross_cc_ppc="powerpc-linux-gnu-gcc"}
 : ${cross_cc_cflags_ppc="-m32"}
-: ${cross_cc_ppc64="powerpc-linux-gnu-gcc"}
-: ${cross_cc_cflags_ppc64="-m64"}
+: ${cross_cc_ppc64="powerpc64-linux-gnu-gcc"}
 : ${cross_cc_ppc64le="powerpc64le-linux-gnu-gcc"}
-: ${cross_cc_cflags_s390x="-m64"}
+: $(cross_cc_riscv64="riscv64-linux-gnu-gcc")
+: ${cross_cc_s390x="s390x-linux-gnu-gcc"}
+: $(cross_cc_sh4="sh4-linux-gnu-gcc")
 : ${cross_cc_cflags_sparc="-m32 -mv8plus -mcpu=ultrasparc"}
+: ${cross_cc_sparc64="sparc64-linux-gnu-gcc"}
 : ${cross_cc_cflags_sparc64="-m64 -mcpu=ultrasparc"}
+: ${cross_cc_x86_64="x86_64-pc-linux-gnu-gcc"}
+: ${cross_cc_cflags_x86_64="-m64"}
 
 for target in $target_list; do
   arch=${target%%-*}
@@ -173,7 +182,7 @@ for target in $target_list; do
         container_image=debian-xtensa-cross
 
         # default to the dc232b cpu
-        container_cross_cc=/opt/2018.02/xtensa-dc232b-elf/bin/xtensa-dc232b-elf-gcc
+        container_cross_cc=/opt/2020.07/xtensa-dc232b-elf/bin/xtensa-dc232b-elf-gcc
         ;;
     esac
 
@ -5,6 +5,9 @@
|
||||||
IMAGES := freebsd netbsd openbsd centos fedora
|
IMAGES := freebsd netbsd openbsd centos fedora
|
||||||
ifneq ($(GENISOIMAGE),)
|
ifneq ($(GENISOIMAGE),)
|
||||||
IMAGES += ubuntu.i386 centos
|
IMAGES += ubuntu.i386 centos
|
||||||
|
ifneq ($(EFI_AARCH64),)
|
||||||
|
IMAGES += ubuntu.aarch64 centos.aarch64
|
||||||
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
IMAGES_DIR := $(HOME)/.cache/qemu-vm/images
|
IMAGES_DIR := $(HOME)/.cache/qemu-vm/images
|
||||||
|
@ -23,6 +26,12 @@ vm-help vm-test:
|
||||||
ifneq ($(GENISOIMAGE),)
|
ifneq ($(GENISOIMAGE),)
|
||||||
@echo " vm-build-centos - Build QEMU in CentOS VM, with Docker"
|
@echo " vm-build-centos - Build QEMU in CentOS VM, with Docker"
|
||||||
@echo " vm-build-ubuntu.i386 - Build QEMU in ubuntu i386 VM"
|
@echo " vm-build-ubuntu.i386 - Build QEMU in ubuntu i386 VM"
|
||||||
|
ifneq ($(EFI_AARCH64),)
|
||||||
|
@echo " vm-build-ubuntu.aarch64 - Build QEMU in ubuntu aarch64 VM"
|
||||||
|
@echo " vm-build-centos.aarch64 - Build QEMU in CentOS aarch64 VM"
|
||||||
|
else
|
||||||
|
@echo " (to build centos/ubuntu aarch64 images use configure --efi-aarch64)"
|
||||||
|
endif
|
||||||
else
|
else
|
||||||
@echo " (install genisoimage to build centos/ubuntu images)"
|
@echo " (install genisoimage to build centos/ubuntu images)"
|
||||||
endif
|
endif
|
||||||
|
@ -40,10 +49,17 @@ endif
|
||||||
@echo ' EXTRA_CONFIGURE_OPTS="..."'
|
@echo ' EXTRA_CONFIGURE_OPTS="..."'
|
||||||
@echo " J=[0..9]* - Override the -jN parameter for make commands"
|
@echo " J=[0..9]* - Override the -jN parameter for make commands"
|
||||||
@echo " DEBUG=1 - Enable verbose output on host and interactive debugging"
|
@echo " DEBUG=1 - Enable verbose output on host and interactive debugging"
|
||||||
|
@echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm "
|
||||||
@echo " V=1 - Enable verbose ouput on host and guest commands"
|
@echo " V=1 - Enable verbose ouput on host and guest commands"
|
||||||
@echo " QEMU_LOCAL=1 - Use QEMU binary local to this build."
|
@echo " QEMU_LOCAL=1 - Use QEMU binary local to this build."
|
||||||
@echo " QEMU=/path/to/qemu - Change path to QEMU binary"
|
@echo " QEMU=/path/to/qemu - Change path to QEMU binary"
|
||||||
@echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool"
|
@echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool"
|
||||||
|
ifeq ($(PYTHON_YAML),yes)
|
||||||
|
@echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file."
|
||||||
|
else
|
||||||
|
@echo " (install python3-yaml to enable support for yaml file to configure a VM.)"
|
||||||
|
endif
|
||||||
|
@echo " See conf_example_*.yml for file format details."
|
||||||
|
|
||||||
vm-build-all: $(addprefix vm-build-, $(IMAGES))
|
vm-build-all: $(addprefix vm-build-, $(IMAGES))
|
||||||
|
|
||||||
|
@ -59,6 +75,8 @@ $(IMAGES_DIR)/%.img: $(SRC_PATH)/tests/vm/% \
|
||||||
$(if $(V)$(DEBUG), --debug) \
|
$(if $(V)$(DEBUG), --debug) \
|
||||||
$(if $(GENISOIMAGE),--genisoimage $(GENISOIMAGE)) \
|
$(if $(GENISOIMAGE),--genisoimage $(GENISOIMAGE)) \
|
||||||
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
|
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
|
||||||
|
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
|
||||||
|
$(if $(LOG_CONSOLE),--log-console) \
|
||||||
--image "$@" \
|
--image "$@" \
|
||||||
--force \
|
--force \
|
||||||
--build-image $@, \
|
--build-image $@, \
|
||||||
|
@ -74,6 +92,8 @@ vm-build-%: $(IMAGES_DIR)/%.img
|
||||||
$(if $(J),--jobs $(J)) \
|
$(if $(J),--jobs $(J)) \
|
||||||
$(if $(V),--verbose) \
|
$(if $(V),--verbose) \
|
||||||
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
|
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
|
||||||
|
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
|
||||||
|
$(if $(LOG_CONSOLE),--log-console) \
|
||||||
--image "$<" \
|
--image "$<" \
|
||||||
$(if $(BUILD_TARGET),--build-target $(BUILD_TARGET)) \
|
$(if $(BUILD_TARGET),--build-target $(BUILD_TARGET)) \
|
||||||
--snapshot \
|
--snapshot \
|
||||||
|
@ -96,6 +116,8 @@ vm-boot-ssh-%: $(IMAGES_DIR)/%.img
|
||||||
$(if $(J),--jobs $(J)) \
|
$(if $(J),--jobs $(J)) \
|
||||||
$(if $(V)$(DEBUG), --debug) \
|
$(if $(V)$(DEBUG), --debug) \
|
||||||
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
|
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
|
||||||
|
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
|
||||||
|
$(if $(LOG_CONSOLE),--log-console) \
|
||||||
--image "$<" \
|
--image "$<" \
|
||||||
--interactive \
|
--interactive \
|
||||||
false, \
|
false, \
|
||||||
|
|
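As a rough illustration only (not part of the patch), the new make variables above can be driven from a small wrapper; the paths and target name below are assumptions.

    # Hypothetical driver for the new vm-build targets; paths/targets assumed.
    import subprocess

    def build_in_vm(target="vm-build-centos.aarch64", jobs=8):
        # EFI_AARCH64 enables the aarch64 images, LOG_CONSOLE saves the
        # console log under ~/.cache/qemu-vm as described in vm-help above.
        subprocess.check_call([
            "make", target,
            "EFI_AARCH64=/usr/share/qemu-efi-aarch64/QEMU_EFI.fd",
            "LOG_CONSOLE=1",
            "J={}".format(jobs),
        ])

    if __name__ == "__main__":
        build_in_vm()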
@@ -0,0 +1,106 @@
#!/usr/bin/env python3
#
# VM testing aarch64 library
#
# Copyright 2020 Linaro
#
# Authors:
#  Robert Foley <robert.foley@linaro.org>
#
# This code is licensed under the GPL version 2 or later.  See
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
from qemu.accel import kvm_available

# This is the config needed for current version of QEMU.
# This works for both kvm and tcg.
CURRENT_CONFIG = {
    'cpu'          : "max",
    'machine'      : "virt,gic-version=max",
}

# The minimum minor version of QEMU we will support with aarch64 VMs is 3.
# QEMU versions less than 3 have various issues running these VMs.
QEMU_AARCH64_MIN_VERSION = 3

# The DEFAULT_CONFIG will default to a version of
# parameters that works for backwards compatibility.
DEFAULT_CONFIG = {'kvm' : {'cpu'          : "host",
                           'machine'      : "virt,gic-version=host"},
                  'tcg' : {'cpu'          : "cortex-a57",
                           'machine'      : "virt"},
}

def get_config_defaults(vmcls, default_config):
    """Fetch the configuration defaults for this VM,
       taking into consideration the defaults for
       aarch64 first, followed by the defaults for this VM."""
    config = default_config
    config.update(aarch_get_config_defaults(vmcls))
    return config

def aarch_get_config_defaults(vmcls):
    """Set the defaults for current version of QEMU."""
    config = CURRENT_CONFIG
    args = basevm.parse_args(vmcls)
    qemu_path = basevm.get_qemu_path(vmcls.arch, args.build_path)
    qemu_version = basevm.get_qemu_version(qemu_path)
    if qemu_version < QEMU_AARCH64_MIN_VERSION:
        error = "\nThis major version of QEMU {} is to old for aarch64 VMs.\n"\
                "The major version must be at least {}.\n"\
                "To continue with the current build of QEMU, "\
                "please restart with QEMU_LOCAL=1 .\n"
        print(error.format(qemu_version, QEMU_AARCH64_MIN_VERSION))
        exit(1)
    if qemu_version == QEMU_AARCH64_MIN_VERSION:
        # We have an older version of QEMU,
        # set the config values for backwards compatibility.
        if kvm_available('aarch64'):
            config.update(DEFAULT_CONFIG['kvm'])
        else:
            config.update(DEFAULT_CONFIG['tcg'])
    return config

def create_flash_images(flash_dir="./", efi_img=""):
    """Creates the appropriate pflash files
       for an aarch64 VM."""
    flash0_path = get_flash_path(flash_dir, "flash0")
    flash1_path = get_flash_path(flash_dir, "flash1")
    fd_null = open(os.devnull, 'w')
    subprocess.check_call(["dd", "if=/dev/zero", "of={}".format(flash0_path),
                           "bs=1M", "count=64"],
                          stdout=fd_null, stderr=subprocess.STDOUT)
    # A reliable way to get the QEMU EFI image is via an installed package or
    # via the bios included with qemu.
    if not os.path.exists(efi_img):
        sys.stderr.write("*** efi argument is invalid ({})\n".format(efi_img))
        sys.stderr.write("*** please check --efi-aarch64 argument or "\
                         "install qemu-efi-aarch64 package\n")
        exit(3)
    subprocess.check_call(["dd", "if={}".format(efi_img),
                           "of={}".format(flash0_path),
                           "conv=notrunc"],
                          stdout=fd_null, stderr=subprocess.STDOUT)
    subprocess.check_call(["dd", "if=/dev/zero",
                           "of={}".format(flash1_path),
                           "bs=1M", "count=64"],
                          stdout=fd_null, stderr=subprocess.STDOUT)
    fd_null.close()

def get_pflash_args(flash_dir="./"):
    """Returns a string that can be used to
       boot qemu using the appropriate pflash files
       for aarch64."""
    flash0_path = get_flash_path(flash_dir, "flash0")
    flash1_path = get_flash_path(flash_dir, "flash1")
    pflash_args_str = "-drive file={},format=raw,if=pflash "\
                      "-drive file={},format=raw,if=pflash"
    pflash_args = pflash_args_str.format(flash0_path, flash1_path)
    return pflash_args.split(" ")

def get_flash_path(flash_dir, name):
    return os.path.join(flash_dir, "{}.img".format(name))
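A minimal sketch of how these helpers are meant to be combined, mirroring the centos.aarch64 script later in this series; the ExampleAarch64VM class and its values are illustrative assumptions, not part of the patch.

    # Sketch of a per-architecture VM script using the helpers above.
    import sys
    import basevm
    import aarch64vm

    DEFAULT_CONFIG = {
        'cpu'     : "max",
        'machine' : "virt,gic-version=max",
    }

    class ExampleAarch64VM(basevm.BaseVM):
        name = "example.aarch64"
        arch = "aarch64"

        def boot(self, img, extra_args=None):
            # Build the two pflash images (EFI code + vars) in the temp dir
            # and append the matching -drive arguments for QEMU.
            aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64)
            pflash = aarch64vm.get_pflash_args(self._tmpdir)
            super().boot(img, extra_args=(extra_args or []) + pflash)

    if __name__ == "__main__":
        defaults = aarch64vm.get_config_defaults(ExampleAarch64VM, DEFAULT_CONFIG)
        sys.exit(basevm.main(ExampleAarch64VM, defaults))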
@@ -23,22 +23,47 @@ from qemu.accel import kvm_available
 from qemu.machine import QEMUMachine
 import subprocess
 import hashlib
-import optparse
+import argparse
 import atexit
 import tempfile
 import shutil
 import multiprocessing
 import traceback
+import shlex

-SSH_KEY = open(os.path.join(os.path.dirname(__file__),
-               "..", "keys", "id_rsa")).read()
-SSH_PUB_KEY = open(os.path.join(os.path.dirname(__file__),
-                   "..", "keys", "id_rsa.pub")).read()
+SSH_KEY_FILE = os.path.join(os.path.dirname(__file__),
+                            "..", "keys", "id_rsa")
+SSH_PUB_KEY_FILE = os.path.join(os.path.dirname(__file__),
+                                "..", "keys", "id_rsa.pub")
+
+# This is the standard configuration.
+# Any or all of these can be overridden by
+# passing in a config argument to the VM constructor.
+DEFAULT_CONFIG = {
+    'cpu'             : "max",
+    'machine'         : 'pc',
+    'guest_user'      : "qemu",
+    'guest_pass'      : "qemupass",
+    'root_pass'       : "qemupass",
+    'ssh_key_file'    : SSH_KEY_FILE,
+    'ssh_pub_key_file': SSH_PUB_KEY_FILE,
+    'memory'          : "4G",
+    'extra_args'      : [],
+    'qemu_args'       : "",
+    'dns'             : "",
+    'ssh_port'        : 0,
+    'install_cmds'    : "",
+    'boot_dev_type'   : "block",
+    'ssh_timeout'     : 1,
+}
+BOOT_DEVICE = {
+    'block' : "-drive file={},if=none,id=drive0,cache=writeback "\
+              "-device virtio-blk,drive=drive0,bootindex=0",
+    'scsi'  : "-device virtio-scsi-device,id=scsi "\
+              "-drive file={},format=raw,if=none,id=hd0 "\
+              "-device scsi-hd,drive=hd0,bootindex=0",
+}
 class BaseVM(object):
-    GUEST_USER = "qemu"
-    GUEST_PASS = "qemupass"
-    ROOT_PASS = "qemupass"

     envvars = [
         "https_proxy",
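For context, the intent of the new DEFAULT_CONFIG/BOOT_DEVICE tables is that callers override only the keys they care about. A minimal sketch under that assumption; the CustomVM class and the chosen values are hypothetical, not part of the patch.

    # Override a few DEFAULT_CONFIG keys when constructing a VM (sketch only).
    import basevm

    class CustomVM(basevm.BaseVM):
        name = "custom"
        arch = "x86_64"

    args = basevm.parse_args(CustomVM)
    config = {
        'memory'        : "8G",    # instead of the 4G default
        'boot_dev_type' : "scsi",  # boot via the virtio-scsi entry in BOOT_DEVICE
        'ssh_timeout'   : 60,      # slower guests need a longer ConnectTimeout
    }
    vm = CustomVM(args, config=config)   # unspecified keys keep their defaults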
@@ -57,49 +82,112 @@ class BaseVM(object):
     poweroff = "poweroff"
     # enable IPv6 networking
     ipv6 = True
+    # This is the timeout on the wait for console bytes.
+    socket_timeout = 120
     # Scale up some timeouts under TCG.
     # 4 is arbitrary, but greater than 2,
     # since we found we need to wait more than twice as long.
     tcg_ssh_timeout_multiplier = 4
-    def __init__(self, debug=False, vcpus=None, genisoimage=None,
-                 build_path=None):
+    def __init__(self, args, config=None):
         self._guest = None
-        self._genisoimage = genisoimage
-        self._build_path = build_path
+        self._genisoimage = args.genisoimage
+        self._build_path = args.build_path
+        self._efi_aarch64 = args.efi_aarch64
+        # Allow input config to override defaults.
+        self._config = DEFAULT_CONFIG.copy()
+        if config != None:
+            self._config.update(config)
+        self.validate_ssh_keys()
         self._tmpdir = os.path.realpath(tempfile.mkdtemp(prefix="vm-test-",
                                                          suffix=".tmp",
                                                          dir="."))
         atexit.register(shutil.rmtree, self._tmpdir)
+        # Copy the key files to a temporary directory.
+        # Also chmod the key file to agree with ssh requirements.
+        self._config['ssh_key'] = \
+            open(self._config['ssh_key_file']).read().rstrip()
+        self._config['ssh_pub_key'] = \
+            open(self._config['ssh_pub_key_file']).read().rstrip()
+        self._ssh_tmp_key_file = os.path.join(self._tmpdir, "id_rsa")
+        open(self._ssh_tmp_key_file, "w").write(self._config['ssh_key'])
+        subprocess.check_call(["chmod", "600", self._ssh_tmp_key_file])
+        self._ssh_tmp_pub_key_file = os.path.join(self._tmpdir, "id_rsa.pub")
+        open(self._ssh_tmp_pub_key_file,
+             "w").write(self._config['ssh_pub_key'])

-        self._ssh_key_file = os.path.join(self._tmpdir, "id_rsa")
-        open(self._ssh_key_file, "w").write(SSH_KEY)
-        subprocess.check_call(["chmod", "600", self._ssh_key_file])
-
-        self._ssh_pub_key_file = os.path.join(self._tmpdir, "id_rsa.pub")
-        open(self._ssh_pub_key_file, "w").write(SSH_PUB_KEY)
-
-        self.debug = debug
+        self.debug = args.debug
+        self._console_log_path = None
+        if args.log_console:
+            self._console_log_path = \
+                os.path.join(os.path.expanduser("~/.cache/qemu-vm"),
+                             "{}.install.log".format(self.name))
         self._stderr = sys.stderr
         self._devnull = open(os.devnull, "w")
         if self.debug:
             self._stdout = sys.stdout
         else:
             self._stdout = self._devnull
+        netdev = "user,id=vnet,hostfwd=:127.0.0.1:{}-:22"
         self._args = [ \
-            "-nodefaults", "-m", "4G",
-            "-cpu", "max",
-            "-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22" +
-            (",ipv6=no" if not self.ipv6 else ""),
+            "-nodefaults", "-m", self._config['memory'],
+            "-cpu", self._config['cpu'],
+            "-netdev",
+            netdev.format(self._config['ssh_port']) +
+            (",ipv6=no" if not self.ipv6 else "") +
+            (",dns=" + self._config['dns'] if self._config['dns'] else ""),
             "-device", "virtio-net-pci,netdev=vnet",
             "-vnc", "127.0.0.1:0,to=20"]
-        if vcpus and vcpus > 1:
-            self._args += ["-smp", "%d" % vcpus]
+        if args.jobs and args.jobs > 1:
+            self._args += ["-smp", "%d" % args.jobs]
         if kvm_available(self.arch):
             self._args += ["-enable-kvm"]
         else:
             logging.info("KVM not available, not using -enable-kvm")
         self._data_args = []

+        if self._config['qemu_args'] != None:
+            qemu_args = self._config['qemu_args']
+            qemu_args = qemu_args.replace('\n',' ').replace('\r','')
+            # shlex groups quoted arguments together
+            # we need this to keep the quoted args together for when
+            # the QEMU command is issued later.
+            args = shlex.split(qemu_args)
+            self._config['extra_args'] = []
+            for arg in args:
+                if arg:
+                    # Preserve quotes around arguments.
+                    # shlex above takes them out, so add them in.
+                    if " " in arg:
+                        arg = '"{}"'.format(arg)
+                    self._config['extra_args'].append(arg)
+
+    def validate_ssh_keys(self):
+        """Check to see if the ssh key files exist."""
+        if 'ssh_key_file' not in self._config or\
+           not os.path.exists(self._config['ssh_key_file']):
+            raise Exception("ssh key file not found.")
+        if 'ssh_pub_key_file' not in self._config or\
+           not os.path.exists(self._config['ssh_pub_key_file']):
+            raise Exception("ssh pub key file not found.")
+
+    def wait_boot(self, wait_string=None):
+        """Wait for the standard string we expect
+           on completion of a normal boot.
+           The user can also choose to override with an
+           alternate string to wait for."""
+        if wait_string is None:
+            if self.login_prompt is None:
+                raise Exception("self.login_prompt not defined")
+            wait_string = self.login_prompt
+        # Intentionally bump up the default timeout under TCG,
+        # since the console wait below takes longer.
+        timeout = self.socket_timeout
+        if not kvm_available(self.arch):
+            timeout *= 8
+        self.console_init(timeout=timeout)
+        self.console_wait(wait_string)
+
     def _download_with_cache(self, url, sha256sum=None, sha512sum=None):
         def check_sha256sum(fname):
             if not sha256sum:
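The shlex handling above is easiest to see with a concrete value; the snippet below only illustrates the quoting behaviour and is not part of the patch (the argument values are invented).

    # Why shlex.split() is used for qemu_args.
    import shlex

    qemu_args = '-smp cpus=8,sockets=2,cores=4 -name "my test guest"'
    args = shlex.split(qemu_args)
    # ['-smp', 'cpus=8,sockets=2,cores=4', '-name', 'my test guest']
    # The quoted string stays one argument; the loop in __init__ then re-adds
    # quotes around any argument containing a space before handing it to QEMU.
    extra_args = ['"{}"'.format(a) if " " in a else a for a in args]
    print(extra_args)  # ['-smp', 'cpus=8,sockets=2,cores=4', '-name', '"my test guest"']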
@@ -131,8 +219,9 @@ class BaseVM(object):
                "-t",
                "-o", "StrictHostKeyChecking=no",
                "-o", "UserKnownHostsFile=" + os.devnull,
-               "-o", "ConnectTimeout=1",
-               "-p", self.ssh_port, "-i", self._ssh_key_file]
+               "-o",
+               "ConnectTimeout={}".format(self._config["ssh_timeout"]),
+               "-p", self.ssh_port, "-i", self._ssh_tmp_key_file]
         # If not in debug mode, set ssh to quiet mode to
         # avoid printing the results of commands.
         if not self.debug:
@@ -148,13 +237,13 @@ class BaseVM(object):
         return r

     def ssh(self, *cmd):
-        return self._ssh_do(self.GUEST_USER, cmd, False)
+        return self._ssh_do(self._config["guest_user"], cmd, False)

     def ssh_root(self, *cmd):
         return self._ssh_do("root", cmd, False)

     def ssh_check(self, *cmd):
-        self._ssh_do(self.GUEST_USER, cmd, True)
+        self._ssh_do(self._config["guest_user"], cmd, True)

     def ssh_root_check(self, *cmd):
         self._ssh_do("root", cmd, True)
@@ -181,14 +270,20 @@ class BaseVM(object):
                        "virtio-blk,drive=%s,serial=%s,bootindex=1" % (name, name)]

     def boot(self, img, extra_args=[]):
-        args = self._args + [
-            "-drive", "file=%s,if=none,id=drive0,cache=writeback" % img,
-            "-device", "virtio-blk,drive=drive0,bootindex=0"]
-        args += self._data_args + extra_args
+        boot_dev = BOOT_DEVICE[self._config['boot_dev_type']]
+        boot_params = boot_dev.format(img)
+        args = self._args + boot_params.split(' ')
+        args += self._data_args + extra_args + self._config['extra_args']
         logging.debug("QEMU args: %s", " ".join(args))
         qemu_path = get_qemu_path(self.arch, self._build_path)
-        guest = QEMUMachine(binary=qemu_path, args=args)
-        guest.set_machine('pc')
+        # Since console_log_path is only set when the user provides the
+        # log_console option, we will set drain_console=True so the
+        # console is always drained.
+        guest = QEMUMachine(binary=qemu_path, args=args,
+                            console_log=self._console_log_path,
+                            drain_console=True)
+        guest.set_machine(self._config['machine'])
         guest.set_console()
         try:
             guest.launch()
@@ -201,6 +296,8 @@ class BaseVM(object):
             raise
         atexit.register(self.shutdown)
         self._guest = guest
+        # Init console so we can start consuming the chars.
+        self.console_init()
         usernet_info = guest.qmp("human-monitor-command",
                                  command_line="info usernet")
         self.ssh_port = None
@@ -212,7 +309,9 @@ class BaseVM(object):
             raise Exception("Cannot find ssh port from 'info usernet':\n%s" % \
                             usernet_info)

-    def console_init(self, timeout = 120):
+    def console_init(self, timeout = None):
+        if timeout == None:
+            timeout = self.socket_timeout
         vm = self._guest
         vm.console_socket.settimeout(timeout)
         self.console_raw_path = os.path.join(vm._temp_dir,
@@ -302,7 +401,8 @@ class BaseVM(object):
         self.console_send(command)

     def console_ssh_init(self, prompt, user, pw):
-        sshkey_cmd = "echo '%s' > .ssh/authorized_keys\n" % SSH_PUB_KEY.rstrip()
+        sshkey_cmd = "echo '%s' > .ssh/authorized_keys\n" \
+                     % self._config['ssh_pub_key'].rstrip()
         self.console_wait_send("login:", "%s\n" % user)
         self.console_wait_send("Password:", "%s\n" % pw)
         self.console_wait_send(prompt, "mkdir .ssh\n")
@@ -361,23 +461,23 @@ class BaseVM(object):
                           "local-hostname: {}-guest\n".format(name)])
         mdata.close()
         udata = open(os.path.join(cidir, "user-data"), "w")
-        print("guest user:pw {}:{}".format(self.GUEST_USER,
-                                           self.GUEST_PASS))
+        print("guest user:pw {}:{}".format(self._config['guest_user'],
+                                           self._config['guest_pass']))
         udata.writelines(["#cloud-config\n",
                           "chpasswd:\n",
                           "  list: |\n",
-                          "    root:%s\n" % self.ROOT_PASS,
-                          "    %s:%s\n" % (self.GUEST_USER,
-                                           self.GUEST_PASS),
+                          "    root:%s\n" % self._config['root_pass'],
+                          "    %s:%s\n" % (self._config['guest_user'],
+                                           self._config['guest_pass']),
                           "  expire: False\n",
                           "users:\n",
-                          "  - name: %s\n" % self.GUEST_USER,
+                          "  - name: %s\n" % self._config['guest_user'],
                           "    sudo: ALL=(ALL) NOPASSWD:ALL\n",
                           "    ssh-authorized-keys:\n",
-                          "    - %s\n" % SSH_PUB_KEY,
+                          "    - %s\n" % self._config['ssh_pub_key'],
                           "  - name: root\n",
                           "    ssh-authorized-keys:\n",
-                          "    - %s\n" % SSH_PUB_KEY,
+                          "    - %s\n" % self._config['ssh_pub_key'],
                           "locale: en_US.UTF-8\n"])
         proxy = os.environ.get("http_proxy")
         if not proxy is None:
@@ -390,7 +490,6 @@ class BaseVM(object):
                                cwd=cidir,
                                stdin=self._devnull, stdout=self._stdout,
                                stderr=self._stdout)
-
         return os.path.join(cidir, "cloud-init.iso")

 def get_qemu_path(arch, build_path=None):
@@ -406,58 +505,121 @@ def get_qemu_path(arch, build_path=None):
     qemu_path = "qemu-system-" + arch
     return qemu_path

+def get_qemu_version(qemu_path):
+    """Get the version number from the current QEMU,
+       and return the major number."""
+    output = subprocess.check_output([qemu_path, '--version'])
+    version_line = output.decode("utf-8")
+    version_num = re.split(' |\(', version_line)[3].split('.')[0]
+    return int(version_num)
+
+def parse_config(config, args):
+    """ Parse yaml config and populate our config structure.
+        The yaml config allows the user to override the
+        defaults for VM parameters.  In many cases these
+        defaults can be overridden without rebuilding the VM."""
+    if args.config:
+        config_file = args.config
+    elif 'QEMU_CONFIG' in os.environ:
+        config_file = os.environ['QEMU_CONFIG']
+    else:
+        return config
+    if not os.path.exists(config_file):
+        raise Exception("config file {} does not exist".format(config_file))
+    # We gracefully handle importing the yaml module
+    # since it might not be installed.
+    # If we are here it means the user supplied a .yml file,
+    # so if the yaml module is not installed we will exit with error.
+    try:
+        import yaml
+    except ImportError:
+        print("The python3-yaml package is needed "\
+              "to support config.yaml files")
+        # Instead of raising an exception we exit to avoid
+        # a raft of messy (expected) errors to stdout.
+        exit(1)
+    with open(config_file) as f:
+        yaml_dict = yaml.safe_load(f)
+
+    if 'qemu-conf' in yaml_dict:
+        config.update(yaml_dict['qemu-conf'])
+    else:
+        raise Exception("config file {} is not valid"\
+                        " missing qemu-conf".format(config_file))
+    return config
+
 def parse_args(vmcls):

     def get_default_jobs():
-        if kvm_available(vmcls.arch):
-            return multiprocessing.cpu_count() // 2
+        if multiprocessing.cpu_count() > 1:
+            if kvm_available(vmcls.arch):
+                return multiprocessing.cpu_count() // 2
+            elif os.uname().machine == "x86_64" and \
+                 vmcls.arch in ["aarch64", "x86_64", "i386"]:
+                # MTTCG is available on these arches and we can allow
+                # more cores. but only up to a reasonable limit. User
+                # can always override these limits with --jobs.
+                return min(multiprocessing.cpu_count() // 2, 8)
         else:
             return 1

-    parser = optparse.OptionParser(
-        description="VM test utility.  Exit codes: "
-                    "0 = success, "
-                    "1 = command line error, "
-                    "2 = environment initialization failed, "
-                    "3 = test command failed")
-    parser.add_option("--debug", "-D", action="store_true",
-                      help="enable debug output")
-    parser.add_option("--image", "-i", default="%s.img" % vmcls.name,
-                      help="image file name")
-    parser.add_option("--force", "-f", action="store_true",
-                      help="force build image even if image exists")
-    parser.add_option("--jobs", type=int, default=get_default_jobs(),
-                      help="number of virtual CPUs")
-    parser.add_option("--verbose", "-V", action="store_true",
-                      help="Pass V=1 to builds within the guest")
-    parser.add_option("--build-image", "-b", action="store_true",
-                      help="build image")
-    parser.add_option("--build-qemu",
-                      help="build QEMU from source in guest")
-    parser.add_option("--build-target",
-                      help="QEMU build target", default="check")
-    parser.add_option("--build-path", default=None,
-                      help="Path of build directory, "\
-                           "for using build tree QEMU binary. ")
-    parser.add_option("--interactive", "-I", action="store_true",
-                      help="Interactively run command")
-    parser.add_option("--snapshot", "-s", action="store_true",
-                      help="run tests with a snapshot")
-    parser.add_option("--genisoimage", default="genisoimage",
-                      help="iso imaging tool")
-    parser.disable_interspersed_args()
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        description="Utility for provisioning VMs and running builds",
+        epilog="""Remaining arguments are passed to the command.
+        Exit codes: 0 = success, 1 = command line error,
+        2 = environment initialization failed,
+        3 = test command failed""")
+    parser.add_argument("--debug", "-D", action="store_true",
+                        help="enable debug output")
+    parser.add_argument("--image", "-i", default="%s.img" % vmcls.name,
+                        help="image file name")
+    parser.add_argument("--force", "-f", action="store_true",
+                        help="force build image even if image exists")
+    parser.add_argument("--jobs", type=int, default=get_default_jobs(),
+                        help="number of virtual CPUs")
+    parser.add_argument("--verbose", "-V", action="store_true",
+                        help="Pass V=1 to builds within the guest")
+    parser.add_argument("--build-image", "-b", action="store_true",
+                        help="build image")
+    parser.add_argument("--build-qemu",
+                        help="build QEMU from source in guest")
+    parser.add_argument("--build-target",
+                        help="QEMU build target", default="check")
+    parser.add_argument("--build-path", default=None,
+                        help="Path of build directory, "\
+                             "for using build tree QEMU binary. ")
+    parser.add_argument("--interactive", "-I", action="store_true",
+                        help="Interactively run command")
+    parser.add_argument("--snapshot", "-s", action="store_true",
+                        help="run tests with a snapshot")
+    parser.add_argument("--genisoimage", default="genisoimage",
+                        help="iso imaging tool")
+    parser.add_argument("--config", "-c", default=None,
+                        help="Provide config yaml for configuration. "\
+                             "See config_example.yaml for example.")
+    parser.add_argument("--efi-aarch64",
+                        default="/usr/share/qemu-efi-aarch64/QEMU_EFI.fd",
+                        help="Path to efi image for aarch64 VMs.")
+    parser.add_argument("--log-console", action="store_true",
+                        help="Log console to file.")
+    parser.add_argument("commands", nargs="*", help="""Remaining
+        commands after -- are passed to command inside the VM""")

     return parser.parse_args()

-def main(vmcls):
+def main(vmcls, config=None):
     try:
-        args, argv = parse_args(vmcls)
-        if not argv and not args.build_qemu and not args.build_image:
+        if config == None:
+            config = DEFAULT_CONFIG
+        args = parse_args(vmcls)
+        if not args.commands and not args.build_qemu and not args.build_image:
             print("Nothing to do?")
             return 1
+        config = parse_config(config, args)
         logging.basicConfig(level=(logging.DEBUG if args.debug
                                    else logging.WARN))
-        vm = vmcls(debug=args.debug, vcpus=args.jobs,
-                   genisoimage=args.genisoimage, build_path=args.build_path)
+        vm = vmcls(args, config=config)
         if args.build_image:
             if os.path.exists(args.image) and not args.force:
                 sys.stderr.writelines(["Image file exists: %s\n" % args.image,
@@ -467,12 +629,12 @@ def main(vmcls):
         if args.build_qemu:
             vm.add_source_dir(args.build_qemu)
             cmd = [vm.BUILD_SCRIPT.format(
-                   configure_opts = " ".join(argv),
+                   configure_opts = " ".join(args.commands),
                    jobs=int(args.jobs),
                    target=args.build_target,
                    verbose = "V=1" if args.verbose else "")]
         else:
-            cmd = argv
+            cmd = args.commands
         img = args.image
         if args.snapshot:
             img += ",snapshot=on"
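A sketch of the structure parse_config() expects, using a trimmed-down version of the conf_example_*.yml files added later in this series; it needs the python3-yaml package and is for illustration only.

    # Minimal demonstration of the qemu-conf yaml layout parse_config() reads.
    import yaml

    EXAMPLE = """
    qemu-conf:
        guest_user: qemu
        guest_pass: "qemupass"
        root_pass: "qemupass"
        memory: 8G
        ssh_port: 5555
    """

    yaml_dict = yaml.safe_load(EXAMPLE)
    config = {}
    config.update(yaml_dict['qemu-conf'])   # same update parse_config() performs
    print(config['memory'], config['ssh_port'])   # 8G 5555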
@@ -0,0 +1,51 @@
# CentOS aarch64 image kickstart file.
# This file is used by the CentOS installer to
# script the generation of the image.
#
# Copyright 2020 Linaro
#
ignoredisk --only-use=vda
# System bootloader configuration
bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=vda
autopart --type=plain
# Partition clearing information
clearpart --linux --initlabel --drives=vda
# Use text mode install
text
repo --name="AppStream" --baseurl=file:///run/install/repo/AppStream
# Use CDROM installation media
cdrom
# Keyboard layouts
keyboard --vckeymap=us --xlayouts=''
# System language
lang en_US.UTF-8

# Network information
network --bootproto=dhcp --device=enp0s1 --onboot=off --ipv6=auto --no-activate
network --hostname=localhost.localdomain
# Run the Setup Agent on first boot
firstboot --enable
# Do not configure the X Window System
skipx
# System services
services --enabled="chronyd"
# System timezone
timezone America/New_York --isUtc

# Shutdown after installation is complete.
shutdown

%packages
@^server-product-environment
kexec-tools

%end

%addon com_redhat_kdump --enable --reserve-mb='auto'

%end

%anaconda
pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty
pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok
pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty
%end
@@ -0,0 +1,227 @@
#!/usr/bin/env python3
#
# Centos aarch64 image
#
# Copyright 2020 Linaro
#
# Authors:
#  Robert Foley <robert.foley@linaro.org>
#  Originally based on ubuntu.aarch64
#
# This code is licensed under the GPL version 2 or later.  See
# the COPYING file in the top-level directory.
#

import os
import sys
import subprocess
import basevm
import time
import traceback
import aarch64vm

DEFAULT_CONFIG = {
    'cpu'          : "max",
    'machine'      : "virt,gic-version=max",
    'install_cmds' : "yum install -y make git python3 gcc gcc-c++ flex bison, "\
                     "yum install -y glib2-devel pixman-devel zlib-devel, "\
                     "yum install -y perl-Test-Harness, "\
                     "alternatives --set python /usr/bin/python3, "\
                     "sudo dnf config-manager "\
                     "--add-repo=https://download.docker.com/linux/centos/docker-ce.repo,"\
                     "sudo dnf install --nobest -y docker-ce.aarch64,"\
                     "systemctl enable docker",
    # We increase beyond the default time since during boot
    # it can take some time (many seconds) to log into the VM.
    'ssh_timeout'  : 60,
}

class CentosAarch64VM(basevm.BaseVM):
    name = "centos.aarch64"
    arch = "aarch64"
    login_prompt = "localhost login:"
    prompt = '[root@localhost ~]#'
    image_name = "CentOS-8-aarch64-1905-dvd1.iso"
    image_link = "http://mirrors.usc.edu/pub/linux/distributions/centos/8.0.1905/isos/aarch64/"
    image_link += image_name
    BUILD_SCRIPT = """
        set -e;
        cd $(mktemp -d);
        sudo chmod a+r /dev/vdb;
        tar --checkpoint=.10 -xf /dev/vdb;
        ./configure {configure_opts};
        make --output-sync {target} -j{jobs} {verbose};
    """
    def set_key_perm(self):
        """Set permissions properly on certain files to allow
           ssh access."""
        self.console_wait_send(self.prompt,
                               "/usr/sbin/restorecon -R -v /root/.ssh\n")
        self.console_wait_send(self.prompt,
                               "/usr/sbin/restorecon -R -v "\
                               "/home/{}/.ssh\n".format(self._config["guest_user"]))

    def create_kickstart(self):
        """Generate the kickstart file used to generate the centos image."""
        # Start with the template for the kickstart.
        ks_file = "../tests/vm/centos-8-aarch64.ks"
        subprocess.check_call("cp {} ./ks.cfg".format(ks_file), shell=True)
        # Append the ssh keys to the kickstart file
        # as the post processing phase of installation.
        with open("ks.cfg", "a") as f:
            # Add in the root pw and guest user.
            rootpw = "rootpw --plaintext {}\n"
            f.write(rootpw.format(self._config["root_pass"]))
            add_user = "user --groups=wheel --name={} "\
                       "--password={} --plaintext\n"
            f.write(add_user.format(self._config["guest_user"],
                                    self._config["guest_pass"]))
            # Add the ssh keys.
            f.write("%post --log=/root/ks-post.log\n")
            f.write("mkdir -p /root/.ssh\n")
            addkey = 'echo "{}" >> /root/.ssh/authorized_keys\n'
            addkey_cmd = addkey.format(self._config["ssh_pub_key"])
            f.write(addkey_cmd)
            f.write('mkdir -p /home/{}/.ssh\n'.format(self._config["guest_user"]))
            addkey = 'echo "{}" >> /home/{}/.ssh/authorized_keys\n'
            addkey_cmd = addkey.format(self._config["ssh_pub_key"],
                                       self._config["guest_user"])
            f.write(addkey_cmd)
            f.write("%end\n")
        # Take our kickstart file and create an .iso from it.
        # The .iso will be provided to qemu as we boot
        # from the install dvd.
        # Anaconda will recognize the label "OEMDRV" and will
        # start the automated installation.
        gen_iso_img = 'genisoimage -output ks.iso -volid "OEMDRV" ks.cfg'
        subprocess.check_call(gen_iso_img, shell=True)

    def wait_for_shutdown(self):
        """We wait for qemu to shutdown the VM and exit.
           While this happens we display the console view
           for easier debugging."""
        # The image creation is essentially done,
        # so whether or not the wait is successful we want to
        # wait for qemu to exit (the self.wait()) before we return.
        try:
            self.console_wait("reboot: Power down")
        except Exception as e:
            sys.stderr.write("Exception hit\n")
            if isinstance(e, SystemExit) and e.code == 0:
                return 0
            traceback.print_exc()
        finally:
            self.wait()

    def build_base_image(self, dest_img):
        """Run through the centos installer to create
           a base image with name dest_img."""
        # We create the temp image, and only rename
        # to destination when we are done.
        img = dest_img + ".tmp"
        # Create an empty image.
        # We will provide this as the install destination.
        qemu_img_create = "qemu-img create {} 50G".format(img)
        subprocess.check_call(qemu_img_create, shell=True)

        # Create our kickstart file to be fed to the installer.
        self.create_kickstart()
        # Boot the install dvd with the params as our ks.iso
        os_img = self._download_with_cache(self.image_link)
        dvd_iso = "centos-8-dvd.iso"
        subprocess.check_call(["cp", "-f", os_img, dvd_iso])
        extra_args = "-cdrom ks.iso"
        extra_args += " -drive file={},if=none,id=drive1,cache=writeback"
        extra_args += " -device virtio-blk,drive=drive1,bootindex=1"
        extra_args = extra_args.format(dvd_iso).split(" ")
        self.boot(img, extra_args=extra_args)
        self.console_wait_send("change the selection", "\n")
        # We seem to need to hit esc (chr(27)) twice to abort the
        # media check, which takes a long time.
        # Waiting a bit seems to be more reliable before hitting esc.
        self.console_wait("Checking")
        time.sleep(5)
        self.console_wait_send("Checking", chr(27))
        time.sleep(5)
        self.console_wait_send("Checking", chr(27))
        print("Found Checking")
        # Give sufficient time for the installer to create the image.
        self.console_init(timeout=7200)
        self.wait_for_shutdown()
        os.rename(img, dest_img)
        print("Done with base image build: {}".format(dest_img))

    def check_create_base_img(self, img_base, img_dest):
        """Create a base image using the installer.
           We will use the base image if it exists.
           This helps cut down on install time in case we
           need to restart image creation,
           since the base image creation can take a long time."""
        if not os.path.exists(img_base):
            print("Generate new base image: {}".format(img_base))
            self.build_base_image(img_base);
        else:
            print("Use existing base image: {}".format(img_base))
        # Save a copy of the base image and copy it to dest.
        # which we will use going forward.
        subprocess.check_call(["cp", img_base, img_dest])

    def boot(self, img, extra_args=None):
        aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64)
        default_args = aarch64vm.get_pflash_args(self._tmpdir)
        if extra_args:
            extra_args.extend(default_args)
        else:
            extra_args = default_args
        # We always add these performance tweaks
        # because without them, we boot so slowly that we
        # can time out finding the boot efi device.
        if '-smp' not in extra_args and \
           '-smp' not in self._config['extra_args'] and \
           '-smp' not in self._args:
            # Only add if not already there to give caller option to change it.
            extra_args.extend(["-smp", "8"])
        # We have overridden boot() since aarch64 has additional parameters.
        # Call down to the base class method.
        super(CentosAarch64VM, self).boot(img, extra_args=extra_args)

    def build_image(self, img):
        img_tmp = img + ".tmp"
        self.check_create_base_img(img + ".base", img_tmp)

        # Boot the new image for the first time to finish installation.
        self.boot(img_tmp)
        self.console_init()
        self.console_wait_send(self.login_prompt, "root\n")
        self.console_wait_send("Password:",
                               "{}\n".format(self._config["root_pass"]))

        self.set_key_perm()
        self.console_wait_send(self.prompt, "rpm -q centos-release\n")
        enable_adapter = "sed -i 's/ONBOOT=no/ONBOOT=yes/g'" \
                         " /etc/sysconfig/network-scripts/ifcfg-enp0s1\n"
        self.console_wait_send(self.prompt, enable_adapter)
        self.console_wait_send(self.prompt, "ifup enp0s1\n")
        self.console_wait_send(self.prompt,
                               'echo "qemu   ALL=(ALL) NOPASSWD:ALL" | '\
                               'sudo tee /etc/sudoers.d/qemu\n')
        self.console_wait(self.prompt)

        # Rest of the commands we issue through ssh.
        self.wait_ssh(wait_root=True)

        # If the user chooses *not* to do the second phase,
        # then we will jump right to the graceful shutdown
        if self._config['install_cmds'] != "":
            install_cmds = self._config['install_cmds'].split(',')
            for cmd in install_cmds:
                self.ssh_root(cmd)
        self.ssh_root("poweroff")
        self.wait_for_shutdown()
        os.rename(img_tmp, img)
        print("image creation complete: {}".format(img))
        return 0

if __name__ == "__main__":
    defaults = aarch64vm.get_config_defaults(CentosAarch64VM, DEFAULT_CONFIG)
    sys.exit(basevm.main(CentosAarch64VM, defaults))
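For reference, the BUILD_SCRIPT template above is filled in by basevm.main() over ssh; a rough illustration of the substitution follows (the option values are invented).

    # How basevm.main() instantiates BUILD_SCRIPT (values invented).
    BUILD_SCRIPT = """
        set -e;
        cd $(mktemp -d);
        sudo chmod a+r /dev/vdb;
        tar --checkpoint=.10 -xf /dev/vdb;
        ./configure {configure_opts};
        make --output-sync {target} -j{jobs} {verbose};
    """

    cmd = BUILD_SCRIPT.format(configure_opts="--disable-docs",
                              jobs=8,
                              target="check",
                              verbose="V=1")
    print(cmd)   # the shell script that is run inside the guest over ssh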
@@ -0,0 +1,51 @@
#
# Example yaml for use by any of the scripts in tests/vm.
# Can be provided as an environment variable QEMU_CONFIG
#
qemu-conf:

    # If any of the below are not provided, we will just use the qemu defaults.

    # Login username and password(has to be sudo enabled)
    guest_user: qemu
    guest_pass: "qemupass"

    # Password for root user can be different from guest.
    root_pass: "qemupass"

    # If one key is provided, both must be provided.
    #ssh_key: /complete/path/of/your/keyfile/id_rsa
    #ssh_pub_key: /complete/path/of/your/keyfile/id_rsa.pub

    cpu: max
    machine: virt,gic-version=max
    memory: 16G

    # The below is a example for how to configure NUMA topology with
    # 4 NUMA nodes and 2 different NUMA distances.
    qemu_args: "-smp cpus=16,sockets=2,cores=8
                -numa node,cpus=0-3,nodeid=0 -numa node,cpus=4-7,nodeid=1
                -numa node,cpus=8-11,nodeid=2 -numa node,cpus=12-15,nodeid=3
                -numa dist,src=0,dst=1,val=15 -numa dist,src=2,dst=3,val=15
                -numa dist,src=0,dst=2,val=20 -numa dist,src=0,dst=3,val=20
                -numa dist,src=1,dst=2,val=20 -numa dist,src=1,dst=3,val=20"

    # By default we do not set the DNS.
    # You override the defaults by setting the below.
    #dns: 1.234.567.89

    # By default we will use a "block" device, but
    # you can also boot from a "scsi" device.
    # Just keep in mind your scripts might need to change
    # As you will have /dev/sda instead of /dev/vda (for block device)
    boot_dev_type: "block"

    # By default the ssh port is not fixed.
    # A fixed ssh port makes it easier for automated tests.
    #ssh_port: 5555

    # To install a different set of packages, provide a command to issue
    #install_cmds: "apt-get update ; apt-get build-dep -y qemu"

    # Or to skip the install entirely, just provide ""
    #install_cmds: ""
@@ -0,0 +1,50 @@
#
# Example yaml for use by any of the x86 based scripts in tests/vm.
# Can be provided as an environment variable QEMU_CONFIG
#
qemu-conf:

    # If any of the below are not provided, we will just use the qemu defaults.

    # Login username and password(has to be sudo enabled)
    guest_user: "qemu"
    guest_pass: "qemupass"

    # Password for root user can be different from guest.
    root_pass: "qemupass"

    # Provide default ssh keys of current user.
    # You need to edit the below for your user.
    #ssh_key_file: /home/<user>/.ssh/id_rsa
    #ssh_pub_key_file: /home/<user>/.ssh/id_rsa.pub

    cpu: max
    machine: pc
    memory: 8G

    # The below is a example for how to configure NUMA topology with
    # 4 NUMA nodes and 2 different NUMA distances.
    qemu_args: "-smp cpus=8,sockets=2,cores=4
                -object memory-backend-ram,size=4G,policy=bind,host-nodes=0,id=ram-node0
                -object memory-backend-ram,size=4G,policy=bind,host-nodes=0,id=ram-node1
                -object memory-backend-ram,size=4G,policy=bind,host-nodes=1,id=ram-node2
                -object memory-backend-ram,size=4G,policy=bind,host-nodes=1,id=ram-node3
                -numa node,cpus=0-1,nodeid=0 -numa node,cpus=2-3,nodeid=1
                -numa node,cpus=4-5,nodeid=2 -numa node,cpus=6-7,nodeid=3
                -numa dist,src=0,dst=1,val=15 -numa dist,src=2,dst=3,val=15
                -numa dist,src=0,dst=2,val=20 -numa dist,src=0,dst=3,val=20
                -numa dist,src=1,dst=2,val=20 -numa dist,src=1,dst=3,val=20"

    # By default we do not set the DNS.
    # You override the defaults by setting the below.
    #dns: "1.234.567.89"

    # By default we will use a "block" device, but
    # you can also boot from a "scsi" device.
    # Just keep in mind your scripts might need to change
    # As you will have /dev/sda instead of /dev/vda (for block device)
    boot_dev_type: "block"

    # By default the ssh port is not fixed.
    # A fixed ssh port makes it easier for automated tests.
    ssh_port: 5555
@@ -108,20 +108,20 @@ class FedoraVM(basevm.BaseVM):

         self.console_wait_send("7) [!] Root password", "7\n")
         self.console_wait("Password:")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])
         self.console_wait("Password (confirm):")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])

         self.console_wait_send("8) [ ] User creation", "8\n")
         self.console_wait_send("1) [ ] Create user", "1\n")
         self.console_wait_send("3) User name", "3\n")
-        self.console_wait_send("ENTER:", "%s\n" % self.GUEST_USER)
+        self.console_wait_send("ENTER:", "%s\n" % self._config["guest_user"])
         self.console_wait_send("4) [ ] Use password", "4\n")
         self.console_wait_send("5) Password", "5\n")
         self.console_wait("Password:")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait("Password (confirm):")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait_send("7) Groups", "c\n")

         while True:
@@ -139,7 +139,7 @@ class FedoraVM(basevm.BaseVM):
             if good:
                 break
             time.sleep(10)
-            self.console_send("r\n" % self.GUEST_PASS)
+            self.console_send("r\n" % self._config["guest_pass"])

         self.console_wait_send("'b' to begin install", "b\n")

@@ -150,12 +150,13 @@ class FedoraVM(basevm.BaseVM):

         # setup qemu user
         prompt = " ~]$"
-        self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
+        self.console_ssh_init(prompt, self._config["guest_user"],
+                              self._config["guest_pass"])
         self.console_wait_send(prompt, "exit\n")

         # setup root user
         prompt = " ~]#"
-        self.console_ssh_init(prompt, "root", self.ROOT_PASS)
+        self.console_ssh_init(prompt, "root", self._config["root_pass"])
         self.console_sshd_config(prompt)

         # setup virtio-blk #1 (tarfile)
@@ -113,9 +113,9 @@ class FreeBSDVM(basevm.BaseVM):

         # post-install configuration
         self.console_wait("New Password:")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])
         self.console_wait("Retype New Password:")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])

         self.console_wait_send("Network Configuration", "\n")
         self.console_wait_send("IPv4", "y")
@@ -134,9 +134,9 @@ class FreeBSDVM(basevm.BaseVM):
         # qemu user
         self.console_wait_send("Add User Accounts", "y")
         self.console_wait("Username")
-        self.console_send("%s\n" % self.GUEST_USER)
+        self.console_send("%s\n" % self._config["guest_user"])
         self.console_wait("Full name")
-        self.console_send("%s\n" % self.GUEST_USER)
+        self.console_send("%s\n" % self._config["guest_user"])
         self.console_wait_send("Uid", "\n")
         self.console_wait_send("Login group", "\n")
         self.console_wait_send("Login group", "\n")
@@ -148,9 +148,9 @@ class FreeBSDVM(basevm.BaseVM):
         self.console_wait_send("Use an empty password", "\n")
         self.console_wait_send("Use a random password", "\n")
         self.console_wait("Enter password:")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait("Enter password again:")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait_send("Lock out", "\n")
         self.console_wait_send("OK", "yes\n")
         self.console_wait_send("Add another user", "no\n")
@@ -164,12 +164,12 @@ class FreeBSDVM(basevm.BaseVM):

         # setup qemu user
         prompt = "$"
-        self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
+        self.console_ssh_init(prompt, self._config["guest_user"], self._config["guest_pass"])
         self.console_wait_send(prompt, "exit\n")

         # setup root user
         prompt = "root@freebsd:~ #"
-        self.console_ssh_init(prompt, "root", self.ROOT_PASS)
+        self.console_ssh_init(prompt, "root", self._config["root_pass"])
         self.console_sshd_config(prompt)

         # setup serial console
@@ -120,24 +120,24 @@ class NetBSDVM(basevm.BaseVM):
         self.console_wait_send("d: Change root password", "d\n")
         self.console_wait_send("a: Yes", "a\n")
         self.console_wait("New password:")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])
         self.console_wait("New password:")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])
         self.console_wait("Retype new password:")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])

         self.console_wait_send("o: Add a user", "o\n")
         self.console_wait("username")
-        self.console_send("%s\n" % self.GUEST_USER)
+        self.console_send("%s\n" % self._config["guest_user"])
         self.console_wait("to group wheel")
         self.console_wait_send("a: Yes", "a\n")
         self.console_wait_send("a: /bin/sh", "a\n")
         self.console_wait("New password:")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait("New password:")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait("Retype new password:")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])

         self.console_wait_send("a: Configure network", "a\n")
         self.console_wait_send("a: vioif0", "a\n")
@@ -170,12 +170,13 @@ class NetBSDVM(basevm.BaseVM):

         # setup qemu user
         prompt = "localhost$"
-        self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
+        self.console_ssh_init(prompt, self._config["guest_user"],
+                              self._config["guest_pass"])
         self.console_wait_send(prompt, "exit\n")

         # setup root user
         prompt = "localhost#"
-        self.console_ssh_init(prompt, "root", self.ROOT_PASS)
+        self.console_ssh_init(prompt, "root", self._config["root_pass"])
         self.console_sshd_config(prompt)

         # setup virtio-blk #1 (tarfile)
|
@@ -98,9 +98,9 @@ class OpenBSDVM(basevm.BaseVM):
         self.console_wait_send("Which network interface", "done\n")
         self.console_wait_send("DNS domain name", "localnet\n")
         self.console_wait("Password for root account")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])
         self.console_wait("Password for root account")
-        self.console_send("%s\n" % self.ROOT_PASS)
+        self.console_send("%s\n" % self._config["root_pass"])
         self.console_wait_send("Start sshd(8)", "yes\n")
         self.console_wait_send("X Window System", "\n")
         self.console_wait_send("xenodm", "\n")

@@ -108,13 +108,13 @@ class OpenBSDVM(basevm.BaseVM):
         self.console_wait_send("Which speed", "\n")
 
         self.console_wait("Setup a user")
-        self.console_send("%s\n" % self.GUEST_USER)
+        self.console_send("%s\n" % self._config["guest_user"])
         self.console_wait("Full name")
-        self.console_send("%s\n" % self.GUEST_USER)
+        self.console_send("%s\n" % self._config["guest_user"])
         self.console_wait("Password")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
         self.console_wait("Password")
-        self.console_send("%s\n" % self.GUEST_PASS)
+        self.console_send("%s\n" % self._config["guest_pass"])
 
         self.console_wait_send("Allow root ssh login", "yes\n")
         self.console_wait_send("timezone", "UTC\n")

@@ -135,12 +135,13 @@ class OpenBSDVM(basevm.BaseVM):
 
         # setup qemu user
         prompt = "$"
-        self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
+        self.console_ssh_init(prompt, self._config["guest_user"],
+                              self._config["guest_pass"])
         self.console_wait_send(prompt, "exit\n")
 
         # setup root user
         prompt = "openbsd#"
-        self.console_ssh_init(prompt, "root", self.ROOT_PASS)
+        self.console_ssh_init(prompt, "root", self._config["root_pass"])
         self.console_sshd_config(prompt)
 
         # setup virtio-blk #1 (tarfile)
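
Across the FreeBSD, NetBSD and OpenBSD installer scripts above, the hard-coded GUEST_USER / GUEST_PASS / ROOT_PASS class attributes give way to lookups in the shared self._config dictionary, so one configuration can drive all of the guests. A minimal sketch of the pattern; the default values shown are assumptions for illustration, not copied from basevm.py:

    # Hedged sketch: the keys the installer scripts now expect to find in
    # self._config.  basevm.BaseVM owns the real defaults; the values here
    # are placeholders.
    DEFAULT_CREDENTIALS = {
        "guest_user": "qemu",
        "guest_pass": "qemupass",
        "root_pass":  "qemupass",
    }

    def send_password(vm, key):
        # vm._config merges defaults with any user-supplied configuration,
        # so overriding a credential no longer means editing every script.
        vm.console_send("%s\n" % vm._config[key])
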
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+#
+# Ubuntu aarch64 image
+#
+# Copyright 2020 Linaro
+#
+# Authors:
+#  Robert Foley <robert.foley@linaro.org>
+#  Originally based on ubuntu.i386 Fam Zheng <famz@redhat.com>
+#
+# This code is licensed under the GPL version 2 or later.  See
+# the COPYING file in the top-level directory.
+#
+
+import sys
+import basevm
+import aarch64vm
+import ubuntuvm
+
+DEFAULT_CONFIG = {
+    'cpu'          : "cortex-a57",
+    'machine'      : "virt,gic-version=3",
+    'install_cmds' : "apt-get update,"\
+                     "apt-get build-dep -y --arch-only qemu,"\
+                     "apt-get install -y libfdt-dev pkg-config language-pack-en",
+    # We increase beyond the default time since during boot
+    # it can take some time (many seconds) to log into the VM
+    # especially using softmmu.
+    'ssh_timeout'  : 60,
+}
+
+class UbuntuAarch64VM(ubuntuvm.UbuntuVM):
+    name = "ubuntu.aarch64"
+    arch = "aarch64"
+    image_name = "ubuntu-18.04-server-cloudimg-arm64.img"
+    image_link = "https://cloud-images.ubuntu.com/releases/18.04/release/" + image_name
+    image_sha256="0fdcba761965735a8a903d8b88df8e47f156f48715c00508e4315c506d7d3cb1"
+    BUILD_SCRIPT = """
+        set -e;
+        cd $(mktemp -d);
+        sudo chmod a+r /dev/vdb;
+        tar --checkpoint=.10 -xf /dev/vdb;
+        ./configure {configure_opts};
+        make --output-sync {target} -j{jobs} {verbose};
+    """
+    def boot(self, img, extra_args=None):
+        aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64)
+        default_args = aarch64vm.get_pflash_args(self._tmpdir)
+        if extra_args:
+            extra_args.extend(default_args)
+        else:
+            extra_args = default_args
+        # We always add these performance tweaks
+        # because without them, we boot so slowly that we
+        # can time out finding the boot efi device.
+        if '-smp' not in extra_args and \
+           '-smp' not in self._config['extra_args'] and \
+           '-smp' not in self._args:
+            # Only add if not already there to give caller option to change it.
+            extra_args.extend(["-smp", "8"])
+
+        # We have overridden boot() since aarch64 has additional parameters.
+        # Call down to the base class method.
+        super(UbuntuAarch64VM, self).boot(img, extra_args=extra_args)
+
+if __name__ == "__main__":
+    defaults = aarch64vm.get_config_defaults(UbuntuAarch64VM, DEFAULT_CONFIG)
+    sys.exit(basevm.main(UbuntuAarch64VM, defaults))
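
The new ubuntu.aarch64 script above leaves downloading and provisioning to ubuntuvm.UbuntuVM and only overrides boot() to attach EFI pflash images and, when nothing else sets it, -smp 8; with the rest of the series it is presumably driven by the usual make vm-build-ubuntu.aarch64 target. A rough sketch of what the aarch64vm helper is assumed to hand back; only the helper names appear in the diff, so the file names and layout below are guesses:

    # Hedged sketch of aarch64vm.get_pflash_args(): two pflash drives, one
    # for the EFI firmware and one for its variable store.  File names and
    # internals are assumptions for illustration only.
    import os

    def get_pflash_args(tmpdir):
        return ["-drive", "file=%s,format=raw,if=pflash" % os.path.join(tmpdir, "flash0.img"),
                "-drive", "file=%s,format=raw,if=pflash" % os.path.join(tmpdir, "flash1.img")]
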
@@ -11,15 +11,22 @@
 # the COPYING file in the top-level directory.
 #
 
-import os
 import sys
-import subprocess
 import basevm
-import time
+import ubuntuvm
 
-class UbuntuX86VM(basevm.BaseVM):
+DEFAULT_CONFIG = {
+    'install_cmds' : "apt-get update,"\
+                     "apt-get build-dep -y qemu,"\
+                     "apt-get install -y libfdt-dev language-pack-en",
+}
+
+class UbuntuX86VM(ubuntuvm.UbuntuVM):
     name = "ubuntu.i386"
     arch = "i386"
+    image_link="https://cloud-images.ubuntu.com/releases/bionic/"\
+               "release-20191114/ubuntu-18.04-server-cloudimg-i386.img"
+    image_sha256="28969840626d1ea80bb249c08eef1a4533e8904aa51a327b40f37ac4b4ff04ef"
     BUILD_SCRIPT = """
         set -e;
         cd $(mktemp -d);

@@ -29,34 +36,5 @@ class UbuntuX86VM(basevm.BaseVM):
         make --output-sync {target} -j{jobs} {verbose};
     """
 
-    def build_image(self, img):
-        cimg = self._download_with_cache(
-            "https://cloud-images.ubuntu.com/releases/bionic/release-20191114/ubuntu-18.04-server-cloudimg-i386.img",
-            sha256sum="28969840626d1ea80bb249c08eef1a4533e8904aa51a327b40f37ac4b4ff04ef")
-        img_tmp = img + ".tmp"
-        subprocess.check_call(["cp", "-f", cimg, img_tmp])
-        self.exec_qemu_img("resize", img_tmp, "50G")
-        self.boot(img_tmp, extra_args = [
-            "-device", "VGA",
-            "-cdrom", self.gen_cloud_init_iso()
-        ])
-        self.wait_ssh()
-        self.ssh_root_check("touch /etc/cloud/cloud-init.disabled")
-        self.ssh_root_check("apt-get update")
-        self.ssh_root_check("apt-get install -y cloud-initramfs-growroot")
-        # Don't check the status in case the guest hang up too quickly
-        self.ssh_root("sync && reboot")
-        time.sleep(5)
-        self.wait_ssh()
-        # The previous update sometimes doesn't survive a reboot, so do it again
-        self.ssh_root_check("sed -ie s/^#\ deb-src/deb-src/g /etc/apt/sources.list")
-        self.ssh_root_check("apt-get update")
-        self.ssh_root_check("apt-get build-dep -y qemu")
-        self.ssh_root_check("apt-get install -y libfdt-dev language-pack-en")
-        self.ssh_root("poweroff")
-        self.wait()
-        os.rename(img_tmp, img)
-        return 0
-
 if __name__ == "__main__":
-    sys.exit(basevm.main(UbuntuX86VM))
+    sys.exit(basevm.main(UbuntuX86VM, DEFAULT_CONFIG))
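
After the rework above, ubuntu.i386 keeps only metadata (image URL, checksum, install_cmds) and hands DEFAULT_CONFIG to basevm.main(); the download/boot/provision sequence now lives in ubuntuvm.UbuntuVM. A hedged sketch of customising the provisioning step; how a user-supplied dictionary reaches self._config is an assumption here:

    # Hedged sketch: install_cmds is a comma-separated string that
    # UbuntuVM.build_image() splits and runs as root one command at a time,
    # so swapping the string swaps the provisioning without touching the
    # boot logic.
    custom_config = {
        "install_cmds": ",".join([
            "apt-get update",
            "apt-get install -y build-essential ninja-build",
        ]),
    }
    # Handing this dict to basevm.main(UbuntuX86VM, custom_config) in place
    # of DEFAULT_CONFIG would apply it; doing the same from a config file is
    # assumed, not shown in this diff.
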
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+#
+# Ubuntu VM testing library
+#
+# Copyright 2017 Red Hat Inc.
+# Copyright 2020 Linaro
+#
+# Authors:
+#  Robert Foley <robert.foley@linaro.org>
+#  Originally based on ubuntu.i386 Fam Zheng <famz@redhat.com>
+#
+# This code is licensed under the GPL version 2 or later.  See
+# the COPYING file in the top-level directory.
+
+import os
+import subprocess
+import basevm
+
+class UbuntuVM(basevm.BaseVM):
+
+    def __init__(self, args, config=None):
+        self.login_prompt = "ubuntu-{}-guest login:".format(self.arch)
+        basevm.BaseVM.__init__(self, args, config)
+
+    def build_image(self, img):
+        """Build an Ubuntu VM image. The child class will
+           define the install_cmds to init the VM."""
+        os_img = self._download_with_cache(self.image_link,
+                                           sha256sum=self.image_sha256)
+        img_tmp = img + ".tmp"
+        subprocess.check_call(["cp", "-f", os_img, img_tmp])
+        self.exec_qemu_img("resize", img_tmp, "+50G")
+        ci_img = self.gen_cloud_init_iso()
+
+        self.boot(img_tmp, extra_args = [ "-device", "VGA", "-cdrom", ci_img, ])
+
+        # First command we issue is fix for slow ssh login.
+        self.wait_ssh(wait_root=True,
+                      cmd="chmod -x /etc/update-motd.d/*")
+        # Wait for cloud init to finish
+        self.wait_ssh(wait_root=True,
+                      cmd="ls /var/lib/cloud/instance/boot-finished")
+        self.ssh_root("touch /etc/cloud/cloud-init.disabled")
+        # Disable auto upgrades.
+        # We want to keep the VM system state stable.
+        self.ssh_root('sed -ie \'s/"1"/"0"/g\' '\
+                      '/etc/apt/apt.conf.d/20auto-upgrades')
+        self.ssh_root("sed -ie s/^#\ deb-src/deb-src/g /etc/apt/sources.list")
+
+        # If the user chooses not to do the install phase,
+        # then we will jump right to the graceful shutdown
+        if self._config['install_cmds'] != "":
+            # Issue the install commands.
+            # This can be overriden by the user in the config .yml.
+            install_cmds = self._config['install_cmds'].split(',')
+            for cmd in install_cmds:
+                self.ssh_root(cmd)
+        self.graceful_shutdown()
+        os.rename(img_tmp, img)
+        return 0
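
ubuntuvm.UbuntuVM above centralises the cloud-image workflow: download and resize the image, boot with a cloud-init ISO, wait for /var/lib/cloud/instance/boot-finished, run the optional install_cmds, then shut down gracefully and rename the image into place. A hypothetical subclass showing how little a new Ubuntu-based image needs to declare; the URL and checksum are placeholders, not real artifacts:

    # Hypothetical subclass sketch; everything except the metadata is
    # inherited from ubuntuvm.UbuntuVM.  The image URL and sha256 below are
    # placeholders and will not verify against a real download.
    import sys
    import basevm
    import ubuntuvm

    class UbuntuExampleVM(ubuntuvm.UbuntuVM):
        name = "ubuntu.example"
        arch = "x86_64"
        image_name = "ubuntu-18.04-server-cloudimg-amd64.img"
        image_link = "https://cloud-images.ubuntu.com/releases/18.04/release/" + image_name
        image_sha256 = "0" * 64   # placeholder checksum
        BUILD_SCRIPT = """
            set -e;
            cd $(mktemp -d);
            tar -xf /dev/vdb;
            ./configure {configure_opts};
            make --output-sync {target} -j{jobs} {verbose};
        """

    if __name__ == "__main__":
        sys.exit(basevm.main(UbuntuExampleVM, {"install_cmds": ""}))
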
@@ -52,8 +52,10 @@ typedef struct {
 #endif
     sigjmp_buf env;
 
+#ifdef CONFIG_TSAN
     void *tsan_co_fiber;
     void *tsan_caller_fiber;
+#endif
 
 #ifdef CONFIG_VALGRIND_H
     unsigned int valgrind_stack_id;

@@ -77,7 +79,10 @@ union cc_arg {
     int i[2];
 };
 
-/* QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it. */
+/*
+ * QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it.
+ * always_inline is required to avoid TSan runtime fatal errors.
+ */
 static inline __attribute__((always_inline))
 void on_new_fiber(CoroutineUContext *co)
 {

@@ -87,6 +92,7 @@ void on_new_fiber(CoroutineUContext *co)
 #endif
 }
 
+/* always_inline is required to avoid TSan runtime fatal errors. */
 static inline __attribute__((always_inline))
 void finish_switch_fiber(void *fake_stack_save)
 {

@@ -109,18 +115,29 @@ void finish_switch_fiber(void *fake_stack_save)
 #endif
 }
 
-static inline __attribute__((always_inline)) void start_switch_fiber(
-    CoroutineAction action, void **fake_stack_save,
-    const void *bottom, size_t size, void *new_fiber)
+/* always_inline is required to avoid TSan runtime fatal errors. */
+static inline __attribute__((always_inline))
+void start_switch_fiber_asan(CoroutineAction action, void **fake_stack_save,
+                             const void *bottom, size_t size)
 {
 #ifdef CONFIG_ASAN
     __sanitizer_start_switch_fiber(
             action == COROUTINE_TERMINATE ? NULL : fake_stack_save,
             bottom, size);
 #endif
+}
+
+/* always_inline is required to avoid TSan runtime fatal errors. */
+static inline __attribute__((always_inline))
+void start_switch_fiber_tsan(void **fake_stack_save,
+                             CoroutineUContext *co,
+                             bool caller)
+{
 #ifdef CONFIG_TSAN
-    void *curr_fiber =
-        __tsan_get_current_fiber();
+    void *new_fiber = caller ?
+                      co->tsan_caller_fiber :
+                      co->tsan_co_fiber;
+    void *curr_fiber = __tsan_get_current_fiber();
     __tsan_acquire(curr_fiber);
 
     *fake_stack_save = curr_fiber;

@@ -144,12 +161,9 @@ static void coroutine_trampoline(int i0, int i1)
 
     /* Initialize longjmp environment and switch back the caller */
     if (!sigsetjmp(self->env, 0)) {
-        start_switch_fiber(
-            COROUTINE_YIELD,
-            &fake_stack_save,
-            leader.stack,
-            leader.stack_size,
-            self->tsan_caller_fiber);
+        start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, leader.stack,
+                                leader.stack_size);
+        start_switch_fiber_tsan(&fake_stack_save, self, true); /* true=caller */
         siglongjmp(*(sigjmp_buf *)co->entry_arg, 1);
     }
 

@@ -208,10 +222,10 @@ Coroutine *qemu_coroutine_new(void)
 
     /* swapcontext() in, siglongjmp() back out */
    if (!sigsetjmp(old_env, 0)) {
-        start_switch_fiber(
-            COROUTINE_YIELD,
-            &fake_stack_save,
-            co->stack, co->stack_size, co->tsan_co_fiber);
+        start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, co->stack,
+                                co->stack_size);
+        start_switch_fiber_tsan(&fake_stack_save,
+                                co, false); /* false=not caller */
 
 #ifdef CONFIG_SAFESTACK
     /*

@@ -287,8 +301,10 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
 
     ret = sigsetjmp(from->env, 0);
     if (ret == 0) {
-        start_switch_fiber(action, &fake_stack_save,
-                           to->stack, to->stack_size, to->tsan_co_fiber);
+        start_switch_fiber_asan(action, &fake_stack_save, to->stack,
+                                to->stack_size);
+        start_switch_fiber_tsan(&fake_stack_save,
+                                to, false); /* false=not caller */
         siglongjmp(to->env, action);
     }
 
@@ -266,12 +266,6 @@ static struct {
     { "usb-redir",             "hw-", "usb-redirect" },
    { "qxl-vga",               "hw-", "display-qxl" },
     { "qxl",                   "hw-", "display-qxl" },
-    { "virtio-gpu-device",     "hw-", "display-virtio-gpu" },
-    { "virtio-gpu-pci",        "hw-", "display-virtio-gpu" },
-    { "virtio-vga",            "hw-", "display-virtio-gpu" },
-    { "vhost-user-gpu-device", "hw-", "display-virtio-gpu" },
-    { "vhost-user-gpu-pci",    "hw-", "display-virtio-gpu" },
-    { "vhost-user-vga",        "hw-", "display-virtio-gpu" },
     { "chardev-braille",       "chardev-", "baum" },
 };
 