Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: fixes, features, cleanups

CXL volatile memory support
More memslots for vhost-user on x86 and ARM.
vIOMMU support for vhost-vdpa
pcie-to-pci bridge can now be compiled out
MADT revision bumped to 3
Fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmRniWoPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpN4MH/RqdvHmujrjvjzXbbN/gq87Njp+kQLKEooIE
# ZkqdNaVUE6vjCH8iU+chjsxt4VSquSjOL9CWWrYefEIeqCFLWsuXSAY0VDAbY67x
# +aes51tTYILVsx7fbb+T5mJKRgVuWW4C5KaGeQ1djSexy42nvplZUJdIJUhZr0t9
# dzzOsD+mezHS7Xu2QOzSfl5QQRuOVVJnjJXkqJG/yRvHrZM5aTolatr/X7jNGedm
# 4oyMsVMaAcQ+dnEQigRJodf/MpFfs9DfNZAH55VwwQWsNT0t0ueD0xigR203jjaE
# mJJJipAqetFax2JjC7QMXWf+LR36BnL/0/xH+x/BWb0FI42wr0I=
# =ajmR
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 19 May 2023 07:36:26 AM PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (40 commits)
  hw/i386/pc: No need for rtc_state to be an out-parameter
  hw/i386/pc: Create RTC controllers in south bridges
  hw/cxl: Introduce cxl_device_get_timestamp() utility function
  hw/cxl: rename mailbox return code type from ret_code to CXLRetCode
  hw/pci-bridge: make building pcie-to-pci bridge configurable
  virtio-pci: add handling of PCI ATS and Device-TLB enable/disable
  hw/pci-host/pam: Make init_pam() usage more readable
  hw/i386/pc: Initialize ram_memory variable directly
  hw/i386/pc_{q35,piix}: Minimize usage of get_system_memory()
  hw/i386/pc_{q35,piix}: Reuse MachineClass::desc as SMB product name
  hw/i386/pc_q35: Reuse machine parameter
  hw/pci-host/q35: Inline sysbus_add_io()
  hw/pci-host/i440fx: Inline sysbus_add_io()
  vhost-vdpa: Add support for vIOMMU.
  vhost-vdpa: Add check for full 64-bit in region delete
  vhost_vdpa: fix the input in trace_vhost_vdpa_listener_region_del()
  vhost: expose function vhost_dev_has_iommu()
  virtio-crypto: fix NULL pointer dereference in virtio_crypto_free_request
  virtio-net: not enable vq reset feature unconditionally
  vhost-user: Remove acpi-specific memslot limit
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit aa222a8e4f
Richard Henderson <richard.henderson@linaro.org>, 2023-05-19 12:17:16 -07:00
57 changed files with 900 additions and 341 deletions

@@ -328,6 +328,14 @@ from Intel that was not properly allocated. Since version 5.2, the controller
 has used a properly allocated identifier. Deprecate the ``use-intel-id``
 machine compatibility parameter.
 
+``-device cxl-type3,memdev=xxxx`` (since 8.0)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cxl-type3`` device initially only used a single memory backend. With
+the addition of volatile memory support, it is now necessary to distinguish
+between persistent and volatile memory backends. As such, memdev is deprecated
+in favor of persistent-memdev.
+
 Block device options
 ''''''''''''''''''''
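For anyone updating an existing command line, the change is only the option name; the
device and backend IDs below reuse the ones from the CXL examples later on this page
and are purely illustrative::

  # deprecated spelling (still accepted, interpreted as persistent memory)
  -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0

  # preferred replacement
  -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0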

@@ -162,7 +162,7 @@ Example system Topology. x marks the match in each decoder level::
 |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->|
 |    __________   __________________________________   __________   |
 |   |          | |                                  | |          |  |
-|   | CFMW 0   | |  CXL Fixed Memory Window 1       | | CFMW 1   |  |
+|   | CFMW 0   | |  CXL Fixed Memory Window 1       | | CFMW 2   |  |
 |   | HB0 only | |  Configured to interleave memory | | HB1 only |  |
 |   |          | |  memory accesses across HB0/HB1  | |          |  |
 |   |__________| |_____x____________________________| |__________|  |
@@ -208,8 +208,8 @@ Notes:
 (1) **3 CXL Fixed Memory Windows (CFMW)** corresponding to different
     ranges of the system physical address map. Each CFMW has
     particular interleave setup across the CXL Host Bridges (HB)
-    CFMW0 provides uninterleaved access to HB0, CFW2 provides
-    uninterleaved access to HB1. CFW1 provides interleaved memory access
+    CFMW0 provides uninterleaved access to HB0, CFMW2 provides
+    uninterleaved access to HB1. CFMW1 provides interleaved memory access
     across HB0 and HB1.
 (2) **Two CXL Host Bridges**. Each of these has 2 CXL Root Ports and
@@ -247,7 +247,7 @@ Example topology involving a switch::
 |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->|
 |    __________   __________________________________   __________   |
 |   |          | |                                  | |          |  |
-|   | CFMW 0   | |  CXL Fixed Memory Window 1       | | CFMW 1   |  |
+|   | CFMW 0   | |  CXL Fixed Memory Window 1       | | CFMW 2   |  |
 |   | HB0 only | |  Configured to interleave memory | | HB1 only |  |
 |   |          | |  memory accesses across HB0/HB1  | |          |  |
 |   |____x_____| |__________________________________| |__________|  |
@@ -300,22 +300,43 @@ Example topology involving a switch::
 Example command lines
 ---------------------
 
-A very simple setup with just one directly attached CXL Type 3 device::
+A very simple setup with just one directly attached CXL Type 3 Persistent Memory device::
 
-  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+  qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \
   ...
   -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M \
   -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M \
   -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
   -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
-  -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \
+  -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \
+  -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
+
+A very simple setup with just one directly attached CXL Type 3 Volatile Memory device::
+
+  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+  ...
+  -object memory-backend-ram,id=vmem0,share=on,size=256M \
+  -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
+  -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
+  -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0 \
+  -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
+
+The same volatile setup may optionally include an LSA region::
+
+  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+  ...
+  -object memory-backend-ram,id=vmem0,share=on,size=256M \
+  -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa.raw,size=256M \
+  -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
+  -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
+  -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,lsa=cxl-lsa0,id=cxl-vmem0 \
   -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
 
 A setup suitable for 4 way interleave. Only one fixed window provided, to enable 2 way
 interleave across 2 CXL host bridges. Each host bridge has 2 CXL Root Ports, with
 the CXL Type3 device directly attached (no switches).::
 
-  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+  qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \
   ...
   -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M \
   -object memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M \
@@ -328,18 +349,18 @@ the CXL Type3 device directly attached (no switches).::
   -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
   -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2 \
   -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
-  -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \
+  -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \
   -device cxl-rp,port=1,bus=cxl.1,id=root_port14,chassis=0,slot=3 \
-  -device cxl-type3,bus=root_port14,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \
+  -device cxl-type3,bus=root_port14,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \
   -device cxl-rp,port=0,bus=cxl.2,id=root_port15,chassis=0,slot=5 \
-  -device cxl-type3,bus=root_port15,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \
+  -device cxl-type3,bus=root_port15,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \
   -device cxl-rp,port=1,bus=cxl.2,id=root_port16,chassis=0,slot=6 \
-  -device cxl-type3,bus=root_port16,memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \
+  -device cxl-type3,bus=root_port16,persistent-memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \
   -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k
 
 An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
 
-  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+  qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \
   ...
   -object memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M \
   -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M \
@@ -354,15 +375,23 @@ An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
   -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \
   -device cxl-upstream,bus=root_port0,id=us0 \
   -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
-  -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,size=256M \
+  -device cxl-type3,bus=swport0,persistent-memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \
   -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \
-  -device cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1,size=256M \
+  -device cxl-type3,bus=swport1,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \
   -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \
-  -device cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2,size=256M \
+  -device cxl-type3,bus=swport2,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \
   -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \
-  -device cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,size=256M \
+  -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \
   -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k
 
+Deprecations
+------------
+
+The Type 3 device [memdev] attribute has been deprecated in favor of the
+[persistent-memdev] attributes. [memdev] will default to a persistent memory
+device for backward compatibility and is incapable of being used in combination
+with [persistent-memdev].
+
 Kernel Configuration Options
 ----------------------------
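A volatile and a persistent backend can also be combined on a single Type 3 device.
This variant is not among the documented examples above, so treat it as an illustrative
sketch with made-up backend and device IDs; per the cxl_type3.c changes below, the
volatile capacity is mapped at DPA 0 and the persistent capacity follows it. These
options slot into a full invocation like those shown earlier::

  -object memory-backend-ram,id=vmem0,share=on,size=256M \
  -object memory-backend-file,id=pmem0,share=on,mem-path=/tmp/cxltest.raw,size=256M \
  -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa.raw,size=256M \
  -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
  -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,persistent-memdev=pmem0,lsa=cxl-lsa0,id=cxl-dev0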

@@ -48,6 +48,7 @@ GlobalProperty hw_compat_7_2[] = {
     { "e1000e", "migrate-timadj", "off" },
     { "virtio-mem", "x-early-migration", "false" },
     { "migration", "x-preempt-pre-7-2", "true" },
+    { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" },
 };
 const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2);

@@ -108,31 +108,21 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp)
 static void ct3_load_cdat(CDATObject *cdat, Error **errp)
 {
     g_autofree CDATEntry *cdat_st = NULL;
+    g_autofree char *buf = NULL;
     uint8_t sum = 0;
     int num_ent;
-    int i = 0, ent = 1, file_size = 0;
+    int i = 0, ent = 1;
+    gsize file_size = 0;
     CDATSubHeader *hdr;
-    FILE *fp = NULL;
+    GError *error = NULL;
 
     /* Read CDAT file and create its cache */
-    fp = fopen(cdat->filename, "r");
-    if (!fp) {
-        error_setg(errp, "CDAT: Unable to open file");
+    if (!g_file_get_contents(cdat->filename, (gchar **)&buf,
+                             &file_size, &error)) {
+        error_setg(errp, "CDAT: File read failed: %s", error->message);
+        g_error_free(error);
         return;
     }
-    fseek(fp, 0, SEEK_END);
-    file_size = ftell(fp);
-    fseek(fp, 0, SEEK_SET);
-    cdat->buf = g_malloc0(file_size);
-    if (fread(cdat->buf, file_size, 1, fp) == 0) {
-        error_setg(errp, "CDAT: File read failed");
-        return;
-    }
-    fclose(fp);
 
     if (file_size < sizeof(CDATTableHeader)) {
         error_setg(errp, "CDAT: File too short");
         return;
@@ -140,9 +130,17 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp)
     i = sizeof(CDATTableHeader);
     num_ent = 1;
     while (i < file_size) {
-        hdr = (CDATSubHeader *)(cdat->buf + i);
+        hdr = (CDATSubHeader *)(buf + i);
+        if (i + sizeof(CDATSubHeader) > file_size) {
+            error_setg(errp, "CDAT: Truncated table");
+            return;
+        }
         cdat_len_check(hdr, errp);
         i += hdr->length;
+        if (i > file_size) {
+            error_setg(errp, "CDAT: Truncated table");
+            return;
+        }
         num_ent++;
     }
     if (i != file_size) {
@@ -150,33 +148,26 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp)
         return;
     }
 
-    cdat_st = g_malloc0(sizeof(*cdat_st) * num_ent);
-    if (!cdat_st) {
-        error_setg(errp, "CDAT: Failed to allocate entry array");
-        return;
-    }
+    cdat_st = g_new0(CDATEntry, num_ent);
 
     /* Set CDAT header, Entry = 0 */
-    cdat_st[0].base = cdat->buf;
+    cdat_st[0].base = buf;
     cdat_st[0].length = sizeof(CDATTableHeader);
     i = 0;
 
     while (i < cdat_st[0].length) {
-        sum += cdat->buf[i++];
+        sum += buf[i++];
     }
 
     /* Read CDAT structures */
     while (i < file_size) {
-        hdr = (CDATSubHeader *)(cdat->buf + i);
-        cdat_len_check(hdr, errp);
+        hdr = (CDATSubHeader *)(buf + i);
 
         cdat_st[ent].base = hdr;
         cdat_st[ent].length = hdr->length;
 
-        while (cdat->buf + i <
-               (uint8_t *)cdat_st[ent].base + cdat_st[ent].length) {
+        while (buf + i < (char *)cdat_st[ent].base + cdat_st[ent].length) {
             assert(i < file_size);
-            sum += cdat->buf[i++];
+            sum += buf[i++];
         }
 
         ent++;
@@ -187,6 +178,7 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp)
     }
     cdat->entry_len = num_ent;
     cdat->entry = g_steal_pointer(&cdat_st);
+    cdat->buf = g_steal_pointer(&buf);
 }
 
 void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp)
@@ -218,7 +210,5 @@ void cxl_doe_cdat_release(CXLComponentState *cxl_cstate)
         cdat->free_cdat_table(cdat->built_buf, cdat->built_buf_len,
                               cdat->private);
     }
-    if (cdat->buf) {
-        free(cdat->buf);
-    }
+    g_free(cdat->buf);
 }

@@ -38,23 +38,25 @@ static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
     ComponentRegisters *cregs = &cxl_cstate->crb;
     uint32_t *cache_mem = cregs->cache_mem_registers;
     bool should_commit = false;
+    bool should_uncommit = false;
 
     switch (offset) {
     case A_CXL_HDM_DECODER0_CTRL:
         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
+        should_uncommit = !should_commit;
         break;
     default:
         break;
     }
 
-    memory_region_transaction_begin();
-    stl_le_p((uint8_t *)cache_mem + offset, value);
     if (should_commit) {
-        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
-        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
-        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
+        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
+        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
+    } else if (should_uncommit) {
+        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
+        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
     }
-    memory_region_transaction_commit();
+    stl_le_p((uint8_t *)cache_mem + offset, value);
 }
 
 static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,

@@ -269,3 +269,18 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
 
     cxl_initialize_mailbox(cxl_dstate);
 }
+
+uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
+{
+    uint64_t time, delta;
+    uint64_t final_time = 0;
+
+    if (cxl_dstate->timestamp.set) {
+        /* Find the delta from the last time the host set the time. */
+        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+        delta = time - cxl_dstate->timestamp.last_set;
+        final_time = cxl_dstate->timestamp.host_set + delta;
+    }
+
+    return final_time;
+}

@@ -23,7 +23,7 @@
  *  FOO = 0x7f,
  *  #define BAR 0
  * 2. Implement the handler
- *    static ret_code cmd_foo_bar(struct cxl_cmd *cmd,
+ *    static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
  *                                CXLDeviceState *cxl_dstate, uint16_t *len)
  * 3. Add the command to the cxl_cmd_set[][]
  *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
@@ -90,10 +90,10 @@ typedef enum {
     CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
     CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
     CXL_MBOX_MAX = 0x17
-} ret_code;
+} CXLRetCode;
 
 struct cxl_cmd;
-typedef ret_code (*opcode_handler)(struct cxl_cmd *cmd,
+typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate, uint16_t *len);
 struct cxl_cmd {
     const char *name;
@@ -105,16 +105,16 @@ struct cxl_cmd {
 #define DEFINE_MAILBOX_HANDLER_ZEROED(name, size) \
     uint16_t __zero##name = size; \
-    static ret_code cmd_##name(struct cxl_cmd *cmd, \
+    static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \
                                CXLDeviceState *cxl_dstate, uint16_t *len) \
     { \
         *len = __zero##name; \
         memset(cmd->payload, 0, *len); \
         return CXL_MBOX_SUCCESS; \
     }
 #define DEFINE_MAILBOX_HANDLER_NOP(name) \
-    static ret_code cmd_##name(struct cxl_cmd *cmd, \
+    static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \
                                CXLDeviceState *cxl_dstate, uint16_t *len) \
     { \
         return CXL_MBOX_SUCCESS; \
     }
@@ -125,9 +125,9 @@ DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4);
 DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy);
 
 /* 8.2.9.2.1 */
-static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd,
+static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd,
                                              CXLDeviceState *cxl_dstate,
                                              uint16_t *len)
 {
     struct {
         uint8_t slots_supported;
@@ -141,7 +141,8 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd,
     } QEMU_PACKED *fw_info;
     QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);
 
-    if (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) {
+    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
+        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
         return CXL_MBOX_INTERNAL_ERROR;
     }
@@ -158,21 +159,12 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd,
 }
 
 /* 8.2.9.3.1 */
-static ret_code cmd_timestamp_get(struct cxl_cmd *cmd,
+static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
 {
-    uint64_t time, delta;
-    uint64_t final_time = 0;
-
-    if (cxl_dstate->timestamp.set) {
-        /* First find the delta from the last time the host set the time. */
-        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-        delta = time - cxl_dstate->timestamp.last_set;
-        final_time = cxl_dstate->timestamp.host_set + delta;
-    }
+    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);
 
-    /* Then adjust the actual time */
     stq_le_p(cmd->payload, final_time);
     *len = 8;
@@ -180,7 +172,7 @@ static ret_code cmd_timestamp_get(struct cxl_cmd *cmd,
 }
 
 /* 8.2.9.3.2 */
-static ret_code cmd_timestamp_set(struct cxl_cmd *cmd,
+static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
 {
@@ -200,9 +192,9 @@ static const QemuUUID cel_uuid = {
 };
 
 /* 8.2.9.4.1 */
-static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd,
+static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd,
                                        CXLDeviceState *cxl_dstate,
                                        uint16_t *len)
 {
     struct {
         uint16_t entries;
@@ -223,9 +215,9 @@ static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd,
 }
 
 /* 8.2.9.4.2 */
-static ret_code cmd_logs_get_log(struct cxl_cmd *cmd,
+static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd,
                                  CXLDeviceState *cxl_dstate,
                                  uint16_t *len)
 {
     struct {
         QemuUUID uuid;
@@ -264,9 +256,9 @@ static ret_code cmd_logs_get_log(struct cxl_cmd *cmd,
 }
 
 /* 8.2.9.5.1.1 */
-static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd,
+static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd,
                                            CXLDeviceState *cxl_dstate,
                                            uint16_t *len)
 {
     struct {
         char fw_revision[0x10];
@@ -288,29 +280,29 @@ static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd,
     CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
-    uint64_t size = cxl_dstate->pmem_size;
 
-    if (!QEMU_IS_ALIGNED(size, CXL_CAPACITY_MULTIPLIER)) {
+    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
+        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
         return CXL_MBOX_INTERNAL_ERROR;
     }
 
     id = (void *)cmd->payload;
     memset(id, 0, sizeof(*id));
 
-    /* PMEM only */
     snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);
 
-    id->total_capacity = size / CXL_CAPACITY_MULTIPLIER;
-    id->persistent_capacity = size / CXL_CAPACITY_MULTIPLIER;
-    id->lsa_size = cvc->get_lsa_size(ct3d);
+    stq_le_p(&id->total_capacity, cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
+    stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
+    stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
+    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
 
     *len = sizeof(*id);
     return CXL_MBOX_SUCCESS;
 }
 
-static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd,
+static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd,
                                             CXLDeviceState *cxl_dstate,
                                             uint16_t *len)
 {
     struct {
         uint64_t active_vmem;
@@ -319,25 +311,28 @@ static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd,
         uint64_t next_pmem;
     } QEMU_PACKED *part_info = (void *)cmd->payload;
     QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
-    uint64_t size = cxl_dstate->pmem_size;
 
-    if (!QEMU_IS_ALIGNED(size, CXL_CAPACITY_MULTIPLIER)) {
+    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
+        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
         return CXL_MBOX_INTERNAL_ERROR;
     }
 
-    /* PMEM only */
-    part_info->active_vmem = 0;
-    part_info->next_vmem = 0;
-    part_info->active_pmem = size / CXL_CAPACITY_MULTIPLIER;
-    part_info->next_pmem = 0;
+    stq_le_p(&part_info->active_vmem, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
+    /*
+     * When both next_vmem and next_pmem are 0, there is no pending change to
+     * partitioning.
+     */
+    stq_le_p(&part_info->next_vmem, 0);
+    stq_le_p(&part_info->active_pmem, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
+    stq_le_p(&part_info->next_pmem, 0);
 
     *len = sizeof(*part_info);
     return CXL_MBOX_SUCCESS;
 }
 
-static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd,
+static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd,
                                  CXLDeviceState *cxl_dstate,
                                  uint16_t *len)
 {
     struct {
         uint32_t offset;
@@ -360,9 +355,9 @@ static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd,
     return CXL_MBOX_SUCCESS;
 }
 
-static ret_code cmd_ccls_set_lsa(struct cxl_cmd *cmd,
+static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd,
                                  CXLDeviceState *cxl_dstate,
                                  uint16_t *len)
 {
     struct set_lsa_pl {
         uint32_t offset;

@@ -102,7 +102,7 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker,
     MachineClass *mc = MACHINE_GET_CLASS(x86ms);
     const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms));
     AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(adev);
-    AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = oem_id,
+    AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = oem_id,
                         .oem_table_id = oem_table_id };
 
     acpi_table_begin(&table, table_data);

@@ -116,7 +116,9 @@
     { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
     { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },
 
-GlobalProperty pc_compat_8_0[] = {};
+GlobalProperty pc_compat_8_0[] = {
+    { "virtio-mem", "unplugged-inaccessible", "auto" },
+};
 const size_t pc_compat_8_0_len = G_N_ELEMENTS(pc_compat_8_0);
 
 GlobalProperty pc_compat_7_2[] = {
@@ -950,7 +952,6 @@ static hwaddr pc_max_used_gpa(PCMachineState *pcms, uint64_t pci_hole64_size)
 void pc_memory_init(PCMachineState *pcms,
                     MemoryRegion *system_memory,
                     MemoryRegion *rom_memory,
-                    MemoryRegion **ram_memory,
                     uint64_t pci_hole64_size)
 {
     int linux_boot, i;
@@ -1008,7 +1009,6 @@ void pc_memory_init(PCMachineState *pcms,
      * Split single memory region and use aliases to address portions of it,
      * done for backwards compatibility with older qemus.
      */
-    *ram_memory = machine->ram;
     ram_below_4g = g_malloc(sizeof(*ram_below_4g));
     memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", machine->ram,
                              0, x86ms->below_4g_mem_size);
@@ -1265,7 +1265,7 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl,
 void pc_basic_device_init(struct PCMachineState *pcms,
                           ISABus *isa_bus, qemu_irq *gsi,
-                          ISADevice **rtc_state,
+                          ISADevice *rtc_state,
                           bool create_fdctrl,
                           uint32_t hpet_irqs)
 {
@@ -1318,7 +1318,17 @@ void pc_basic_device_init(struct PCMachineState *pcms,
         pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT);
         rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT);
     }
-    *rtc_state = ISA_DEVICE(mc146818_rtc_init(isa_bus, 2000, rtc_irq));
+
+    if (rtc_irq) {
+        qdev_connect_gpio_out(DEVICE(rtc_state), 0, rtc_irq);
+    } else {
+        uint32_t irq = object_property_get_uint(OBJECT(rtc_state),
+                                                "irq",
+                                                &error_fatal);
+        isa_connect_gpio_out(rtc_state, 0, irq);
+    }
+    object_property_add_alias(OBJECT(pcms), "rtc-time", OBJECT(rtc_state),
+                              "date");
 
 #ifdef CONFIG_XEN_EMU
     if (xen_mode == XEN_EMULATE) {
@@ -1331,7 +1341,7 @@ void pc_basic_device_init(struct PCMachineState *pcms,
     }
 #endif
 
-    qemu_register_boot_set(pc_boot_set, *rtc_state);
+    qemu_register_boot_set(pc_boot_set, rtc_state);
 
     if (!xen_enabled() &&
         (x86ms->pit == ON_OFF_AUTO_AUTO || x86ms->pit == ON_OFF_AUTO_ON)) {

@@ -32,6 +32,7 @@
 #include "hw/i386/pc.h"
 #include "hw/i386/apic.h"
 #include "hw/pci-host/i440fx.h"
+#include "hw/rtc/mc146818rtc.h"
 #include "hw/southbridge/piix.h"
 #include "hw/display/ramfb.h"
 #include "hw/firmware/smbios.h"
@@ -144,6 +145,7 @@ static void pc_init1(MachineState *machine,
     if (xen_enabled()) {
         xen_hvm_init_pc(pcms, &ram_memory);
     } else {
+        ram_memory = machine->ram;
         if (!pcms->max_ram_below_4g) {
             pcms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */
         }
@@ -198,7 +200,7 @@ static void pc_init1(MachineState *machine,
     if (pcmc->smbios_defaults) {
         MachineClass *mc = MACHINE_GET_CLASS(machine);
         /* These values are guest ABI, do not change */
-        smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)",
+        smbios_set_defaults("QEMU", mc->desc,
                             mc->name, pcmc->smbios_legacy_mode,
                             pcmc->smbios_uuid_encoded,
                             pcms->smbios_entry_point_type);
@@ -206,8 +208,7 @@ static void pc_init1(MachineState *machine,
 
     /* allocate ram and load rom/bios */
     if (!xen_enabled()) {
-        pc_memory_init(pcms, system_memory,
-                       rom_memory, &ram_memory, hole64_size);
+        pc_memory_init(pcms, system_memory, rom_memory, hole64_size);
     } else {
         pc_system_flash_cleanup_unused(pcms);
         if (machine->kernel_filename != NULL) {
@@ -240,10 +241,17 @@ static void pc_init1(MachineState *machine,
         piix3->pic = x86ms->gsi;
         piix3_devfn = piix3->dev.devfn;
         isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0"));
+        rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev),
+                                                             "rtc"));
     } else {
         pci_bus = NULL;
-        isa_bus = isa_bus_new(NULL, get_system_memory(), system_io,
+        isa_bus = isa_bus_new(NULL, system_memory, system_io,
                               &error_abort);
+
+        rtc_state = isa_new(TYPE_MC146818_RTC);
+        qdev_prop_set_int32(DEVICE(rtc_state), "base_year", 2000);
+        isa_realize_and_unref(rtc_state, isa_bus, &error_fatal);
+
         i8257_dma_init(isa_bus, 0);
         pcms->hpet_enabled = false;
     }
@@ -269,7 +277,7 @@ static void pc_init1(MachineState *machine,
     }
 
     /* init basic PC hardware */
-    pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, true,
+    pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, true,
                          0x4);
 
     pc_nic_init(pcmc, isa_bus, pci_bus);

@@ -126,10 +126,10 @@ static void pc_q35_init(MachineState *machine)
     DeviceState *lpc_dev;
     BusState *idebus[MAX_SATA_PORTS];
     ISADevice *rtc_state;
+    MemoryRegion *system_memory = get_system_memory();
     MemoryRegion *system_io = get_system_io();
     MemoryRegion *pci_memory;
     MemoryRegion *rom_memory;
-    MemoryRegion *ram_memory;
     GSIState *gsi_state;
     ISABus *isa_bus;
     int i;
@@ -192,14 +192,14 @@ static void pc_q35_init(MachineState *machine)
         rom_memory = pci_memory;
     } else {
         pci_memory = NULL;
-        rom_memory = get_system_memory();
+        rom_memory = system_memory;
     }
 
     pc_guest_info_init(pcms);
 
     if (pcmc->smbios_defaults) {
         /* These values are guest ABI, do not change */
-        smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)",
+        smbios_set_defaults("QEMU", mc->desc,
                             mc->name, pcmc->smbios_legacy_mode,
                             pcmc->smbios_uuid_encoded,
                             pcms->smbios_entry_point_type);
@@ -215,16 +215,15 @@ static void pc_q35_init(MachineState *machine)
     }
 
     /* allocate ram and load rom/bios */
-    pc_memory_init(pcms, get_system_memory(), rom_memory, &ram_memory,
-                   pci_hole64_size);
+    pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size);
 
-    object_property_add_child(qdev_get_machine(), "q35", OBJECT(q35_host));
+    object_property_add_child(OBJECT(machine), "q35", OBJECT(q35_host));
     object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM,
-                             OBJECT(ram_memory), NULL);
+                             OBJECT(machine->ram), NULL);
     object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_PCI_MEM,
                              OBJECT(pci_memory), NULL);
     object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_SYSTEM_MEM,
-                             OBJECT(get_system_memory()), NULL);
+                             OBJECT(system_memory), NULL);
     object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_IO_MEM,
                              OBJECT(system_io), NULL);
     object_property_set_int(OBJECT(q35_host), PCI_HOST_BELOW_4G_MEM_SIZE,
@@ -242,6 +241,8 @@ static void pc_q35_init(MachineState *machine)
                       x86_machine_is_smm_enabled(x86ms));
     pci_realize_and_unref(lpc, host_bus, &error_fatal);
 
+    rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc"));
+
     object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP,
                              TYPE_HOTPLUG_HANDLER,
                              (Object **)&x86ms->acpi_dev,
@@ -291,7 +292,7 @@ static void pc_q35_init(MachineState *machine)
     }
 
     /* init basic PC hardware */
-    pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, !mc->no_floppy,
+    pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, !mc->no_floppy,
                          0xff0104);
 
     if (pcms->sata_enabled) {

@@ -35,6 +35,7 @@ config PIIX3
     bool
     select I8257
     select ISA_BUS
+    select MC146818RTC
 
 config PIIX4
     bool
@@ -79,3 +80,4 @@ config LPC_ICH9
     select I8257
     select ISA_BUS
     select ACPI_ICH9
+    select MC146818RTC

@@ -658,6 +658,8 @@ static void ich9_lpc_initfn(Object *obj)
     static const uint8_t acpi_enable_cmd = ICH9_APM_ACPI_ENABLE;
     static const uint8_t acpi_disable_cmd = ICH9_APM_ACPI_DISABLE;
 
+    object_initialize_child(obj, "rtc", &lpc->rtc, TYPE_MC146818_RTC);
+
     object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT,
                                   &lpc->sci_gsi, OBJ_PROP_FLAG_READ);
     object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD,
@@ -723,6 +725,12 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
 
     i8257_dma_init(isa_bus, 0);
 
+    /* RTC */
+    qdev_prop_set_int32(DEVICE(&lpc->rtc), "base_year", 2000);
+    if (!qdev_realize(DEVICE(&lpc->rtc), BUS(isa_bus), errp)) {
+        return;
+    }
+
     pci_bus_irqs(pci_bus, ich9_lpc_set_irq, d, ICH9_LPC_NB_PIRQS);
     pci_bus_map_irqs(pci_bus, ich9_lpc_map_irq);
     pci_bus_set_route_irq_fn(pci_bus, ich9_route_intx_pin_to_irq);

@@ -28,6 +28,7 @@
 #include "hw/dma/i8257.h"
 #include "hw/southbridge/piix.h"
 #include "hw/irq.h"
+#include "hw/qdev-properties.h"
 #include "hw/isa/isa.h"
 #include "hw/xen/xen.h"
 #include "sysemu/runstate.h"
@@ -301,6 +302,12 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp)
                             PIIX_RCR_IOPORT, &d->rcr_mem, 1);
 
     i8257_dma_init(isa_bus, 0);
+
+    /* RTC */
+    qdev_prop_set_int32(DEVICE(&d->rtc), "base_year", 2000);
+    if (!qdev_realize(DEVICE(&d->rtc), BUS(isa_bus), errp)) {
+        return;
+    }
 }
 
 static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
@@ -324,6 +331,13 @@ static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
     qbus_build_aml(bus, scope);
 }
 
+static void pci_piix3_init(Object *obj)
+{
+    PIIX3State *d = PIIX3_PCI_DEVICE(obj);
+
+    object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC);
+}
+
 static void pci_piix3_class_init(ObjectClass *klass, void *data)
 {
@@ -350,6 +364,7 @@ static const TypeInfo piix3_pci_type_info = {
     .name = TYPE_PIIX3_PCI_DEVICE,
     .parent = TYPE_PCI_DEVICE,
     .instance_size = sizeof(PIIX3State),
+    .instance_init = pci_piix3_init,
     .abstract = true,
     .class_init = pci_piix3_class_init,
     .interfaces = (InterfaceInfo[]) {

View File

@ -31,7 +31,8 @@ enum {
}; };
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
int dsmad_handle, MemoryRegion *mr) int dsmad_handle, MemoryRegion *mr,
bool is_pmem, uint64_t dpa_base)
{ {
g_autofree CDATDsmas *dsmas = NULL; g_autofree CDATDsmas *dsmas = NULL;
g_autofree CDATDslbis *dslbis0 = NULL; g_autofree CDATDslbis *dslbis0 = NULL;
@ -50,9 +51,9 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
.length = sizeof(*dsmas), .length = sizeof(*dsmas),
}, },
.DSMADhandle = dsmad_handle, .DSMADhandle = dsmad_handle,
.flags = CDAT_DSMAS_FLAG_NV, .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
.DPA_base = 0, .DPA_base = dpa_base,
.DPA_length = int128_get64(mr->size), .DPA_length = memory_region_size(mr),
}; };
/* For now, no memory side cache, plausiblish numbers */ /* For now, no memory side cache, plausiblish numbers */
@ -130,10 +131,13 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
.length = sizeof(*dsemts), .length = sizeof(*dsemts),
}, },
.DSMAS_handle = dsmad_handle, .DSMAS_handle = dsmad_handle,
/* Reserved - the non volatile from DSMAS matters */ /*
.EFI_memory_type_attr = 2, * NV: Reserved - the non volatile from DSMAS matters
* V: EFI_MEMORY_SP
*/
.EFI_memory_type_attr = is_pmem ? 2 : 1,
.DPA_offset = 0, .DPA_offset = 0,
.DPA_length = int128_get64(mr->size), .DPA_length = memory_region_size(mr),
}; };
/* Header always at start of structure */ /* Header always at start of structure */
@ -150,33 +154,68 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{ {
g_autofree CDATSubHeader **table = NULL; g_autofree CDATSubHeader **table = NULL;
MemoryRegion *nonvolatile_mr;
CXLType3Dev *ct3d = priv; CXLType3Dev *ct3d = priv;
MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
int dsmad_handle = 0; int dsmad_handle = 0;
int rc; int cur_ent = 0;
int len = 0;
int rc, i;
if (!ct3d->hostmem) { if (!ct3d->hostpmem && !ct3d->hostvmem) {
return 0; return 0;
} }
nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem); if (ct3d->hostvmem) {
if (!nonvolatile_mr) { volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
return -EINVAL; if (!volatile_mr) {
return -EINVAL;
}
len += CT3_CDAT_NUM_ENTRIES;
} }
table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table)); if (ct3d->hostpmem) {
nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
if (!nonvolatile_mr) {
return -EINVAL;
}
len += CT3_CDAT_NUM_ENTRIES;
}
table = g_malloc0(len * sizeof(*table));
if (!table) { if (!table) {
return -ENOMEM; return -ENOMEM;
} }
rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr); /* Now fill them in */
if (rc < 0) { if (volatile_mr) {
return rc; rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
false, 0);
if (rc < 0) {
return rc;
}
cur_ent = CT3_CDAT_NUM_ENTRIES;
} }
if (nonvolatile_mr) {
rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
nonvolatile_mr, true,
(volatile_mr ?
memory_region_size(volatile_mr) : 0));
if (rc < 0) {
goto error_cleanup;
}
cur_ent += CT3_CDAT_NUM_ENTRIES;
}
assert(len == cur_ent);
*cdat_table = g_steal_pointer(&table); *cdat_table = g_steal_pointer(&table);
return CT3_CDAT_NUM_ENTRIES; return len;
error_cleanup:
for (i = 0; i < cur_ent; i++) {
g_free(table[i]);
}
return rc;
} }
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv) static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
@ -264,16 +303,42 @@ static void build_dvsecs(CXLType3Dev *ct3d)
{ {
CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
uint8_t *dvsec; uint8_t *dvsec;
uint32_t range1_size_hi, range1_size_lo,
range1_base_hi = 0, range1_base_lo = 0,
range2_size_hi = 0, range2_size_lo = 0,
range2_base_hi = 0, range2_base_lo = 0;
/*
* Volatile memory is mapped as (0x0)
* Persistent memory is mapped at (volatile->size)
*/
if (ct3d->hostvmem) {
range1_size_hi = ct3d->hostvmem->size >> 32;
range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostvmem->size & 0xF0000000);
if (ct3d->hostpmem) {
range2_size_hi = ct3d->hostpmem->size >> 32;
range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostpmem->size & 0xF0000000);
}
} else {
range1_size_hi = ct3d->hostpmem->size >> 32;
range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostpmem->size & 0xF0000000);
}
dvsec = (uint8_t *)&(CXLDVSECDevice){ dvsec = (uint8_t *)&(CXLDVSECDevice){
.cap = 0x1e, .cap = 0x1e,
.ctrl = 0x2, .ctrl = 0x2,
.status2 = 0x2, .status2 = 0x2,
.range1_size_hi = ct3d->hostmem->size >> 32, .range1_size_hi = range1_size_hi,
.range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | .range1_size_lo = range1_size_lo,
(ct3d->hostmem->size & 0xF0000000), .range1_base_hi = range1_base_hi,
.range1_base_hi = 0, .range1_base_lo = range1_base_lo,
.range1_base_lo = 0, .range2_size_hi = range2_size_hi,
.range2_size_lo = range2_size_lo,
.range2_base_hi = range2_base_hi,
.range2_base_lo = range2_base_lo,
}; };
cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
PCIE_CXL_DEVICE_DVSEC_LENGTH, PCIE_CXL_DEVICE_DVSEC_LENGTH,
@ -314,14 +379,32 @@ static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{ {
ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
uint32_t *cache_mem = cregs->cache_mem_registers; uint32_t *cache_mem = cregs->cache_mem_registers;
uint32_t ctrl;
assert(which == 0); assert(which == 0);
ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL);
/* TODO: Sanity checks that the decoder is possible */ /* TODO: Sanity checks that the decoder is possible */
ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl);
}
static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
uint32_t *cache_mem = cregs->cache_mem_registers;
uint32_t ctrl;
assert(which == 0);
ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL);
ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl);
} }
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err) static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
@ -392,6 +475,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate); CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
uint32_t *cache_mem = cregs->cache_mem_registers; uint32_t *cache_mem = cregs->cache_mem_registers;
bool should_commit = false; bool should_commit = false;
bool should_uncommit = false;
int which_hdm = -1; int which_hdm = -1;
assert(size == 4); assert(size == 4);
@ -400,6 +484,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
switch (offset) { switch (offset) {
case A_CXL_HDM_DECODER0_CTRL: case A_CXL_HDM_DECODER0_CTRL:
should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
should_uncommit = !should_commit;
which_hdm = 0; which_hdm = 0;
break; break;
case A_CXL_RAS_UNC_ERR_STATUS: case A_CXL_RAS_UNC_ERR_STATUS:
@ -486,42 +571,77 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
stl_le_p((uint8_t *)cache_mem + offset, value); stl_le_p((uint8_t *)cache_mem + offset, value);
if (should_commit) { if (should_commit) {
hdm_decoder_commit(ct3d, which_hdm); hdm_decoder_commit(ct3d, which_hdm);
} else if (should_uncommit) {
hdm_decoder_uncommit(ct3d, which_hdm);
} }
} }
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp) static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{ {
DeviceState *ds = DEVICE(ct3d); DeviceState *ds = DEVICE(ct3d);
MemoryRegion *mr;
char *name;
if (!ct3d->hostmem) { if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
error_setg(errp, "memdev property must be set"); error_setg(errp, "at least one memdev property must be set");
return false;
} else if (ct3d->hostmem && ct3d->hostpmem) {
error_setg(errp, "[memdev] cannot be used with new "
"[persistent-memdev] property");
return false;
} else if (ct3d->hostmem) {
/* Use of hostmem property implies pmem */
ct3d->hostpmem = ct3d->hostmem;
ct3d->hostmem = NULL;
}
if (ct3d->hostpmem && !ct3d->lsa) {
error_setg(errp, "lsa property must be set for persistent devices");
return false; return false;
} }
mr = host_memory_backend_get_memory(ct3d->hostmem); if (ct3d->hostvmem) {
if (!mr) { MemoryRegion *vmr;
error_setg(errp, "memdev property must be set"); char *v_name;
return false;
vmr = host_memory_backend_get_memory(ct3d->hostvmem);
if (!vmr) {
error_setg(errp, "volatile memdev must have backing device");
return false;
}
memory_region_set_nonvolatile(vmr, false);
memory_region_set_enabled(vmr, true);
host_memory_backend_set_mapped(ct3d->hostvmem, true);
if (ds->id) {
v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
} else {
v_name = g_strdup("cxl-type3-dpa-vmem-space");
}
address_space_init(&ct3d->hostvmem_as, vmr, v_name);
ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
g_free(v_name);
} }
memory_region_set_nonvolatile(mr, true);
memory_region_set_enabled(mr, true);
host_memory_backend_set_mapped(ct3d->hostmem, true);
if (ds->id) { if (ct3d->hostpmem) {
name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id); MemoryRegion *pmr;
} else { char *p_name;
name = g_strdup("cxl-type3-dpa-space");
}
address_space_init(&ct3d->hostmem_as, mr, name);
g_free(name);
ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size; pmr = host_memory_backend_get_memory(ct3d->hostpmem);
if (!pmr) {
if (!ct3d->lsa) { error_setg(errp, "persistent memdev must have backing device");
error_setg(errp, "lsa property must be set"); return false;
return false; }
memory_region_set_nonvolatile(pmr, true);
memory_region_set_enabled(pmr, true);
host_memory_backend_set_mapped(ct3d->hostpmem, true);
if (ds->id) {
p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
} else {
p_name = g_strdup("cxl-type3-dpa-pmem-space");
}
address_space_init(&ct3d->hostpmem_as, pmr, p_name);
ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
g_free(p_name);
} }
return true; return true;
@ -593,6 +713,9 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d; cxl_cstate->cdat.private = ct3d;
cxl_doe_cdat_init(cxl_cstate, errp); cxl_doe_cdat_init(cxl_cstate, errp);
if (*errp) {
goto err_free_special_ops;
}
pcie_cap_deverr_init(pci_dev); pcie_cap_deverr_init(pci_dev);
/* Leave a bit of room for expansion */ /* Leave a bit of room for expansion */
@ -605,9 +728,15 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
err_release_cdat: err_release_cdat:
cxl_doe_cdat_release(cxl_cstate); cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
g_free(regs->special_ops); g_free(regs->special_ops);
err_address_space_free: err_address_space_free:
address_space_destroy(&ct3d->hostmem_as); if (ct3d->hostpmem) {
address_space_destroy(&ct3d->hostpmem_as);
}
if (ct3d->hostvmem) {
address_space_destroy(&ct3d->hostvmem_as);
}
return; return;
} }
@ -620,7 +749,12 @@ static void ct3_exit(PCIDevice *pci_dev)
pcie_aer_exit(pci_dev); pcie_aer_exit(pci_dev);
cxl_doe_cdat_release(cxl_cstate); cxl_doe_cdat_release(cxl_cstate);
g_free(regs->special_ops); g_free(regs->special_ops);
address_space_destroy(&ct3d->hostmem_as); if (ct3d->hostpmem) {
address_space_destroy(&ct3d->hostpmem_as);
}
if (ct3d->hostvmem) {
address_space_destroy(&ct3d->hostvmem_as);
}
} }
/* TODO: Support multiple HDM decoders and DPA skip */ /* TODO: Support multiple HDM decoders and DPA skip */
@ -655,51 +789,77 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
return true; return true;
} }
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
hwaddr host_addr,
unsigned int size,
AddressSpace **as,
uint64_t *dpa_offset)
{
MemoryRegion *vmr = NULL, *pmr = NULL;
if (ct3d->hostvmem) {
vmr = host_memory_backend_get_memory(ct3d->hostvmem);
}
if (ct3d->hostpmem) {
pmr = host_memory_backend_get_memory(ct3d->hostpmem);
}
if (!vmr && !pmr) {
return -ENODEV;
}
if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
return -EINVAL;
}
if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
return -EINVAL;
}
if (vmr) {
if (*dpa_offset < memory_region_size(vmr)) {
*as = &ct3d->hostvmem_as;
} else {
*as = &ct3d->hostpmem_as;
*dpa_offset -= memory_region_size(vmr);
}
} else {
*as = &ct3d->hostpmem_as;
}
return 0;
}
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
unsigned size, MemTxAttrs attrs)
{
uint64_t dpa_offset = 0;
AddressSpace *as = NULL;
int res;
res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
&as, &dpa_offset);
if (res) {
return MEMTX_ERROR;
}
return address_space_read(as, dpa_offset, attrs, data, size);
}
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
unsigned size, MemTxAttrs attrs)
{
uint64_t dpa_offset = 0;
AddressSpace *as = NULL;
int res;
res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
&as, &dpa_offset);
if (res) {
return MEMTX_ERROR;
}
return address_space_write(as, dpa_offset, attrs, &data, size);
}
static void ct3d_reset(DeviceState *dev)
@ -714,7 +874,11 @@ static void ct3d_reset(DeviceState *dev)
static Property ct3_props[] = {
DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
HostMemoryBackend *), /* for backward compatibility */
DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
HostMemoryBackend *),
DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
@ -726,6 +890,10 @@ static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
MemoryRegion *mr;
if (!ct3d->lsa) {
return 0;
}
mr = host_memory_backend_get_memory(ct3d->lsa);
return memory_region_size(mr);
}
@ -743,6 +911,10 @@ static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
MemoryRegion *mr;
void *lsa;
if (!ct3d->lsa) {
return 0;
}
mr = host_memory_backend_get_memory(ct3d->lsa);
validate_lsa_access(mr, size, offset);
@ -758,6 +930,10 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
MemoryRegion *mr;
void *lsa;
if (!ct3d->lsa) {
return;
}
mr = host_memory_backend_get_memory(ct3d->lsa);
validate_lsa_access(mr, size, offset);
@ -929,7 +1105,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)
pc->config_read = ct3d_config_read;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "CXL Memory Device (Type 3)";
dc->reset = ct3d_reset;
device_class_set_props(dc, ct3_props);
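A note on the routing done by cxl_type3_hpa_to_as_and_dpa() above: when both backends are present, the volatile region always occupies the bottom of the device physical address space and the persistent region follows it. A minimal standalone sketch of that decision, not part of the patch; the 256 MiB sizes and the DPA value are assumptions chosen for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t vmem_size = 256 * 1024 * 1024;   /* assumed volatile backend size */
    const uint64_t pmem_size = 256 * 1024 * 1024;   /* assumed persistent backend size */
    const uint64_t dpa = 0x10001000;                /* hypothetical device physical address */

    if (dpa >= vmem_size + pmem_size) {
        /* Beyond both regions: the device would signal an error */
        printf("DPA 0x%" PRIx64 " is out of range\n", dpa);
    } else if (dpa < vmem_size) {
        /* Lands in the volatile region: offset into hostvmem_as */
        printf("volatile region, offset 0x%" PRIx64 "\n", dpa);
    } else {
        /* Lands in the persistent region: rebase past the volatile size */
        printf("persistent region, offset 0x%" PRIx64 "\n", dpa - vmem_size);
    }
    return 0;
}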

View File

@ -805,7 +805,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
}
if (!get_vhost_net(nc->peer)) {
virtio_add_feature(&features, VIRTIO_F_RING_RESET);
return features;
}

View File

@ -3,6 +3,11 @@ config PCIE_PORT
default y if PCI_DEVICES
depends on PCI_EXPRESS && MSI_NONBROKEN
config PCIE_PCI_BRIDGE
bool
default y if PCIE_PORT
depends on PCIE_PORT
config PXB
bool
default y if Q35 || ARM_VIRT

View File

@ -346,6 +346,9 @@ static void cxl_usp_realize(PCIDevice *d, Error **errp)
cxl_cstate->cdat.free_cdat_table = free_default_cdat_table;
cxl_cstate->cdat.private = d;
cxl_doe_cdat_init(cxl_cstate, errp);
if (*errp) {
goto err_cap;
}
return;

View File

@ -2,7 +2,8 @@ pci_ss = ss.source_set()
pci_ss.add(files('pci_bridge_dev.c'))
pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c'))
pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c'))
pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c'))
pci_ss.add(when: 'CONFIG_PCIE_PCI_BRIDGE', if_true: files('pcie_pci_bridge.c'))
pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'),
if_false: files('pci_expander_bridge_stubs.c'))
pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c'))

View File

@ -27,6 +27,7 @@
#include "qemu/range.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/pci-host/i440fx.h"
#include "hw/qdev-properties.h"
@ -217,10 +218,10 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
PCIHostState *s = PCI_HOST_BRIDGE(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
memory_region_add_subregion(s->bus->address_space_io, 0xcf8, &s->conf_mem);
sysbus_init_ioports(sbd, 0xcf8, 4);
memory_region_add_subregion(s->bus->address_space_io, 0xcfc, &s->data_mem);
sysbus_init_ioports(sbd, 0xcfc, 4);
/* register i440fx 0xcf8 port as coalesced pio */
@ -291,12 +292,12 @@ PCIBus *i440fx_init(const char *pci_type,
object_property_add_const_link(qdev_get_machine(), "smram",
OBJECT(&f->smram));
init_pam(&f->pam_regions[0], OBJECT(d), f->ram_memory, f->system_memory,
f->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE);
for (i = 0; i < ARRAY_SIZE(f->pam_regions) - 1; ++i) {
init_pam(&f->pam_regions[i + 1], OBJECT(d), f->ram_memory,
f->system_memory, f->pci_address_space,
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
}
ram_size = ram_size / 8 / 1024 / 1024;

View File

@ -30,24 +30,24 @@
#include "qemu/osdep.h"
#include "hw/pci-host/pam.h"
void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram_memory,
MemoryRegion *system_memory, MemoryRegion *pci_address_space,
uint32_t start, uint32_t size)
{
int i;
/* RAM */
memory_region_init_alias(&mem->alias[3], owner, "pam-ram", ram_memory,
start, size);
/* ROM (XXX: not quite correct) */
memory_region_init_alias(&mem->alias[1], owner, "pam-rom", ram_memory,
start, size);
memory_region_set_readonly(&mem->alias[1], true);
/* XXX: should distinguish read/write cases */
memory_region_init_alias(&mem->alias[0], owner, "pam-pci", pci_address_space,
start, size);
memory_region_init_alias(&mem->alias[2], owner, "pam-pci", ram_memory,
start, size);
memory_region_transaction_begin();

View File

@ -50,10 +50,12 @@ static void q35_host_realize(DeviceState *dev, Error **errp)
Q35PCIHost *s = Q35_HOST_DEVICE(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
memory_region_add_subregion(s->mch.address_space_io,
MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem);
sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, 4);
memory_region_add_subregion(s->mch.address_space_io,
MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem);
sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4);
/* register q35 0xcf8 port as coalesced pio */
@ -643,12 +645,12 @@ static void mch_realize(PCIDevice *d, Error **errp)
object_property_add_const_link(qdev_get_machine(), "smram",
OBJECT(&mch->smram));
init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory,
mch->system_memory, mch->pci_address_space,
PAM_BIOS_BASE, PAM_BIOS_SIZE);
for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) {
init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory,
mch->system_memory, mch->pci_address_space,
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
}
}

View File

@ -79,6 +79,8 @@ static Property pci_props[] = {
DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
failover_pair_id),
DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
DEFINE_PROP_END_OF_LIST()
};
@ -2307,15 +2309,14 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
Error **errp)
{
int64_t size;
g_autofree char *path = NULL;
void *ptr;
char name[32];
const VMStateDescription *vmsd;
if (!pdev->romfile || !strlen(pdev->romfile)) {
return;
}
if (!pdev->rom_bar) {
/*
@ -2350,23 +2351,20 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
size = get_image_size(path);
if (size < 0) {
error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
return;
} else if (size == 0) {
error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
return;
} else if (size > 2 * GiB) {
error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)",
pdev->romfile);
return;
}
if (pdev->romsize != -1) {
if (size > pdev->romsize) {
error_setg(errp, "romfile \"%s\" (%u bytes) "
"is too large for ROM size %u",
pdev->romfile, (uint32_t)size, pdev->romsize);
return;
}
} else {
@ -2374,21 +2372,18 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
}
vmsd = qdev_get_vmsd(DEVICE(pdev));
if (vmsd) {
snprintf(name, sizeof(name), "%s.rom", vmsd->name);
} else {
snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev)));
}
pdev->has_rom = true;
memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
&error_fatal);
ptr = memory_region_get_ram_ptr(&pdev->rom);
if (load_image_size(path, ptr, size) < 0) {
error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
return;
}
if (is_default_rom) {
/* Only the default rom images will be patched (if needed). */

View File

@ -112,10 +112,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
PCI_ERR_UNC_SUPPORTED);
if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) {
pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
PCI_ERR_UNC_MASK_DEFAULT);
pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
PCI_ERR_UNC_SUPPORTED);
}
pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
PCI_ERR_UNC_SEVERITY_DEFAULT);

View File

@ -68,7 +68,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
*/
static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
return svq->num_free;
}
/**
@ -263,6 +263,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -EINVAL;
}
svq->num_free -= ndescs;
svq->desc_state[qemu_head].elem = elem;
svq->desc_state[qemu_head].ndescs = ndescs;
vhost_svq_kick(svq);
@ -449,6 +450,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
svq->desc_next[last_used_chain] = svq->free_head;
svq->free_head = used_elem.id;
svq->num_free += num;
*len = used_elem.len;
return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
@ -659,6 +661,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->iova_tree = iova_tree;
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
driver_size = vhost_svq_driver_area_size(svq);
device_size = vhost_svq_device_area_size(svq);
svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);

View File

@ -107,6 +107,9 @@ typedef struct VhostShadowVirtqueue {
/* Next head to consume from the device */
uint16_t last_used_idx;
/* Size of SVQ vring free descriptors */
uint16_t num_free;
} VhostShadowVirtqueue;
bool vhost_svq_valid_features(uint64_t features, Error **errp);

View File

@ -42,17 +42,7 @@
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_BACKEND_MAX_FDS 8
#if defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
@ -2677,7 +2667,20 @@ static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
VIRTIO_CONFIG_S_DRIVER |
VIRTIO_CONFIG_S_DRIVER_OK);
} else {
return 0;
}
}
static void vhost_user_reset_status(struct vhost_dev *dev)
{
/* Set device status only for last queue pair */
if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
return;
}
if (virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_STATUS)) {
vhost_user_set_status(dev, 0);
}
}
@ -2716,4 +2719,5 @@ const VhostOps user_ops = {
.vhost_get_inflight_fd = vhost_user_get_inflight_fd,
.vhost_set_inflight_fd = vhost_user_set_inflight_fd,
.vhost_dev_start = vhost_user_dev_start,
.vhost_reset_status = vhost_user_reset_status,
};

View File

@ -26,6 +26,7 @@
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-access.h"
/*
* Return one past the end of the end of section. Be careful with uint64_t
@ -60,13 +61,21 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
iova_min, section->offset_within_address_space);
return true;
}
/*
* While using vIOMMU, sometimes the section will be larger than iova_max,
* but the memory that actually maps is smaller, so move the check to
* function vhost_vdpa_iommu_map_notify(). That function will use the actual
* size that maps to the kernel
*/
if (!memory_region_is_iommu(section->mr)) {
llend = vhost_vdpa_section_end(section);
if (int128_gt(llend, int128_make64(iova_max))) {
error_report("RAM section out of device range (max=0x%" PRIx64
", end addr=0x%" PRIx64 ")",
iova_max, int128_get64(llend));
return true;
}
}
return false;
@ -185,6 +194,115 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
v->iotlb_batch_begin_sent = false;
}
static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
hwaddr iova = iotlb->iova + iommu->iommu_offset;
struct vhost_vdpa *v = iommu->dev;
void *vaddr;
int ret;
Int128 llend;
if (iotlb->target_as != &address_space_memory) {
error_report("Wrong target AS \"%s\", only system memory is allowed",
iotlb->target_as->name ? iotlb->target_as->name : "none");
return;
}
RCU_READ_LOCK_GUARD();
/* check if RAM section out of device range */
llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
if (int128_gt(llend, int128_make64(v->iova_range.last))) {
error_report("RAM section out of device range (max=0x%" PRIx64
", end addr=0x%" PRIx64 ")",
v->iova_range.last, int128_get64(llend));
return;
}
if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
bool read_only;
if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
return;
}
ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
iotlb->addr_mask + 1, vaddr, read_only);
if (ret) {
error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
"0x%" HWADDR_PRIx ", %p) = %d (%m)",
v, iova, iotlb->addr_mask + 1, vaddr, ret);
}
} else {
ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
iotlb->addr_mask + 1);
if (ret) {
error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
"0x%" HWADDR_PRIx ") = %d (%m)",
v, iova, iotlb->addr_mask + 1, ret);
}
}
}
static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
struct vdpa_iommu *iommu;
Int128 end;
int iommu_idx;
IOMMUMemoryRegion *iommu_mr;
int ret;
iommu_mr = IOMMU_MEMORY_REGION(section->mr);
iommu = g_malloc0(sizeof(*iommu));
end = int128_add(int128_make64(section->offset_within_region),
section->size);
end = int128_sub(end, int128_one());
iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
MEMTXATTRS_UNSPECIFIED);
iommu->iommu_mr = iommu_mr;
iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
IOMMU_NOTIFIER_IOTLB_EVENTS,
section->offset_within_region,
int128_get64(end),
iommu_idx);
iommu->iommu_offset = section->offset_within_address_space -
section->offset_within_region;
iommu->dev = v;
ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
if (ret) {
g_free(iommu);
return;
}
QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
return;
}
static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
struct vdpa_iommu *iommu;
QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
{
if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
iommu->n.start == section->offset_within_region) {
memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
QLIST_REMOVE(iommu, iommu_next);
g_free(iommu);
break;
}
}
}
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
@ -199,6 +317,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
v->iova_range.last)) {
return;
}
if (memory_region_is_iommu(section->mr)) {
vhost_vdpa_iommu_region_add(listener, section);
return;
}
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
@ -278,6 +400,9 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
v->iova_range.last)) {
return;
}
if (memory_region_is_iommu(section->mr)) {
vhost_vdpa_iommu_region_del(listener, section);
}
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
@ -288,7 +413,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
llend = vhost_vdpa_section_end(section);
trace_vhost_vdpa_listener_region_del(v, iova,
int128_get64(int128_sub(llend, int128_one())));
if (int128_ge(int128_make64(iova), llend)) {
return;
@ -315,10 +441,28 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
vhost_iova_tree_remove(v->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(v);
/*
* The unmap ioctl doesn't accept a full 64-bit. need to check it
*/
if (int128_eq(llsize, int128_2_64())) {
llsize = int128_rshift(llsize, 1);
ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
int128_get64(llsize));
if (ret) {
error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
"0x%" HWADDR_PRIx ") = %d (%m)",
v, iova, int128_get64(llsize), ret);
}
iova += int128_get64(llsize);
}
ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
int128_get64(llsize));
if (ret) {
error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
"0x%" HWADDR_PRIx ") = %d (%m)",
v, iova, int128_get64(llsize), ret);
}
memory_region_unref(section->mr);
@ -1163,7 +1307,13 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
}
if (started) {
if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
error_report("SVQ can not work while IOMMU enable, please disable"
"IOMMU and try again");
return -1;
}
memory_listener_register(&v->listener, dev->vdev->dma_as);
return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
}
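The region_del path above splits a full 64-bit range in two because the unmap ioctl takes the size in a 64-bit field and 2^64 itself is not representable there. A standalone sketch of the arithmetic, purely illustrative and not QEMU code; unsigned __int128 is a GCC/Clang extension standing in for QEMU's Int128:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* llsize == 2^64: one bit too wide for a uint64_t size argument. */
    unsigned __int128 llsize = (unsigned __int128)1 << 64;
    uint64_t half = (uint64_t)(llsize >> 1);    /* 2^63, representable */
    uint64_t iova = 0;

    /* First half of the range. */
    printf("first unmap:  iova=0x%016" PRIx64 " size=0x%016" PRIx64 "\n",
           iova, half);
    /* Advance the IOVA and unmap the second half. */
    iova += half;
    printf("second unmap: iova=0x%016" PRIx64 " size=0x%016" PRIx64 "\n",
           iova, half);
    return 0;
}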

View File

@ -107,7 +107,7 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
}
}
bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
VirtIODevice *vdev = dev->vdev;

View File

@ -476,15 +476,17 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
size_t max_len;
CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;
if (op_info) {
max_len = op_info->iv_len +
op_info->aad_len +
op_info->src_len +
op_info->dst_len +
op_info->digest_result_len;
/* Zeroize and free request data structure */
memset(op_info, 0, sizeof(*op_info) + max_len);
g_free(op_info);
}
} else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
if (op_info) {

View File

@ -1341,7 +1341,7 @@ static Property virtio_mem_properties[] = {
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM,
unplugged_inaccessible, ON_OFF_AUTO_ON),
#endif
DEFINE_PROP_BOOL(VIRTIO_MEM_EARLY_MIGRATION_PROP, VirtIOMEM,
early_migration, true),

View File

@ -716,6 +716,38 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
}
}
static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
vdev->device_iotlb_enabled = enable;
if (k->toggle_device_iotlb) {
k->toggle_device_iotlb(vdev);
}
}
static void pcie_ats_config_write(PCIDevice *dev, uint32_t address,
uint32_t val, int len)
{
uint32_t off;
uint16_t ats_cap = dev->exp.ats_cap;
if (!ats_cap || address < ats_cap) {
return;
}
off = address - ats_cap;
if (off >= PCI_EXT_CAP_ATS_SIZEOF) {
return;
}
if (range_covers_byte(off, len, PCI_ATS_CTRL + 1)) {
virtio_pci_ats_ctrl_trigger(dev, !!(val & PCI_ATS_CTRL_ENABLE));
}
}
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
uint32_t val, int len)
{
@ -729,6 +761,10 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
pcie_cap_flr_write_config(pci_dev, address, val, len);
}
if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
pcie_ats_config_write(pci_dev, address, val, len);
}
if (range_covers_byte(address, len, PCI_COMMAND)) {
if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
virtio_set_disabled(vdev, true);

View File

@ -119,8 +119,10 @@ typedef struct cxl_device_state {
uint64_t host_set;
} timestamp;
/* memory region size, HDM */
uint64_t mem_size;
uint64_t pmem_size;
uint64_t vmem_size;
} CXLDeviceState;
/* Initialize the register block for a device */
@ -245,12 +247,15 @@ struct CXLType3Dev {
PCIDevice parent_obj;
/* Properties */
HostMemoryBackend *hostmem; /* deprecated */
HostMemoryBackend *hostvmem;
HostMemoryBackend *hostpmem;
HostMemoryBackend *lsa;
uint64_t sn;
/* State */
AddressSpace hostvmem_as;
AddressSpace hostpmem_as;
CXLComponentState cxl_cstate;
CXLDeviceState cxl_dstate;
@ -282,4 +287,6 @@ MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
unsigned size, MemTxAttrs attrs);
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds);
#endif

View File

@ -162,13 +162,12 @@ void xen_load_linux(PCMachineState *pcms);
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
uint64_t pci_hole64_size);
uint64_t pc_pci_hole64_start(void);
DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus);
void pc_basic_device_init(struct PCMachineState *pcms,
ISABus *isa_bus, qemu_irq *gsi,
ISADevice *rtc_state,
bool create_fdctrl,
uint32_t hpet_irqs);
void pc_cmos_init(PCMachineState *pcms,

View File

@ -87,8 +87,9 @@ typedef struct PAMMemoryRegion {
unsigned current;
} PAMMemoryRegion;
void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram,
MemoryRegion *system, MemoryRegion *pci,
uint32_t start, uint32_t size);
void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val);
#endif /* QEMU_PAM_H */

View File

@ -207,6 +207,8 @@ enum {
QEMU_PCIE_EXTCAP_INIT = (1 << QEMU_PCIE_EXTCAP_INIT_BITNR),
#define QEMU_PCIE_CXL_BITNR 10
QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR),
#define QEMU_PCIE_ERR_UNC_MASK_BITNR 11
QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR),
};
typedef struct PCIINTxRoute {

View File

@ -6,6 +6,7 @@
#include "hw/intc/ioapic.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_device.h"
#include "hw/rtc/mc146818rtc.h"
#include "exec/memory.h"
#include "qemu/notify.h"
#include "qom/object.h"
@ -30,6 +31,7 @@ struct ICH9LPCState {
*/
uint8_t irr[PCI_SLOT_MAX][PCI_NUM_PINS];
MC146818RtcState rtc;
APMState apm;
ICH9LPCPMRegs pm;
uint32_t sci_level; /* track sci level */

View File

@ -13,6 +13,7 @@
#define HW_SOUTHBRIDGE_PIIX_H
#include "hw/pci/pci_device.h"
#include "hw/rtc/mc146818rtc.h"
/* PIRQRC[A:D]: PIRQx Route Control Registers */
#define PIIX_PIRQCA 0x60
@ -51,6 +52,8 @@ struct PIIXState {
/* This member isn't used. Just for save/load compatibility */
int32_t pci_irq_levels_vmstate[PIIX_NUM_PIRQS];
MC146818RtcState rtc;
/* Reset Control Register contents */
uint8_t rcr;

View File

@ -52,6 +52,8 @@ typedef struct vhost_vdpa {
struct vhost_dev *dev;
Error *migration_blocker;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
QLIST_HEAD(, vdpa_iommu) iommu_list;
IOMMUNotifier n;
} VhostVDPA;
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
@ -61,4 +63,13 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
hwaddr size);
typedef struct vdpa_iommu {
struct vhost_vdpa *dev;
IOMMUMemoryRegion *iommu_mr;
hwaddr iommu_offset;
IOMMUNotifier n;
QLIST_ENTRY(vdpa_iommu) iommu_next;
} VDPAIOMMUState;
#endif

View File

@ -336,4 +336,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
struct vhost_inflight *inflight);
bool vhost_dev_has_iommu(struct vhost_dev *dev);
#endif

View File

@ -155,6 +155,7 @@ struct VirtIODevice
QLIST_HEAD(, VirtQueue) *vector_queues;
QTAILQ_ENTRY(VirtIODevice) next;
EventNotifier config_notifier;
bool device_iotlb_enabled;
};
struct VirtioDeviceClass {
@ -212,6 +213,7 @@ struct VirtioDeviceClass {
const VMStateDescription *vmsd;
bool (*primary_unplug_pending)(void *opaque);
struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
void (*toggle_device_iotlb)(VirtIODevice *vdev);
};
void virtio_instance_init_common(Object *proxy_obj, void *data,

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1867,13 +1867,13 @@ static void test_acpi_q35_cxl(void)
" -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1"
" -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2"
" -device cxl-rp,port=0,bus=cxl.1,id=rp1,chassis=0,slot=2"
" -device cxl-type3,bus=rp1,persistent-memdev=cxl-mem1,lsa=lsa1"
" -device cxl-rp,port=1,bus=cxl.1,id=rp2,chassis=0,slot=3"
" -device cxl-type3,bus=rp2,persistent-memdev=cxl-mem2,lsa=lsa2"
" -device cxl-rp,port=0,bus=cxl.2,id=rp3,chassis=0,slot=5"
" -device cxl-type3,bus=rp3,persistent-memdev=cxl-mem3,lsa=lsa3"
" -device cxl-rp,port=1,bus=cxl.2,id=rp4,chassis=0,slot=6"
" -device cxl-type3,bus=rp4,persistent-memdev=cxl-mem4,lsa=lsa4"
" -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k,"
"cxl-fmw.1.targets.0=cxl.1,cxl-fmw.1.targets.1=cxl.2,cxl-fmw.1.size=4G,cxl-fmw.1.interleave-granularity=8k",
tmp_path, tmp_path, tmp_path, tmp_path,

View File

@ -8,50 +8,72 @@
#include "qemu/osdep.h"
#include "libqtest-single.h"
#define QEMU_PXB_CMD \
"-machine q35,cxl=on " \
"-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \
"-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.size=4G "
#define QEMU_2PXB_CMD \
"-machine q35,cxl=on " \
"-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \
"-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \
"-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G "
#define QEMU_RP \
"-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 "
/* Dual ports on first pxb */
#define QEMU_2RP \
"-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \
"-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 "
/* Dual ports on each of the pxb instances */
#define QEMU_4RP \
"-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \
"-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " \
"-device cxl-rp,id=rp2,bus=cxl.1,chassis=0,slot=2 " \
"-device cxl-rp,id=rp3,bus=cxl.1,chassis=0,slot=3 "
#define QEMU_T3D_DEPRECATED \
"-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 "
#define QEMU_T3D_PMEM \
"-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp0,persistent-memdev=cxl-mem0,lsa=lsa0,id=pmem0 "
#define QEMU_T3D_VMEM \
"-object memory-backend-ram,id=cxl-mem0,size=256M " \
"-device cxl-type3,bus=rp0,volatile-memdev=cxl-mem0,id=mem0 "
#define QEMU_T3D_VMEM_LSA \
"-object memory-backend-ram,id=cxl-mem0,size=256M " \
"-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp0,volatile-memdev=cxl-mem0,lsa=lsa0,id=mem0 "
#define QEMU_2T3D \
"-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp0,persistent-memdev=cxl-mem0,lsa=lsa0,id=pmem0 " \
"-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp1,persistent-memdev=cxl-mem1,lsa=lsa1,id=pmem1 "
#define QEMU_4T3D \
"-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp0,persistent-memdev=cxl-mem0,lsa=lsa0,id=pmem0 " \
"-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp1,persistent-memdev=cxl-mem1,lsa=lsa1,id=pmem1 " \
"-object memory-backend-file,id=cxl-mem2,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa2,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp2,persistent-memdev=cxl-mem2,lsa=lsa2,id=pmem2 " \
"-object memory-backend-file,id=cxl-mem3,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa3,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp3,persistent-memdev=cxl-mem3,lsa=lsa3,id=pmem3 "
static void cxl_basic_hb(void)
{
@ -90,14 +112,53 @@ static void cxl_2root_port(void)
}
#ifdef CONFIG_POSIX
static void cxl_t3d_deprecated(void)
{
g_autoptr(GString) cmdline = g_string_new(NULL);
g_autofree const char *tmpfs = NULL;
tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL);
g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_DEPRECATED,
tmpfs, tmpfs);
qtest_start(cmdline->str);
qtest_end();
}
static void cxl_t3d_persistent(void)
{
g_autoptr(GString) cmdline = g_string_new(NULL);
g_autofree const char *tmpfs = NULL;
tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL);
g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_PMEM,
tmpfs, tmpfs);
qtest_start(cmdline->str);
qtest_end();
}
static void cxl_t3d_volatile(void)
{
g_autoptr(GString) cmdline = g_string_new(NULL);
g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_VMEM);
qtest_start(cmdline->str);
qtest_end();
}
static void cxl_t3d_volatile_lsa(void)
{
g_autoptr(GString) cmdline = g_string_new(NULL);
g_autofree const char *tmpfs = NULL;
tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL);
g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_VMEM_LSA,
tmpfs);
qtest_start(cmdline->str);
qtest_end();
@ -147,7 +208,10 @@ int main(int argc, char **argv)
qtest_add_func("/pci/cxl/rp", cxl_root_port);
qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port);
#ifdef CONFIG_POSIX
qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated);
qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent);
qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile);
qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa);
qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d);
qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", cxl_2pxb_4rp_4t3d);
#endif
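The new qtest cases above exercise the volatile-only, volatile-plus-LSA and persistent-only configurations separately. A single cxl-type3 device can also be given both a volatile and a persistent backend at once, with the volatile region sitting at the bottom of the DPA space; a hypothetical extra macro in the same style as the ones in cxl-test.c (the names, sizes and id strings are illustrative only, not part of this series):

/* Hypothetical combined volatile + persistent type 3 device */
#define QEMU_T3D_VMEM_PMEM \
"-object memory-backend-ram,id=cxl-vmem0,size=256M " \
"-object memory-backend-file,id=cxl-pmem0,mem-path=%s,size=256M " \
"-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \
"-device cxl-type3,bus=rp0,volatile-memdev=cxl-vmem0," \
"persistent-memdev=cxl-pmem0,lsa=lsa0,id=mem0 "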