ppc patch queue for 2019-08-21

First ppc and spapr pull request for qemu-4.2.  Includes:
    * Some TCG emulation fixes and performance improvements
    * Support for the mffsl instruction in TCG
    * Added missing DPDES SPR
    * Some enhancements to the emulation of the XIVE interrupt
      controller
    * Cleanups to spapr MSI management
    * Some new suspend/resume infrastructure and a draft suspend
      implementation for spapr
    * New spapr hypercall for TPM communication (will be needed for
      secure guests under an Ultravisor)
    * Fix several memory leaks
 
 And a few other assorted fixes.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAl1c8bwACgkQbDjKyiDZ
 s5Ko6hAA1Y1xOreKTUP9UtAIaipfdasOMOcGYQ+MMovh05Zn0CwmB0uukeIzbnhi
 hU3qMue6Q0EAt5F9d9z4YWRZqkgsAOBd7SVHpSouoY6DOtIsL9Tc0jTrpr6z8t0L
 j4TYZYlJUybKMocj/8YayTALMZf2myh5A+oxDGPQHqYNWYGCEcttsFbcoeWQbAXG
 eXrGDuSzXDXJSKej99ty/tpSjbJXDbRcvMv+v3v6F+tHWhNke3Ku8s7niDy3fIZU
 lU1Sbz0/UnjKXpCWI/WRBFFWrr1bYICvKPzjK1tNJgA/HhAp37IIsF/j/5kmmF0Y
 dxOCf3kRBhGi5/KKDFrVWwdTiU0CdJ4iF/NvaNlZGZ+oSTZzANz6O/nlAjcBlbt6
 nAJRB4irKkDpL0slwDhl+oF73kFXMUokNgqeaMXE03agMapHrHfmxHs7yL5lAnxf
 I0hyfAUYTZBc1yd8dxEtmEoFYGE9OXU5jZC4BcV8GcrT1tK3ZVzsALetRF2Sm1wm
 wW16B0V6szsDd67cwJdPIs3tR6ZSxX2D6/vhK4mK77TM9TAN7nEMJBFNwjNbnttD
 QLRhFnIZQ61Ja+tDI0aV37bSM32Mi43bYRksh2FujgaYpX92Z0QfsDf9NtM9yQab
 Ihbq7KJ/bK4m9OvmWTUO4CKrCbnzMEzL+ncFamoO2PcvG9uTk+M=
 =E+7d
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.2-20190821' into staging

ppc patch queue for 2019-08-21

First ppc and spapr pull request for qemu-4.2.  Includes:
   * Some TCG emulation fixes and performance improvements
   * Support for the mffsl instruction in TCG
   * Added missing DPDES SPR
   * Some enhancements to the emulation of the XIVE interrupt
     controller
   * Cleanups to spapr MSI management
   * Some new suspend/resume infrastructure and a draft suspend
     implementation for spapr
   * New spapr hypercall for TPM communication (will be needed for
     secure guests under an Ultravisor)
   * Fix several memory leaks

And a few other assorted fixes.

# gpg: Signature made Wed 21 Aug 2019 08:24:44 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.2-20190821: (42 commits)
  ppc: Fix emulated single to double denormalized conversions
  ppc: Fix emulated INFINITY and NAN conversions
  ppc: conform to processor User's Manual for xscvdpspn
  ppc: Add support for 'mffsl' instruction
  target/ppc: Add Directed Privileged Door-bell Exception State (DPDES) SPR
  spapr/xive: Mask the EAS when allocating an IRQ
  spapr: Implement better workaround in spapr-vty device
  spapr/irq: Drop spapr_irq_msi_reset()
  spapr/pci: Free MSIs during reset
  spapr/pci: Consolidate de-allocation of MSIs
  ppc: remove idle_timer logic
  spapr: Implement ibm,suspend-me
  i386: use machine class ->wakeup method
  machine: Add wakeup method to MachineClass
  ppc/xive: Improve 'info pic' support
  ppc/xive: Provide silent escalation support
  ppc/xive: Provide unconditional escalation support
  ppc/xive: Provide escalation support
  ppc/xive: Provide backlog support
  ppc/xive: Implement TM_PULL_OS_CTX special command
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2019-08-21 14:04:16 +01:00
commit e65472c7bc
47 changed files with 1419 additions and 613 deletions

View File

@ -1765,6 +1765,9 @@ extract_tbr (unsigned long insn,
/* An X_MASK with the RA and RB fields fixed. */ /* An X_MASK with the RA and RB fields fixed. */
#define XRARB_MASK (X_MASK | RA_MASK | RB_MASK) #define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
/* An X form instruction with the RA field fixed. */
#define XRA(op, xop, ra) (X((op), (xop)) | (((ra) << 16) & XRA_MASK))
/* An XRARB_MASK, but with the L bit clear. */ /* An XRARB_MASK, but with the L bit clear. */
#define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16)) #define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16))
@ -4998,6 +5001,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "ddivq", XRC(63,546,0), X_MASK, POWER6, { FRT, FRA, FRB } }, { "ddivq", XRC(63,546,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "ddivq.", XRC(63,546,1), X_MASK, POWER6, { FRT, FRA, FRB } }, { "ddivq.", XRC(63,546,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "mffsl", XRA(63,583,12), XRARB_MASK, POWER9, { FRT } },
{ "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } }, { "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } },
{ "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } }, { "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } },

View File

@ -0,0 +1,76 @@
On PPC64 systems supporting Protected Execution Facility (PEF), system
memory can be placed in a secured region where only an "ultravisor"
running in firmware can provide access to it. pseries guests on such
systems can communicate with the ultravisor (via ultracalls) to switch to a
secure VM mode (SVM) where the guest's memory is relocated to this secured
region, making its memory inaccessible to normal processes/guests running on
the host.
The various ultracalls/hypercalls relating to SVM mode are currently
only documented internally, but are planned for direct inclusion into the
public OpenPOWER version of the PAPR specification (LoPAPR/LoPAR). An internal
ACR has been filed to reserve a hypercall number range specific to this
use-case to avoid any future conflicts with the internally-maintained PAPR
specification. This document summarizes some of these details as they relate
to QEMU.
== hypercalls needed by the ultravisor ==
Switching to SVM mode involves a number of hcalls issued by the ultravisor
to the hypervisor to orchestrate the movement of guest memory to secure
memory and various other aspects of SVM mode. Numbers are assigned for these
hcalls within the reserved range 0xEF00-0xEF80. The below documents the
hcalls relevant to QEMU.
- H_TPM_COMM (0xef10)
For TPM_COMM_OP_EXECUTE operation:
Send a request to a TPM and receive a response, opening a new TPM session
if one has not already been opened.
For TPM_COMM_OP_CLOSE_SESSION operation:
Close the existing TPM session, if any.
Arguments:
r3 : H_TPM_COMM (0xef10)
r4 : TPM operation, one of:
TPM_COMM_OP_EXECUTE (0x1)
TPM_COMM_OP_CLOSE_SESSION (0x2)
r5 : in_buffer, guest physical address of buffer containing the request
- Caller may use the same address for both request and response
r6 : in_size, size of the in buffer
- Must be less than or equal to 4KB
r7 : out_buffer, guest physical address of buffer to store the response
- Caller may use the same address for both request and response
r8 : out_size, size of the out buffer
- Must be at least 4KB, as this is the maximum request/response size
supported by most TPM implementations, including the TPM Resource
Manager in the Linux kernel.
Return values:
r3 : H_Success request processed successfully
H_PARAMETER invalid TPM operation
H_P2 in_buffer is invalid
H_P3 in_size is invalid
H_P4 out_buffer is invalid
H_P5 out_size is invalid
H_RESOURCE problem communicating with TPM
H_FUNCTION TPM access is not currently allowed/configured
r4 : For TPM_COMM_OP_EXECUTE, the size of the response will be stored here
upon success.
Use-case/notes:
SVM filesystems are encrypted using a symmetric key. This key is then
wrapped/encrypted using the public key of a trusted system which has the
private key stored in the system's TPM. An Ultravisor will use this
hcall to unwrap/unseal the symmetric key using the system's TPM device
or a TPM Resource Manager associated with the device.
The Ultravisor sets up a separate session key with the TPM in advance
during host system boot. All sensitive in and out values will be
encrypted using the session key. Though the hypervisor will see the 'in'
and 'out' buffers in raw form, any sensitive contents will generally be
encrypted using this session key.

View File

@ -2050,10 +2050,17 @@ static void machvirt_machine_init(void)
} }
type_init(machvirt_machine_init); type_init(machvirt_machine_init);
static void virt_machine_4_1_options(MachineClass *mc) static void virt_machine_4_2_options(MachineClass *mc)
{ {
} }
DEFINE_VIRT_MACHINE_AS_LATEST(4, 1) DEFINE_VIRT_MACHINE_AS_LATEST(4, 2)
static void virt_machine_4_1_options(MachineClass *mc)
{
virt_machine_4_2_options(mc);
compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
}
DEFINE_VIRT_MACHINE(4, 1)
static void virt_machine_4_0_options(MachineClass *mc) static void virt_machine_4_0_options(MachineClass *mc)
{ {

View File

@ -59,25 +59,19 @@ static int vty_getchars(SpaprVioDevice *sdev, uint8_t *buf, int max)
int n = 0; int n = 0;
while ((n < max) && (dev->out != dev->in)) { while ((n < max) && (dev->out != dev->in)) {
buf[n++] = dev->buf[dev->out++ % VTERM_BUFSIZE]; /*
* Long ago, PowerVM's vty implementation had a bug where it
/* PowerVM's vty implementation has a bug where it inserts a * inserted a \0 after every \r going to the guest. Existing
* \0 after every \r going to the guest. Existing guests have * guests have a workaround for this which removes every \0
* a workaround for this which removes every \0 immediately * immediately following a \r. To avoid triggering this
* following a \r, so here we make ourselves bug-for-bug * workaround, we stop before inserting a \0 if the preceding
* compatible, so that the guest won't drop a real \0-after-\r * character in the output buffer is a \r.
* that happens to occur in a binary stream. */ */
if (buf[n - 1] == '\r') { if (n > 0 && (buf[n - 1] == '\r') &&
if (n < max) { (dev->buf[dev->out % VTERM_BUFSIZE] == '\0')) {
buf[n++] = '\0'; break;
} else {
/* No room for the extra \0, roll back and try again
* next time */
dev->out--;
n--;
break;
}
} }
buf[n++] = dev->buf[dev->out++ % VTERM_BUFSIZE];
} }
qemu_chr_fe_accept_input(&dev->chardev); qemu_chr_fe_accept_input(&dev->chardev);

View File

@ -27,6 +27,9 @@
#include "hw/pci/pci.h" #include "hw/pci/pci.h"
#include "hw/mem/nvdimm.h" #include "hw/mem/nvdimm.h"
GlobalProperty hw_compat_4_1[] = {};
const size_t hw_compat_4_1_len = G_N_ELEMENTS(hw_compat_4_1);
GlobalProperty hw_compat_4_0[] = { GlobalProperty hw_compat_4_0[] = {
{ "VGA", "edid", "false" }, { "VGA", "edid", "false" },
{ "secondary-vga", "edid", "false" }, { "secondary-vga", "edid", "false" },

View File

@ -119,6 +119,9 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX};
/* Physical Address of PVH entry point read from kernel ELF NOTE */ /* Physical Address of PVH entry point read from kernel ELF NOTE */
static size_t pvh_start_addr; static size_t pvh_start_addr;
GlobalProperty pc_compat_4_1[] = {};
const size_t pc_compat_4_1_len = G_N_ELEMENTS(pc_compat_4_1);
GlobalProperty pc_compat_4_0[] = {}; GlobalProperty pc_compat_4_0[] = {};
const size_t pc_compat_4_0_len = G_N_ELEMENTS(pc_compat_4_0); const size_t pc_compat_4_0_len = G_N_ELEMENTS(pc_compat_4_0);
@ -2840,6 +2843,13 @@ static void pc_machine_reset(MachineState *machine)
} }
} }
static void pc_machine_wakeup(MachineState *machine)
{
cpu_synchronize_all_states();
pc_machine_reset(machine);
cpu_synchronize_all_post_reset();
}
static CpuInstanceProperties static CpuInstanceProperties
pc_cpu_index_to_props(MachineState *ms, unsigned cpu_index) pc_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{ {
@ -2952,6 +2962,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->block_default_type = IF_IDE; mc->block_default_type = IF_IDE;
mc->max_cpus = 255; mc->max_cpus = 255;
mc->reset = pc_machine_reset; mc->reset = pc_machine_reset;
mc->wakeup = pc_machine_wakeup;
hc->pre_plug = pc_machine_device_pre_plug_cb; hc->pre_plug = pc_machine_device_pre_plug_cb;
hc->plug = pc_machine_device_plug_cb; hc->plug = pc_machine_device_plug_cb;
hc->unplug_request = pc_machine_device_unplug_request_cb; hc->unplug_request = pc_machine_device_unplug_request_cb;

View File

@ -432,7 +432,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
} }
static void pc_i440fx_4_1_machine_options(MachineClass *m) static void pc_i440fx_4_2_machine_options(MachineClass *m)
{ {
PCMachineClass *pcmc = PC_MACHINE_CLASS(m); PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_machine_options(m); pc_i440fx_machine_options(m);
@ -441,6 +441,18 @@ static void pc_i440fx_4_1_machine_options(MachineClass *m)
pcmc->default_cpu_version = 1; pcmc->default_cpu_version = 1;
} }
DEFINE_I440FX_MACHINE(v4_2, "pc-i440fx-4.2", NULL,
pc_i440fx_4_2_machine_options);
static void pc_i440fx_4_1_machine_options(MachineClass *m)
{
pc_i440fx_4_2_machine_options(m);
m->alias = NULL;
m->is_default = 0;
compat_props_add(m->compat_props, hw_compat_4_1, hw_compat_4_1_len);
compat_props_add(m->compat_props, pc_compat_4_1, pc_compat_4_1_len);
}
DEFINE_I440FX_MACHINE(v4_1, "pc-i440fx-4.1", NULL, DEFINE_I440FX_MACHINE(v4_1, "pc-i440fx-4.1", NULL,
pc_i440fx_4_1_machine_options); pc_i440fx_4_1_machine_options);

View File

@ -364,7 +364,7 @@ static void pc_q35_machine_options(MachineClass *m)
m->max_cpus = 288; m->max_cpus = 288;
} }
static void pc_q35_4_1_machine_options(MachineClass *m) static void pc_q35_4_2_machine_options(MachineClass *m)
{ {
PCMachineClass *pcmc = PC_MACHINE_CLASS(m); PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_machine_options(m); pc_q35_machine_options(m);
@ -372,6 +372,17 @@ static void pc_q35_4_1_machine_options(MachineClass *m)
pcmc->default_cpu_version = 1; pcmc->default_cpu_version = 1;
} }
DEFINE_Q35_MACHINE(v4_2, "pc-q35-4.2", NULL,
pc_q35_4_2_machine_options);
static void pc_q35_4_1_machine_options(MachineClass *m)
{
pc_q35_4_2_machine_options(m);
m->alias = NULL;
compat_props_add(m->compat_props, hw_compat_4_1, hw_compat_4_1_len);
compat_props_add(m->compat_props, pc_compat_4_1, pc_compat_4_1_len);
}
DEFINE_Q35_MACHINE(v4_1, "pc-q35-4.1", NULL, DEFINE_Q35_MACHINE(v4_1, "pc-q35-4.1", NULL,
pc_q35_4_1_machine_options); pc_q35_4_1_machine_options);

View File

@ -1595,6 +1595,15 @@ void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
} }
xive_end_pic_print_info(&end, i, mon); xive_end_pic_print_info(&end, i, mon);
} }
monitor_printf(mon, "XIVE[%x] END Escalation %08x .. %08x\n", blk, 0,
nr_ends - 1);
for (i = 0; i < nr_ends; i++) {
if (xive_router_get_end(xrtr, blk, i, &end)) {
break;
}
xive_end_eas_pic_print_info(&end, i, mon);
}
} }
static void pnv_xive_reset(void *dev) static void pnv_xive_reset(void *dev)

View File

@ -146,7 +146,6 @@ static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
priority, qindex, qentries, qaddr_base, qgen); priority, qindex, qentries, qaddr_base, qgen);
xive_end_queue_pic_print_info(end, 6, mon); xive_end_queue_pic_print_info(end, 6, mon);
monitor_printf(mon, "]");
} }
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
@ -537,7 +536,10 @@ bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi)
return false; return false;
} }
xive->eat[lisn].w |= cpu_to_be64(EAS_VALID); /*
* Set default values when allocating an IRQ number
*/
xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
if (lsi) { if (lsi) {
xive_source_irq_set_lsi(xsrc, lisn); xive_source_irq_set_lsi(xsrc, lisn);
} }

View File

@ -337,6 +337,17 @@ static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
xive_tctx_notify(tctx, TM_QW1_OS); xive_tctx_notify(tctx, TM_QW1_OS);
} }
static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset,
unsigned size)
{
uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
uint32_t qw1w2;
qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0);
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
return qw1w2;
}
/* /*
* Define a mapping of "special" operations depending on the TIMA page * Define a mapping of "special" operations depending on the TIMA page
* offset and the size of the operation. * offset and the size of the operation.
@ -363,6 +374,8 @@ static const XiveTmOp xive_tm_operations[] = {
/* MMIOs above 2K : special operations with side effects */ /* MMIOs above 2K : special operations with side effects */
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
@ -406,7 +419,7 @@ void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
if (offset & 0x800) { if (offset & 0x800) {
xto = xive_tm_find_op(offset, size, true); xto = xive_tm_find_op(offset, size, true);
if (!xto) { if (!xto) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA" qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
"@%"HWADDR_PRIx"\n", offset); "@%"HWADDR_PRIx"\n", offset);
} else { } else {
xto->write_handler(tctx, offset, value, size); xto->write_handler(tctx, offset, value, size);
@ -1145,6 +1158,7 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
be32_to_cpu(qdata)); be32_to_cpu(qdata));
qindex = (qindex + 1) & (qentries - 1); qindex = (qindex + 1) & (qentries - 1);
} }
monitor_printf(mon, "]");
} }
void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
@ -1155,24 +1169,36 @@ void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
uint32_t qentries = 1 << (qsize + 10); uint32_t qentries = 1 << (qsize + 10);
uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
uint8_t pq;
if (!xive_end_is_valid(end)) { if (!xive_end_is_valid(end)) {
return; return;
} }
monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64 pq = xive_get_field32(END_W1_ESn, end->w1);
"% 6d/%5d ^%d", end_idx,
monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
end_idx,
pq & XIVE_ESB_VAL_P ? 'P' : '-',
pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
xive_end_is_valid(end) ? 'v' : '-', xive_end_is_valid(end) ? 'v' : '-',
xive_end_is_enqueue(end) ? 'q' : '-', xive_end_is_enqueue(end) ? 'q' : '-',
xive_end_is_notify(end) ? 'n' : '-', xive_end_is_notify(end) ? 'n' : '-',
xive_end_is_backlog(end) ? 'b' : '-', xive_end_is_backlog(end) ? 'b' : '-',
xive_end_is_escalate(end) ? 'e' : '-', xive_end_is_escalate(end) ? 'e' : '-',
priority, nvt, qaddr_base, qindex, qentries, qgen); xive_end_is_uncond_escalation(end) ? 'u' : '-',
xive_end_is_silent_escalation(end) ? 's' : '-',
priority, nvt_blk, nvt_idx);
xive_end_queue_pic_print_info(end, 6, mon); if (qaddr_base) {
monitor_printf(mon, "]\n"); monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
qaddr_base, qindex, qentries, qgen);
xive_end_queue_pic_print_info(end, 6, mon);
}
monitor_printf(mon, "\n");
} }
static void xive_end_enqueue(XiveEND *end, uint32_t data) static void xive_end_enqueue(XiveEND *end, uint32_t data)
@ -1200,6 +1226,29 @@ static void xive_end_enqueue(XiveEND *end, uint32_t data)
end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
} }
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
Monitor *mon)
{
XiveEAS *eas = (XiveEAS *) &end->w4;
uint8_t pq;
if (!xive_end_is_escalate(end)) {
return;
}
pq = xive_get_field32(END_W1_ESe, end->w1);
monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
end_idx,
pq & XIVE_ESB_VAL_P ? 'P' : '-',
pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
xive_eas_is_valid(eas) ? 'V' : ' ',
xive_eas_is_masked(eas) ? 'M' : ' ',
(uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
(uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
(uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}
/* /*
* XIVE Router (aka. Virtualization Controller or IVRE) * XIVE Router (aka. Virtualization Controller or IVRE)
*/ */
@ -1398,46 +1447,43 @@ static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
* *
* The parameters represent what is sent on the PowerBus * The parameters represent what is sent on the PowerBus
*/ */
static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format, static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx, uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint8_t priority, bool cam_ignore, uint8_t priority,
uint32_t logic_serv) uint32_t logic_serv)
{ {
XiveNVT nvt;
XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
bool found; bool found;
/* NVT cache lookup */
if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
nvt_blk, nvt_idx);
return;
}
if (!xive_nvt_is_valid(&nvt)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
nvt_blk, nvt_idx);
return;
}
found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore, found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
priority, logic_serv, &match); priority, logic_serv, &match);
if (found) { if (found) {
ipb_update(&match.tctx->regs[match.ring], priority); ipb_update(&match.tctx->regs[match.ring], priority);
xive_tctx_notify(match.tctx, match.ring); xive_tctx_notify(match.tctx, match.ring);
return;
} }
/* Record the IPB in the associated NVT structure */ return found;
ipb_update((uint8_t *) &nvt.w4, priority); }
xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
/* /*
* If no matching NVT is dispatched on a HW thread : * Notification using the END ESe/ESn bit (Event State Buffer for
* - update the NVT structure if backlog is activated * escalation and notification). Profide futher coalescing in the
* - escalate (ESe PQ bits and EAS in w4-5) if escalation is * Router.
* activated */
*/ static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
uint32_t end_idx, XiveEND *end,
uint32_t end_esmask)
{
uint8_t pq = xive_get_field32(end_esmask, end->w1);
bool notify = xive_esb_trigger(&pq);
if (pq != xive_get_field32(end_esmask, end->w1)) {
end->w1 = xive_set_field32(end_esmask, end->w1, pq);
xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
}
/* ESe/n[Q]=1 : end of notification */
return notify;
} }
/* /*
@ -1451,6 +1497,10 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
XiveEND end; XiveEND end;
uint8_t priority; uint8_t priority;
uint8_t format; uint8_t format;
uint8_t nvt_blk;
uint32_t nvt_idx;
XiveNVT nvt;
bool found;
/* END cache lookup */ /* END cache lookup */
if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
@ -1471,6 +1521,13 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
} }
/*
* When the END is silent, we skip the notification part.
*/
if (xive_end_is_silent_escalation(&end)) {
goto do_escalation;
}
/* /*
* The W7 format depends on the F bit in W6. It defines the type * The W7 format depends on the F bit in W6. It defines the type
* of the notification : * of the notification :
@ -1492,16 +1549,9 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
* even futher coalescing in the Router * even futher coalescing in the Router
*/ */
if (!xive_end_is_notify(&end)) { if (!xive_end_is_notify(&end)) {
uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
bool notify = xive_esb_trigger(&pq);
if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
}
/* ESn[Q]=1 : end of notification */ /* ESn[Q]=1 : end of notification */
if (!notify) { if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
&end, END_W1_ESn)) {
return; return;
} }
} }
@ -1509,14 +1559,82 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
/* /*
* Follows IVPE notification * Follows IVPE notification
*/ */
xive_presenter_notify(xrtr, format, nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
xive_get_field32(END_W6_NVT_BLOCK, end.w6), nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);
xive_get_field32(END_W6_NVT_INDEX, end.w6),
/* NVT cache lookup */
if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
nvt_blk, nvt_idx);
return;
}
if (!xive_nvt_is_valid(&nvt)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
nvt_blk, nvt_idx);
return;
}
found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx,
xive_get_field32(END_W7_F0_IGNORE, end.w7), xive_get_field32(END_W7_F0_IGNORE, end.w7),
priority, priority,
xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
/* TODO: Auto EOI. */ /* TODO: Auto EOI. */
if (found) {
return;
}
/*
* If no matching NVT is dispatched on a HW thread :
* - specific VP: update the NVT structure if backlog is activated
* - logical server : forward request to IVPE (not supported)
*/
if (xive_end_is_backlog(&end)) {
if (format == 1) {
qemu_log_mask(LOG_GUEST_ERROR,
"XIVE: END %x/%x invalid config: F1 & backlog\n",
end_blk, end_idx);
return;
}
/* Record the IPB in the associated NVT structure */
ipb_update((uint8_t *) &nvt.w4, priority);
xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
/*
* On HW, follows a "Broadcast Backlog" to IVPEs
*/
}
do_escalation:
/*
* If activated, escalate notification using the ESe PQ bits and
* the EAS in w4-5
*/
if (!xive_end_is_escalate(&end)) {
return;
}
/*
* Check the END ESe (Event State Buffer for escalation) for even
* further coalescing in the Router
*/
if (!xive_end_is_uncond_escalation(&end)) {
/* ESe[Q]=1 : end of notification */
if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
&end, END_W1_ESe)) {
return;
}
}
/*
* The END trigger becomes an Escalation trigger
*/
xive_router_end_notify(xrtr,
xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
xive_get_field32(END_W5_ESC_END_DATA, end.w5));
} }
void xive_router_notify(XiveNotifier *xn, uint32_t lisn) void xive_router_notify(XiveNotifier *xn, uint32_t lisn)

View File

@ -5,6 +5,7 @@ obj-$(CONFIG_PSERIES) += spapr.o spapr_caps.o spapr_vio.o spapr_events.o
obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o
obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o
obj-$(CONFIG_PSERIES) += spapr_cpu_core.o spapr_ovec.o spapr_irq.o obj-$(CONFIG_PSERIES) += spapr_cpu_core.o spapr_ovec.o spapr_irq.o
obj-$(CONFIG_PSERIES) += spapr_tpm_proxy.o
obj-$(CONFIG_SPAPR_RNG) += spapr_rng.o obj-$(CONFIG_SPAPR_RNG) += spapr_rng.o
# IBM PowerNV # IBM PowerNV
obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o

View File

@ -1011,6 +1011,8 @@ static void timebase_save(PPCTimebase *tb)
* there is no need to update it from KVM here * there is no need to update it from KVM here
*/ */
tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset; tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
tb->runstate_paused = runstate_check(RUN_STATE_PAUSED);
} }
static void timebase_load(PPCTimebase *tb) static void timebase_load(PPCTimebase *tb)
@ -1054,9 +1056,9 @@ void cpu_ppc_clock_vm_state_change(void *opaque, int running,
} }
/* /*
* When migrating, read the clock just before migration, * When migrating a running guest, read the clock just
* so that the guest clock counts during the events * before migration, so that the guest clock counts
* between: * during the events between:
* *
* * vm_stop() * * vm_stop()
* * * *
@ -1071,7 +1073,10 @@ static int timebase_pre_save(void *opaque)
{ {
PPCTimebase *tb = opaque; PPCTimebase *tb = opaque;
timebase_save(tb); /* guest_timebase won't be overridden in case of paused guest */
if (!tb->runstate_paused) {
timebase_save(tb);
}
return 0; return 0;
} }

View File

@ -79,6 +79,7 @@
#include "qemu/cutils.h" #include "qemu/cutils.h"
#include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/spapr_cpu_core.h"
#include "hw/mem/memory-device.h" #include "hw/mem/memory-device.h"
#include "hw/ppc/spapr_tpm_proxy.h"
#include <libfdt.h> #include <libfdt.h>
@ -1070,6 +1071,7 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
add_str(hypertas, "hcall-tce"); add_str(hypertas, "hcall-tce");
add_str(hypertas, "hcall-vio"); add_str(hypertas, "hcall-vio");
add_str(hypertas, "hcall-splpar"); add_str(hypertas, "hcall-splpar");
add_str(hypertas, "hcall-join");
add_str(hypertas, "hcall-bulk"); add_str(hypertas, "hcall-bulk");
add_str(hypertas, "hcall-set-mode"); add_str(hypertas, "hcall-set-mode");
add_str(hypertas, "hcall-sprg0"); add_str(hypertas, "hcall-sprg0");
@ -1753,10 +1755,6 @@ static void spapr_machine_reset(MachineState *machine)
ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal); ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal);
} }
if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
spapr_irq_msi_reset(spapr);
}
/* /*
* This is fixing some of the default configuration of the XIVE * This is fixing some of the default configuration of the XIVE
* devices. To be called after the reset of the machine devices. * devices. To be called after the reset of the machine devices.
@ -3081,6 +3079,13 @@ static void spapr_machine_init(MachineState *machine)
qemu_register_boot_set(spapr_boot_set, spapr); qemu_register_boot_set(spapr_boot_set, spapr);
/*
* Nothing needs to be done to resume a suspended guest because
* suspending does not change the machine state, so no need for
* a ->wakeup method.
*/
qemu_register_wakeup_support();
if (kvm_enabled()) { if (kvm_enabled()) {
/* to stop and start vmclock */ /* to stop and start vmclock */
qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change, qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
@ -4035,6 +4040,29 @@ static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
} }
} }
static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
if (spapr->tpm_proxy != NULL) {
error_setg(errp, "Only one TPM proxy can be specified for this machine");
return;
}
spapr->tpm_proxy = tpm_proxy;
}
/*
 * Hot-unplug handler for the TPM proxy device: unrealize it, detach it
 * from the QOM tree, and drop the machine's reference so that a new
 * proxy can be plugged later.
 */
static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));

    /* Unrealize first, then unparent; only then clear the machine pointer. */
    object_property_set_bool(OBJECT(dev), false, "realized", NULL);
    object_unparent(OBJECT(dev));
    spapr->tpm_proxy = NULL;
}
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp) DeviceState *dev, Error **errp)
{ {
@ -4044,6 +4072,8 @@ static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
spapr_core_plug(hotplug_dev, dev, errp); spapr_core_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
spapr_phb_plug(hotplug_dev, dev, errp); spapr_phb_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
spapr_tpm_proxy_plug(hotplug_dev, dev, errp);
} }
} }
@ -4056,6 +4086,8 @@ static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
spapr_core_unplug(hotplug_dev, dev); spapr_core_unplug(hotplug_dev, dev);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
spapr_phb_unplug(hotplug_dev, dev); spapr_phb_unplug(hotplug_dev, dev);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
spapr_tpm_proxy_unplug(hotplug_dev, dev);
} }
} }
@ -4090,6 +4122,8 @@ static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
return; return;
} }
spapr_phb_unplug_request(hotplug_dev, dev, errp); spapr_phb_unplug_request(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
spapr_tpm_proxy_unplug(hotplug_dev, dev);
} }
} }
@ -4110,7 +4144,8 @@ static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
{ {
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) || object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
return HOTPLUG_HANDLER(machine); return HOTPLUG_HANDLER(machine);
} }
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
@ -4306,6 +4341,53 @@ PowerPCCPU *spapr_find_cpu(int vcpu_id)
return NULL; return NULL;
} }
/*
 * vhyp hook invoked when a vCPU starts executing guest code under TCG.
 *
 * Maintains the per-vCPU VPA dispatch counter: in this code the counter
 * is kept even while the vCPU is dispatched and odd while it is
 * preempted (see spapr_cpu_exec_exit and the H_CONFER parity check).
 */
static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    /* These are only called by TCG, KVM maintains dispatch state */

    /* Dispatching the vCPU consumes any pending H_PROD. */
    spapr_cpu->prod = false;
    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        /*
         * After the dispatch increment the counter must be even; if the
         * stored value had the wrong parity, log it and bump once more.
         */
        if ((dispatch & 1) != 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "dispatched partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}
/*
 * vhyp hook invoked when a vCPU stops executing guest code under TCG.
 *
 * Counterpart of spapr_cpu_exec_enter: bumps the VPA dispatch counter
 * so it is odd while the vCPU is preempted.
 */
static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        /*
         * After the preemption increment the counter must be odd; correct
         * a wrong-parity value the same way as on dispatch.
         */
        if ((dispatch & 1) != 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "preempted partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}
static void spapr_machine_class_init(ObjectClass *oc, void *data) static void spapr_machine_class_init(ObjectClass *oc, void *data)
{ {
MachineClass *mc = MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc);
@ -4362,6 +4444,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
vhc->hpte_set_r = spapr_hpte_set_r; vhc->hpte_set_r = spapr_hpte_set_r;
vhc->get_pate = spapr_get_pate; vhc->get_pate = spapr_get_pate;
vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
vhc->cpu_exec_enter = spapr_cpu_exec_enter;
vhc->cpu_exec_exit = spapr_cpu_exec_exit;
xic->ics_get = spapr_ics_get; xic->ics_get = spapr_ics_get;
xic->ics_resend = spapr_ics_resend; xic->ics_resend = spapr_ics_resend;
xic->icp_get = spapr_icp_get; xic->icp_get = spapr_icp_get;
@ -4431,14 +4515,31 @@ static const TypeInfo spapr_machine_info = {
type_init(spapr_machine_register_##suffix) type_init(spapr_machine_register_##suffix)
/* /*
* pseries-4.1 * pseries-4.2
*/ */
static void spapr_machine_4_1_class_options(MachineClass *mc) static void spapr_machine_4_2_class_options(MachineClass *mc)
{ {
/* Defaults for the latest behaviour inherited from the base class */ /* Defaults for the latest behaviour inherited from the base class */
} }
DEFINE_SPAPR_MACHINE(4_1, "4.1", true); DEFINE_SPAPR_MACHINE(4_2, "4.2", true);
/*
* pseries-4.1
*/
static void spapr_machine_4_1_class_options(MachineClass *mc)
{
static GlobalProperty compat[] = {
/* Only allow 4kiB and 64kiB IOMMU pagesizes */
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
};
spapr_machine_4_2_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}
DEFINE_SPAPR_MACHINE(4_1, "4.1", false);
/* /*
* pseries-4.0 * pseries-4.0

View File

@ -195,10 +195,12 @@ static void cap_htm_apply(SpaprMachineState *spapr, uint8_t val, Error **errp)
} }
if (tcg_enabled()) { if (tcg_enabled()) {
error_setg(errp, error_setg(errp,
"No Transactional Memory support in TCG, try cap-htm=off"); "No Transactional Memory support in TCG,"
" try appending -machine cap-htm=off");
} else if (kvm_enabled() && !kvmppc_has_cap_htm()) { } else if (kvm_enabled() && !kvmppc_has_cap_htm()) {
error_setg(errp, error_setg(errp,
"KVM implementation does not support Transactional Memory, try cap-htm=off" "KVM implementation does not support Transactional Memory,"
" try appending -machine cap-htm=off"
); );
} }
} }
@ -216,7 +218,8 @@ static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp)
* rid of anything that doesn't do VMX */ * rid of anything that doesn't do VMX */
g_assert(env->insns_flags & PPC_ALTIVEC); g_assert(env->insns_flags & PPC_ALTIVEC);
if (!(env->insns_flags2 & PPC2_VSX)) { if (!(env->insns_flags2 & PPC2_VSX)) {
error_setg(errp, "VSX support not available, try cap-vsx=off"); error_setg(errp, "VSX support not available,"
" try appending -machine cap-vsx=off");
} }
} }
@ -230,7 +233,8 @@ static void cap_dfp_apply(SpaprMachineState *spapr, uint8_t val, Error **errp)
return; return;
} }
if (!(env->insns_flags2 & PPC2_DFP)) { if (!(env->insns_flags2 & PPC2_DFP)) {
error_setg(errp, "DFP support not available, try cap-dfp=off"); error_setg(errp, "DFP support not available,"
" try appending -machine cap-dfp=off");
} }
} }
@ -254,7 +258,8 @@ static void cap_safe_cache_apply(SpaprMachineState *spapr, uint8_t val,
cap_cfpc_possible.vals[val]); cap_cfpc_possible.vals[val]);
} else if (kvm_enabled() && (val > kvm_val)) { } else if (kvm_enabled() && (val > kvm_val)) {
error_setg(errp, error_setg(errp,
"Requested safe cache capability level not supported by kvm, try cap-cfpc=%s", "Requested safe cache capability level not supported by kvm,"
" try appending -machine cap-cfpc=%s",
cap_cfpc_possible.vals[kvm_val]); cap_cfpc_possible.vals[kvm_val]);
} }
@ -282,7 +287,8 @@ static void cap_safe_bounds_check_apply(SpaprMachineState *spapr, uint8_t val,
cap_sbbc_possible.vals[val]); cap_sbbc_possible.vals[val]);
} else if (kvm_enabled() && (val > kvm_val)) { } else if (kvm_enabled() && (val > kvm_val)) {
error_setg(errp, error_setg(errp,
"Requested safe bounds check capability level not supported by kvm, try cap-sbbc=%s", "Requested safe bounds check capability level not supported by kvm,"
" try appending -machine cap-sbbc=%s",
cap_sbbc_possible.vals[kvm_val]); cap_sbbc_possible.vals[kvm_val]);
} }
@ -313,7 +319,8 @@ static void cap_safe_indirect_branch_apply(SpaprMachineState *spapr,
cap_ibs_possible.vals[val]); cap_ibs_possible.vals[val]);
} else if (kvm_enabled() && (val > kvm_val)) { } else if (kvm_enabled() && (val > kvm_val)) {
error_setg(errp, error_setg(errp,
"Requested safe indirect branch capability level not supported by kvm, try cap-ibs=%s", "Requested safe indirect branch capability level not supported by kvm,"
" try appending -machine cap-ibs=%s",
cap_ibs_possible.vals[kvm_val]); cap_ibs_possible.vals[kvm_val]);
} }
@ -402,11 +409,13 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
if (tcg_enabled()) { if (tcg_enabled()) {
error_setg(errp, error_setg(errp,
"No Nested KVM-HV support in tcg, try cap-nested-hv=off"); "No Nested KVM-HV support in tcg,"
" try appending -machine cap-nested-hv=off");
} else if (kvm_enabled()) { } else if (kvm_enabled()) {
if (!kvmppc_has_cap_nested_kvm_hv()) { if (!kvmppc_has_cap_nested_kvm_hv()) {
error_setg(errp, error_setg(errp,
"KVM implementation does not support Nested KVM-HV, try cap-nested-hv=off"); "KVM implementation does not support Nested KVM-HV,"
" try appending -machine cap-nested-hv=off");
} else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) { } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) {
error_setg(errp, error_setg(errp,
"Error enabling cap-nested-hv with KVM, try cap-nested-hv=off"); "Error enabling cap-nested-hv with KVM, try cap-nested-hv=off");
@ -436,10 +445,12 @@ static void cap_large_decr_apply(SpaprMachineState *spapr,
if (!kvm_nr_bits) { if (!kvm_nr_bits) {
error_setg(errp, error_setg(errp,
"No large decrementer support, try cap-large-decr=off"); "No large decrementer support,"
" try appending -machine cap-large-decr=off");
} else if (pcc->lrg_decr_bits != kvm_nr_bits) { } else if (pcc->lrg_decr_bits != kvm_nr_bits) {
error_setg(errp, error_setg(errp,
"KVM large decrementer size (%d) differs to model (%d), try -cap-large-decr=off", "KVM large decrementer size (%d) differs to model (%d),"
" try appending -machine cap-large-decr=off",
kvm_nr_bits, pcc->lrg_decr_bits); kvm_nr_bits, pcc->lrg_decr_bits);
} }
} }
@ -455,7 +466,8 @@ static void cap_large_decr_cpu_apply(SpaprMachineState *spapr,
if (kvm_enabled()) { if (kvm_enabled()) {
if (kvmppc_enable_cap_large_decr(cpu, val)) { if (kvmppc_enable_cap_large_decr(cpu, val)) {
error_setg(errp, error_setg(errp,
"No large decrementer support, try cap-large-decr=off"); "No large decrementer support,"
" try appending -machine cap-large-decr=off");
} }
} }
@ -475,10 +487,12 @@ static void cap_ccf_assist_apply(SpaprMachineState *spapr, uint8_t val,
if (tcg_enabled() && val) { if (tcg_enabled() && val) {
/* TODO - for now only allow broken for TCG */ /* TODO - for now only allow broken for TCG */
error_setg(errp, error_setg(errp,
"Requested count cache flush assist capability level not supported by tcg, try cap-ccf-assist=off"); "Requested count cache flush assist capability level not supported by tcg,"
" try appending -machine cap-ccf-assist=off");
} else if (kvm_enabled() && (val > kvm_val)) { } else if (kvm_enabled() && (val > kvm_val)) {
error_setg(errp, error_setg(errp,
"Requested count cache flush assist capability level not supported by kvm, try cap-ccf-assist=off"); "Requested count cache flush assist capability level not supported by kvm,"
" try appending -machine cap-ccf-assist=off");
} }
} }
@ -779,7 +793,7 @@ void spapr_caps_add_properties(SpaprMachineClass *smc, Error **errp)
for (i = 0; i < ARRAY_SIZE(capability_table); i++) { for (i = 0; i < ARRAY_SIZE(capability_table); i++) {
SpaprCapabilityInfo *cap = &capability_table[i]; SpaprCapabilityInfo *cap = &capability_table[i];
const char *name = g_strdup_printf("cap-%s", cap->name); char *name = g_strdup_printf("cap-%s", cap->name);
char *desc; char *desc;
object_class_property_add(klass, name, cap->type, object_class_property_add(klass, name, cap->type,
@ -787,11 +801,13 @@ void spapr_caps_add_properties(SpaprMachineClass *smc, Error **errp)
NULL, cap, &local_err); NULL, cap, &local_err);
if (local_err) { if (local_err) {
error_propagate(errp, local_err); error_propagate(errp, local_err);
g_free(name);
return; return;
} }
desc = g_strdup_printf("%s", cap->description); desc = g_strdup_printf("%s", cap->description);
object_class_property_set_description(klass, name, desc, &local_err); object_class_property_set_description(klass, name, desc, &local_err);
g_free(name);
g_free(desc); g_free(desc);
if (local_err) { if (local_err) {
error_propagate(errp, local_err); error_propagate(errp, local_err);

View File

@ -227,7 +227,7 @@ static uint32_t drc_set_unusable(SpaprDrc *drc)
return RTAS_OUT_SUCCESS; return RTAS_OUT_SUCCESS;
} }
static const char *spapr_drc_name(SpaprDrc *drc) static char *spapr_drc_name(SpaprDrc *drc)
{ {
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
@ -828,6 +828,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
Object *obj; Object *obj;
SpaprDrc *drc; SpaprDrc *drc;
SpaprDrcClass *drck; SpaprDrcClass *drck;
char *drc_name = NULL;
uint32_t drc_index, drc_power_domain; uint32_t drc_index, drc_power_domain;
if (!strstart(prop->type, "link<", NULL)) { if (!strstart(prop->type, "link<", NULL)) {
@ -857,8 +858,10 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
g_array_append_val(drc_power_domains, drc_power_domain); g_array_append_val(drc_power_domains, drc_power_domain);
/* ibm,drc-names */ /* ibm,drc-names */
drc_names = g_string_append(drc_names, spapr_drc_name(drc)); drc_name = spapr_drc_name(drc);
drc_names = g_string_append(drc_names, drc_name);
drc_names = g_string_insert_len(drc_names, -1, "\0", 1); drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
g_free(drc_name);
/* ibm,drc-types */ /* ibm,drc-types */
drc_types = g_string_append(drc_types, drck->typename); drc_types = g_string_append(drc_types, drck->typename);

View File

@ -875,11 +875,6 @@ unmap_out:
#define FLAGS_DEREGISTER_DTL 0x0000c00000000000ULL #define FLAGS_DEREGISTER_DTL 0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL #define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL
#define VPA_MIN_SIZE 640
#define VPA_SIZE_OFFSET 0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL 0x2
static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa) static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa)
{ {
CPUState *cs = CPU(cpu); CPUState *cs = CPU(cpu);
@ -1056,14 +1051,155 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
{ {
CPUPPCState *env = &cpu->env; CPUPPCState *env = &cpu->env;
CPUState *cs = CPU(cpu); CPUState *cs = CPU(cpu);
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
env->msr |= (1ULL << MSR_EE); env->msr |= (1ULL << MSR_EE);
hreg_compute_hflags(env); hreg_compute_hflags(env);
if (spapr_cpu->prod) {
spapr_cpu->prod = false;
return H_SUCCESS;
}
if (!cpu_has_work(cs)) { if (!cpu_has_work(cs)) {
cs->halted = 1; cs->halted = 1;
cs->exception_index = EXCP_HLT; cs->exception_index = EXCP_HLT;
cs->exit_request = 1; cs->exit_request = 1;
} }
return H_SUCCESS;
}
/*
 * Confer to self, aka join.  Cede could use the same pattern as well, if
 * EXCP_HLT can be changed to ECXP_HALTED.
 *
 * If an H_PROD is pending it is consumed and the vCPU keeps running;
 * otherwise the vCPU is halted until it is prodded again.
 */
static target_ulong h_confer_self(PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    CPUState *cs = CPU(cpu);

    if (spapr_cpu->prod) {
        /* A prod was already queued: eat it instead of sleeping. */
        spapr_cpu->prod = false;
    } else {
        cs->halted = 1;
        cs->exception_index = EXCP_HALTED;
        cs->exit_request = 1;
    }

    return H_SUCCESS;
}
/*
 * H_JOIN hypercall: park the calling vCPU until prodded, but only if it
 * is not the last vCPU still running.  Used by the guest ahead of
 * ibm,suspend-me (see rtas_ibm_suspend_me).
 */
static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs;
    bool last_unjoined = true;

    /* H_JOIN must be called with external interrupts disabled. */
    if (env->msr & (1ULL << MSR_EE)) {
        return H_BAD_MODE;
    }

    /*
     * Must not join the last CPU running. Interestingly, no such restriction
     * for H_CONFER-to-self, but that is probably not intended to be used
     * when H_JOIN is available.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *c = POWERPC_CPU(cs);
        CPUPPCState *e = &c->env;
        if (c == cpu) {
            continue;
        }

        /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */
        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
            last_unjoined = false;
            break;
        }
    }
    if (last_unjoined) {
        /* All other vCPUs have joined already: refuse to park this one. */
        return H_CONTINUE;
    }

    /* Same behaviour as a confer-to-self: sleep until prodded. */
    return h_confer_self(cpu);
}
/*
 * H_CONFER hypercall: yield the calling vCPU, optionally in favour of a
 * specific target vCPU identified by (target, dispatch-counter) pair.
 */
static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    uint32_t dispatch = args[1];
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu;

    /*
     * -1 means confer to all other CPUs without dispatch counter check,
     *  otherwise it's a targeted confer.
     */
    if (target != -1) {
        PowerPCCPU *target_cpu = spapr_find_cpu(target);
        uint32_t target_dispatch;

        if (!target_cpu) {
            return H_PARAMETER;
        }

        /*
         * target == self is a special case, we wait until prodded, without
         * dispatch counter check.
         */
        if (cpu == target_cpu) {
            return h_confer_self(cpu);
        }

        /*
         * Only yield if the target looks preempted: it must have a VPA and
         * an odd (preempted) dispatch counter that still matches the value
         * the guest observed.  Otherwise the confer is a no-op.
         */
        spapr_cpu = spapr_cpu_state(target_cpu);
        if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) {
            return H_SUCCESS;
        }

        /*
         * NOTE(review): the target's VPA is read through the caller's
         * address space (cs->as) — presumably all vCPUs share the machine
         * address space on spapr; confirm.
         */
        target_dispatch = ldl_be_phys(cs->as,
                                  spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        if (target_dispatch != dispatch) {
            return H_SUCCESS;
        }

        /*
         * The targeted confer does not do anything special beyond yielding
         * the current vCPU, but even this should be better than nothing.
         * At least for single-threaded tcg, it gives the target a chance to
         * run before we run again. Multi-threaded tcg does not really do
         * anything with EXCP_YIELD yet.
         */
    }

    cs->exception_index = EXCP_YIELD;
    cs->exit_request = 1;
    cpu_loop_exit(cs);

    /* Not reached: cpu_loop_exit() longjmps back to the execution loop. */
    return H_SUCCESS;
}
/*
 * H_PROD hypercall: wake the target vCPU.  Sets the per-vCPU prod flag
 * (consumed by H_CEDE / H_CONFER-to-self / dispatch) and kicks the CPU
 * out of its halted state.
 */
static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    SpaprCpuState *spapr_cpu;
    PowerPCCPU *tcpu;
    CPUState *cs;

    tcpu = spapr_find_cpu(target);
    if (!tcpu) {
        return H_PARAMETER;
    }
    cs = CPU(tcpu);

    spapr_cpu = spapr_cpu_state(tcpu);
    spapr_cpu->prod = true;
    cs->halted = 0;
    qemu_cpu_kick(cs);

    return H_SUCCESS;
}
@ -1613,6 +1749,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
ov5_updates = spapr_ovec_new(); ov5_updates = spapr_ovec_new();
spapr->cas_reboot = spapr_ovec_diff(ov5_updates, spapr->cas_reboot = spapr_ovec_diff(ov5_updates,
ov5_cas_old, spapr->ov5_cas); ov5_cas_old, spapr->ov5_cas);
spapr_ovec_cleanup(ov5_cas_old);
/* Now that processing is finished, set the radix/hash bit for the /* Now that processing is finished, set the radix/hash bit for the
* guest if it requested a valid mode; otherwise terminate the boot. */ * guest if it requested a valid mode; otherwise terminate the boot. */
if (guest_radix) { if (guest_radix) {
@ -1630,6 +1767,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
} }
spapr->cas_legacy_guest_workaround = !spapr_ovec_test(ov1_guest, spapr->cas_legacy_guest_workaround = !spapr_ovec_test(ov1_guest,
OV1_PPC_3_00); OV1_PPC_3_00);
spapr_ovec_cleanup(ov1_guest);
if (!spapr->cas_reboot) { if (!spapr->cas_reboot) {
/* If spapr_machine_reset() did not set up a HPT but one is necessary /* If spapr_machine_reset() did not set up a HPT but one is necessary
* (because the guest isn't going to use radix) then set it up here. */ * (because the guest isn't going to use radix) then set it up here. */
@ -1825,6 +1963,7 @@ static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr,
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1]; static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1]; static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1];
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{ {
@ -1834,6 +1973,11 @@ void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
assert((opcode & 0x3) == 0); assert((opcode & 0x3) == 0);
slot = &papr_hypercall_table[opcode / 4]; slot = &papr_hypercall_table[opcode / 4];
} else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) {
/* we only have SVM-related hcall numbers assigned in multiples of 4 */
assert((opcode & 0x3) == 0);
slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
} else { } else {
assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX)); assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
@ -1853,6 +1997,13 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
&& ((opcode & 0x3) == 0)) { && ((opcode & 0x3) == 0)) {
spapr_hcall_fn fn = papr_hypercall_table[opcode / 4]; spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
if (fn) {
return fn(cpu, spapr, opcode, args);
}
} else if ((opcode >= SVM_HCALL_BASE) &&
(opcode <= SVM_HCALL_MAX)) {
spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
if (fn) { if (fn) {
return fn(cpu, spapr, opcode, args); return fn(cpu, spapr, opcode, args);
} }
@ -1888,6 +2039,12 @@ static void hypercall_register_types(void)
/* hcall-splpar */ /* hcall-splpar */
spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa); spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
spapr_register_hypercall(H_CEDE, h_cede); spapr_register_hypercall(H_CEDE, h_cede);
spapr_register_hypercall(H_CONFER, h_confer);
spapr_register_hypercall(H_PROD, h_prod);
/* hcall-join */
spapr_register_hypercall(H_JOIN, h_join);
spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset); spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);
/* processor register resource access h-calls */ /* processor register resource access h-calls */

View File

@ -136,7 +136,7 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu,
ret.addr_mask = ~page_mask; ret.addr_mask = ~page_mask;
ret.perm = spapr_tce_iommu_access_flags(tce); ret.perm = spapr_tce_iommu_access_flags(tce);
} }
trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm, trace_spapr_iommu_xlate(tcet->liobn, addr, ret.translated_addr, ret.perm,
ret.addr_mask); ret.addr_mask);
return ret; return ret;

View File

@ -59,11 +59,6 @@ void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num); bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
} }
void spapr_irq_msi_reset(SpaprMachineState *spapr)
{
bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
}
static void spapr_irq_init_kvm(SpaprMachineState *spapr, static void spapr_irq_init_kvm(SpaprMachineState *spapr,
SpaprIrq *irq, Error **errp) SpaprIrq *irq, Error **errp)
{ {
@ -731,6 +726,8 @@ int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp) void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{ {
assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));
if (spapr->irq->reset) { if (spapr->irq->reset) {
spapr->irq->reset(spapr, errp); spapr->irq->reset(spapr, errp);
} }

View File

@ -338,10 +338,6 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
return; return;
} }
if (!smc->legacy_irq_allocation) {
spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
}
spapr_irq_free(spapr, msi->first_irq, msi->num);
if (msi_present(pdev)) { if (msi_present(pdev)) {
spapr_msi_setmsg(pdev, 0, false, 0, 0); spapr_msi_setmsg(pdev, 0, false, 0, 0);
} }
@ -411,10 +407,6 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
/* Release previous MSIs */ /* Release previous MSIs */
if (msi) { if (msi) {
if (!smc->legacy_irq_allocation) {
spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
}
spapr_irq_free(spapr, msi->first_irq, msi->num);
g_hash_table_remove(phb->msi, &config_addr); g_hash_table_remove(phb->msi, &config_addr);
} }
@ -1808,6 +1800,19 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp)
memory_region_del_subregion(get_system_memory(), &sphb->mem32window); memory_region_del_subregion(get_system_memory(), &sphb->mem32window);
} }
/*
 * GDestroyNotify for the PHB's MSI hash table values: returns the IRQs
 * backing an MSI allocation whenever its entry is removed (explicit
 * removal or g_hash_table_remove_all at reset), then frees the entry.
 */
static void spapr_phb_destroy_msi(gpointer opaque)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    spapr_pci_msi *msi = opaque;

    if (!smc->legacy_irq_allocation) {
        /* Give the range back to the machine-level MSI bitmap as well. */
        spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
    }
    spapr_irq_free(spapr, msi->first_irq, msi->num);
    g_free(msi);
}
static void spapr_phb_realize(DeviceState *dev, Error **errp) static void spapr_phb_realize(DeviceState *dev, Error **errp)
{ {
/* We don't use SPAPR_MACHINE() in order to exit gracefully if the user /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user
@ -2019,7 +2024,8 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
spapr_tce_get_iommu(tcet)); spapr_tce_get_iommu(tcet));
} }
sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free); sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free,
spapr_phb_destroy_msi);
return; return;
unrealize: unrealize:
@ -2074,6 +2080,8 @@ static void spapr_phb_reset(DeviceState *qdev)
if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) { if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) {
spapr_phb_vfio_reset(qdev); spapr_phb_vfio_reset(qdev);
} }
g_hash_table_remove_all(sphb->msi);
} }
static Property spapr_phb_properties[] = { static Property spapr_phb_properties[] = {
@ -2093,7 +2101,8 @@ static Property spapr_phb_properties[] = {
0x800000000000000ULL), 0x800000000000000ULL),
DEFINE_PROP_BOOL("ddw", SpaprPhbState, ddw_enabled, true), DEFINE_PROP_BOOL("ddw", SpaprPhbState, ddw_enabled, true),
DEFINE_PROP_UINT64("pgsz", SpaprPhbState, page_size_mask, DEFINE_PROP_UINT64("pgsz", SpaprPhbState, page_size_mask,
(1ULL << 12) | (1ULL << 16)), (1ULL << 12) | (1ULL << 16)
| (1ULL << 21) | (1ULL << 24)),
DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1), DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1),
DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState, DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState,
pre_2_8_migration, false), pre_2_8_migration, false),

View File

@ -217,6 +217,36 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr,
qemu_cpu_kick(cs); qemu_cpu_kick(cs);
} }
/*
 * ibm,suspend-me RTAS call: request a system suspend.
 *
 * Only the last active vCPU may invoke this; every other vCPU must
 * already have parked itself via H_JOIN (halted with MSR[EE] clear),
 * otherwise H_MULTI_THREADS_ACTIVE is returned.
 */
static void rtas_ibm_suspend_me(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                uint32_t token, uint32_t nargs,
                                target_ulong args,
                                uint32_t nret, target_ulong rets)
{
    CPUState *cs;

    /* Takes no arguments and returns a single status word. */
    if (nargs != 0 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    CPU_FOREACH(cs) {
        PowerPCCPU *c = POWERPC_CPU(cs);
        CPUPPCState *e = &c->env;
        if (c == cpu) {
            continue;
        }

        /* See h_join */
        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
            rtas_st(rets, 0, H_MULTI_THREADS_ACTIVE);
            return;
        }
    }

    qemu_system_suspend_request();
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static inline int sysparm_st(target_ulong addr, target_ulong len, static inline int sysparm_st(target_ulong addr, target_ulong len,
const void *val, uint16_t vallen) const void *val, uint16_t vallen)
{ {
@ -484,6 +514,8 @@ static void core_rtas_register_types(void)
rtas_query_cpu_stopped_state); rtas_query_cpu_stopped_state);
spapr_rtas_register(RTAS_START_CPU, "start-cpu", rtas_start_cpu); spapr_rtas_register(RTAS_START_CPU, "start-cpu", rtas_start_cpu);
spapr_rtas_register(RTAS_STOP_SELF, "stop-self", rtas_stop_self); spapr_rtas_register(RTAS_STOP_SELF, "stop-self", rtas_stop_self);
spapr_rtas_register(RTAS_IBM_SUSPEND_ME, "ibm,suspend-me",
rtas_ibm_suspend_me);
spapr_rtas_register(RTAS_IBM_GET_SYSTEM_PARAMETER, spapr_rtas_register(RTAS_IBM_GET_SYSTEM_PARAMETER,
"ibm,get-system-parameter", "ibm,get-system-parameter",
rtas_ibm_get_system_parameter); rtas_ibm_get_system_parameter);

178
hw/ppc/spapr_tpm_proxy.c Normal file
View File

@ -0,0 +1,178 @@
/*
* SPAPR TPM Proxy/Hypercall
*
* Copyright IBM Corp. 2019
*
* Authors:
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/reset.h"
#include "cpu.h"
#include "hw/ppc/spapr.h"
#include "hw/qdev-properties.h"
#include "trace.h"
#define TPM_SPAPR_BUFSIZE 4096
enum {
TPM_COMM_OP_EXECUTE = 1,
TPM_COMM_OP_CLOSE_SESSION = 2,
};
/*
 * Reset handler for the TPM proxy: drop any open session with the host
 * TPM device.  Also used by TPM_COMM_OP_CLOSE_SESSION.
 */
static void spapr_tpm_proxy_reset(void *opaque)
{
    SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(opaque);

    if (tpm_proxy->host_fd == -1) {
        return;
    }

    close(tpm_proxy->host_fd);
    tpm_proxy->host_fd = -1;
}
/*
 * TPM_COMM_OP_EXECUTE: forward a TPM command buffer from guest memory to
 * the host TPM character device and copy the response back.
 *
 * args[1]/args[2]: guest-physical input buffer and its size
 * args[3]/args[4]: guest-physical output buffer and its size
 * On success args[0] is updated with the response length.
 *
 * Returns H_SUCCESS, or H_P3/H_P5 for bad buffer sizes, or H_RESOURCE on
 * host device errors.  (Declared ssize_t but returns hcall status codes,
 * matching the caller's expectations.)
 */
static ssize_t tpm_execute(SpaprTpmProxy *tpm_proxy, target_ulong *args)
{
    uint64_t data_in = ppc64_phys_to_real(args[1]);
    target_ulong data_in_size = args[2];
    uint64_t data_out = ppc64_phys_to_real(args[3]);
    target_ulong data_out_size = args[4];
    uint8_t buf_in[TPM_SPAPR_BUFSIZE];
    uint8_t buf_out[TPM_SPAPR_BUFSIZE];
    uint8_t *wpos;
    ssize_t ret;

    trace_spapr_tpm_execute(data_in, data_in_size, data_out, data_out_size);

    if (data_in_size > TPM_SPAPR_BUFSIZE) {
        error_report("invalid TPM input buffer size: " TARGET_FMT_lu,
                     data_in_size);
        return H_P3;
    }

    /* The response buffer must be able to hold a maximum-sized reply. */
    if (data_out_size < TPM_SPAPR_BUFSIZE) {
        error_report("invalid TPM output buffer size: " TARGET_FMT_lu,
                     data_out_size);
        return H_P5;
    }

    /* Lazily open the host device on the first EXECUTE of a session. */
    if (tpm_proxy->host_fd == -1) {
        tpm_proxy->host_fd = open(tpm_proxy->host_path, O_RDWR);
        if (tpm_proxy->host_fd == -1) {
            error_report("failed to open TPM device %s: %d",
                         tpm_proxy->host_path, errno);
            return H_RESOURCE;
        }
    }

    cpu_physical_memory_read(data_in, buf_in, data_in_size);

    /*
     * Write the whole command, advancing past bytes already accepted so a
     * short write does not resend the head of the buffer; retry on EINTR.
     */
    wpos = buf_in;
    do {
        ret = write(tpm_proxy->host_fd, wpos, data_in_size);
        if (ret > 0) {
            wpos += ret;
            data_in_size -= ret;
        }
    } while ((ret >= 0 && data_in_size > 0) || (ret == -1 && errno == EINTR));

    if (ret == -1) {
        error_report("failed to write to TPM device %s: %d",
                     tpm_proxy->host_path, errno);
        return H_RESOURCE;
    }

    /* Wait for a non-empty response; retry on EINTR. */
    do {
        ret = read(tpm_proxy->host_fd, buf_out, data_out_size);
    } while (ret == 0 || (ret == -1 && errno == EINTR));

    if (ret == -1) {
        error_report("failed to read from TPM device %s: %d",
                     tpm_proxy->host_path, errno);
        return H_RESOURCE;
    }

    cpu_physical_memory_write(data_out, buf_out, ret);
    args[0] = ret;

    return H_SUCCESS;
}
/*
 * H_TPM_COMM hypercall dispatcher.
 *
 * args[0] selects the operation (TPM_COMM_OP_*); remaining arguments are
 * operation specific.  Fails with H_FUNCTION when no TPM proxy device is
 * configured, H_PARAMETER for unknown operations.
 */
static target_ulong h_tpm_comm(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    target_ulong op = args[0];
    SpaprTpmProxy *tpm_proxy = spapr->tpm_proxy;

    if (!tpm_proxy) {
        error_report("TPM proxy not available");
        return H_FUNCTION;
    }

    trace_spapr_h_tpm_comm(tpm_proxy->host_path ? tpm_proxy->host_path
                                                : "null", op);

    if (op == TPM_COMM_OP_EXECUTE) {
        return tpm_execute(tpm_proxy, args);
    }
    if (op == TPM_COMM_OP_CLOSE_SESSION) {
        /* Closing the session is exactly a device reset. */
        spapr_tpm_proxy_reset(tpm_proxy);
        return H_SUCCESS;
    }

    return H_PARAMETER;
}
/*
 * Realize the TPM proxy device.  Requires the 'host-path' property; the
 * host device itself is opened lazily on first use (see tpm_execute).
 */
static void spapr_tpm_proxy_realize(DeviceState *d, Error **errp)
{
    SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(d);

    if (!tpm_proxy->host_path) {
        error_setg(errp, "must specify 'host-path' option for device");
        return;
    }

    tpm_proxy->host_fd = -1;
    qemu_register_reset(spapr_tpm_proxy_reset, tpm_proxy);
}
/*
 * Unrealize counterpart: drop the reset handler registered at realize
 * time so it cannot fire on a freed device.
 */
static void spapr_tpm_proxy_unrealize(DeviceState *d, Error **errp)
{
    SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(d);

    qemu_unregister_reset(spapr_tpm_proxy_reset, tpm_proxy);
}
/* User-settable properties: only the path to the host TPM device node. */
static Property spapr_tpm_proxy_properties[] = {
    DEFINE_PROP_STRING("host-path", SpaprTpmProxy, host_path),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class init: wire up realize/unrealize and make it -device creatable. */
static void spapr_tpm_proxy_class_init(ObjectClass *k, void *data)
{
    DeviceClass *dk = DEVICE_CLASS(k);

    dk->realize = spapr_tpm_proxy_realize;
    dk->unrealize = spapr_tpm_proxy_unrealize;
    dk->user_creatable = true;
    dk->props = spapr_tpm_proxy_properties;
}
static const TypeInfo spapr_tpm_proxy_info = {
.name = TYPE_SPAPR_TPM_PROXY,
.parent = TYPE_DEVICE,
.instance_size = sizeof(SpaprTpmProxy),
.class_init = spapr_tpm_proxy_class_init,
};
/*
 * Module init: register the spapr-tpm-proxy QOM type and route the
 * SVM_H_TPM_COMM hypercall to its handler.  The two registrations go
 * to independent registries, so their order does not matter.
 */
static void spapr_tpm_proxy_register_types(void)
{
    spapr_register_hypercall(SVM_H_TPM_COMM, h_tpm_comm);
    type_register_static(&spapr_tpm_proxy_info);
}

type_init(spapr_tpm_proxy_register_types)

View File

@ -25,6 +25,10 @@ spapr_update_dt(unsigned cb) "New blob %u bytes"
spapr_update_dt_failed_size(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x" spapr_update_dt_failed_size(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x"
spapr_update_dt_failed_check(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x" spapr_update_dt_failed_check(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x"
# spapr_hcall_tpm.c
spapr_h_tpm_comm(const char *device_path, uint64_t operation) "tpm_device_path=%s operation=0x%"PRIu64
spapr_tpm_execute(uint64_t data_in, uint64_t data_in_sz, uint64_t data_out, uint64_t data_out_sz) "data_in=0x%"PRIx64", data_in_sz=%"PRIu64", data_out=0x%"PRIx64", data_out_sz=%"PRIu64
# spapr_iommu.c # spapr_iommu.c
spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64 spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64
spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64 spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64

View File

@ -663,14 +663,26 @@ bool css_migration_enabled(void)
} \ } \
type_init(ccw_machine_register_##suffix) type_init(ccw_machine_register_##suffix)
static void ccw_machine_4_2_instance_options(MachineState *machine)
{
}
static void ccw_machine_4_2_class_options(MachineClass *mc)
{
}
DEFINE_CCW_MACHINE(4_2, "4.2", true);
static void ccw_machine_4_1_instance_options(MachineState *machine) static void ccw_machine_4_1_instance_options(MachineState *machine)
{ {
ccw_machine_4_2_instance_options(machine);
} }
static void ccw_machine_4_1_class_options(MachineClass *mc) static void ccw_machine_4_1_class_options(MachineClass *mc)
{ {
ccw_machine_4_2_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
} }
DEFINE_CCW_MACHINE(4_1, "4.1", true); DEFINE_CCW_MACHINE(4_1, "4.1", false);
static void ccw_machine_4_0_instance_options(MachineState *machine) static void ccw_machine_4_0_instance_options(MachineState *machine)
{ {

View File

@ -180,6 +180,7 @@ struct MachineClass {
void (*init)(MachineState *state); void (*init)(MachineState *state);
void (*reset)(MachineState *state); void (*reset)(MachineState *state);
void (*wakeup)(MachineState *state);
void (*hot_add_cpu)(MachineState *state, const int64_t id, Error **errp); void (*hot_add_cpu)(MachineState *state, const int64_t id, Error **errp);
int (*kvm_type)(MachineState *machine, const char *arg); int (*kvm_type)(MachineState *machine, const char *arg);
void (*smp_parse)(MachineState *ms, QemuOpts *opts); void (*smp_parse)(MachineState *ms, QemuOpts *opts);
@ -317,6 +318,9 @@ struct MachineState {
} \ } \
type_init(machine_initfn##_register_types) type_init(machine_initfn##_register_types)
extern GlobalProperty hw_compat_4_1[];
extern const size_t hw_compat_4_1_len;
extern GlobalProperty hw_compat_4_0[]; extern GlobalProperty hw_compat_4_0[];
extern const size_t hw_compat_4_0_len; extern const size_t hw_compat_4_0_len;

View File

@ -302,6 +302,9 @@ int e820_add_entry(uint64_t, uint64_t, uint32_t);
int e820_get_num_entries(void); int e820_get_num_entries(void);
bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *); bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
extern GlobalProperty pc_compat_4_1[];
extern const size_t pc_compat_4_1_len;
extern GlobalProperty pc_compat_4_0[]; extern GlobalProperty pc_compat_4_0[];
extern const size_t pc_compat_4_0_len; extern const size_t pc_compat_4_0_len;

View File

@ -10,6 +10,7 @@
#include "hw/ppc/spapr_irq.h" #include "hw/ppc/spapr_irq.h"
#include "hw/ppc/spapr_xive.h" /* For SpaprXive */ #include "hw/ppc/spapr_xive.h" /* For SpaprXive */
#include "hw/ppc/xics.h" /* For ICSState */ #include "hw/ppc/xics.h" /* For ICSState */
#include "hw/ppc/spapr_tpm_proxy.h"
struct SpaprVioBus; struct SpaprVioBus;
struct SpaprPhbState; struct SpaprPhbState;
@ -203,6 +204,7 @@ struct SpaprMachineState {
SpaprCapabilities def, eff, mig; SpaprCapabilities def, eff, mig;
unsigned gpu_numa_id; unsigned gpu_numa_id;
SpaprTpmProxy *tpm_proxy;
}; };
#define H_SUCCESS 0 #define H_SUCCESS 0
@ -508,6 +510,15 @@ struct SpaprMachineState {
#define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3) #define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3)
#define KVMPPC_HCALL_MAX KVMPPC_H_UPDATE_DT #define KVMPPC_HCALL_MAX KVMPPC_H_UPDATE_DT
/*
* The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating
* Secure VM mode via an Ultravisor / Protected Execution Facility
*/
#define SVM_HCALL_BASE 0xEF00
#define SVM_H_TPM_COMM 0xEF10
#define SVM_HCALL_MAX SVM_H_TPM_COMM
typedef struct SpaprDeviceTreeUpdateHeader { typedef struct SpaprDeviceTreeUpdateHeader {
uint32_t version_id; uint32_t version_id;
} SpaprDeviceTreeUpdateHeader; } SpaprDeviceTreeUpdateHeader;
@ -525,6 +536,13 @@ void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
target_ulong *args); target_ulong *args);
/* Virtual Processor Area structure constants */
#define VPA_MIN_SIZE 640
#define VPA_SIZE_OFFSET 0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL 0x2
#define VPA_DISPATCH_COUNTER 0x100
/* ibm,set-eeh-option */ /* ibm,set-eeh-option */
#define RTAS_EEH_DISABLE 0 #define RTAS_EEH_DISABLE 0
#define RTAS_EEH_ENABLE 1 #define RTAS_EEH_ENABLE 1
@ -624,8 +642,9 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
#define RTAS_IBM_CREATE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x27) #define RTAS_IBM_CREATE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x27)
#define RTAS_IBM_REMOVE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x28) #define RTAS_IBM_REMOVE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x28)
#define RTAS_IBM_RESET_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x29) #define RTAS_IBM_RESET_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x29)
#define RTAS_IBM_SUSPEND_ME (RTAS_TOKEN_BASE + 0x2A)
#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x2A) #define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x2B)
/* RTAS ibm,get-system-parameter token values */ /* RTAS ibm,get-system-parameter token values */
#define RTAS_SYSPARM_SPLPAR_CHARACTERISTICS 20 #define RTAS_SYSPARM_SPLPAR_CHARACTERISTICS 20

View File

@ -46,6 +46,7 @@ typedef struct SpaprCpuState {
uint64_t vpa_addr; uint64_t vpa_addr;
uint64_t slb_shadow_addr, slb_shadow_size; uint64_t slb_shadow_addr, slb_shadow_size;
uint64_t dtl_addr, dtl_size; uint64_t dtl_addr, dtl_size;
bool prod; /* not migrated, only used to improve dispatch latencies */
struct ICPState *icp; struct ICPState *icp;
struct XiveTCTX *tctx; struct XiveTCTX *tctx;
} SpaprCpuState; } SpaprCpuState;

View File

@ -30,7 +30,6 @@ void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis);
int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align, int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
Error **errp); Error **errp);
void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num); void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num);
void spapr_irq_msi_reset(SpaprMachineState *spapr);
typedef struct SpaprIrq { typedef struct SpaprIrq {
uint32_t nr_irqs; uint32_t nr_irqs;

View File

@ -0,0 +1,31 @@
/*
* SPAPR TPM Proxy/Hypercall
*
* Copyright IBM Corp. 2019
*
* Authors:
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef HW_SPAPR_TPM_PROXY_H
#define HW_SPAPR_TPM_PROXY_H
#include "qom/object.h"
#include "hw/qdev-core.h"
#define TYPE_SPAPR_TPM_PROXY "spapr-tpm-proxy"
#define SPAPR_TPM_PROXY(obj) OBJECT_CHECK(SpaprTpmProxy, (obj), \
TYPE_SPAPR_TPM_PROXY)
typedef struct SpaprTpmProxy {
/*< private >*/
DeviceState parent;
char *host_path;
int host_fd;
} SpaprTpmProxy;
#endif /* HW_SPAPR_TPM_PROXY_H */

View File

@ -148,13 +148,11 @@
* XIVE Notifier (Interface between Source and Router) * XIVE Notifier (Interface between Source and Router)
*/ */
typedef struct XiveNotifier { typedef struct XiveNotifier XiveNotifier;
Object parent;
} XiveNotifier;
#define TYPE_XIVE_NOTIFIER "xive-notifier" #define TYPE_XIVE_NOTIFIER "xive-notifier"
#define XIVE_NOTIFIER(obj) \ #define XIVE_NOTIFIER(obj) \
OBJECT_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER) INTERFACE_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER)
#define XIVE_NOTIFIER_CLASS(klass) \ #define XIVE_NOTIFIER_CLASS(klass) \
OBJECT_CLASS_CHECK(XiveNotifierClass, (klass), TYPE_XIVE_NOTIFIER) OBJECT_CLASS_CHECK(XiveNotifierClass, (klass), TYPE_XIVE_NOTIFIER)
#define XIVE_NOTIFIER_GET_CLASS(obj) \ #define XIVE_NOTIFIER_GET_CLASS(obj) \
@ -356,8 +354,6 @@ typedef struct XiveRouterClass {
XiveTCTX *(*get_tctx)(XiveRouter *xrtr, CPUState *cs); XiveTCTX *(*get_tctx)(XiveRouter *xrtr, CPUState *cs);
} XiveRouterClass; } XiveRouterClass;
void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
XiveEAS *eas); XiveEAS *eas);
int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
@ -399,9 +395,6 @@ typedef struct XiveENDSource {
*/ */
#define XIVE_PRIORITY_MAX 7 #define XIVE_PRIORITY_MAX 7
void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
/* /*
* XIVE Thread Interrupt Management Aera (TIMA) * XIVE Thread Interrupt Management Aera (TIMA)
* *

View File

@ -131,6 +131,8 @@ typedef struct XiveEAS {
#define xive_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS_VALID) #define xive_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS_VALID)
#define xive_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS_MASKED) #define xive_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS_MASKED)
void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);
static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word) static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word)
{ {
return (be64_to_cpu(word) & mask) >> ctz64(mask); return (be64_to_cpu(word) & mask) >> ctz64(mask);
@ -210,6 +212,10 @@ typedef struct XiveEND {
#define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY) #define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY)
#define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG) #define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL) #define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
#define xive_end_is_uncond_escalation(end) \
(be32_to_cpu((end)->w0) & END_W0_UNCOND_ESCALATE)
#define xive_end_is_silent_escalation(end) \
(be32_to_cpu((end)->w0) & END_W0_SILENT_ESCALATE)
static inline uint64_t xive_end_qaddr(XiveEND *end) static inline uint64_t xive_end_qaddr(XiveEND *end)
{ {
@ -217,6 +223,10 @@ static inline uint64_t xive_end_qaddr(XiveEND *end)
be32_to_cpu(end->w3); be32_to_cpu(end->w3);
} }
void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
/* Notification Virtual Target (NVT) */ /* Notification Virtual Target (NVT) */
typedef struct XiveNVT { typedef struct XiveNVT {
uint32_t w0; uint32_t w0;

View File

@ -17,7 +17,7 @@
- SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware - SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
implementation for certain IBM POWER hardware. The sources are at implementation for certain IBM POWER hardware. The sources are at
https://github.com/aik/SLOF, and the image currently in qemu is https://github.com/aik/SLOF, and the image currently in qemu is
built from git tag qemu-slof-20190703. built from git tag qemu-slof-20190719.
- sgabios (the Serial Graphics Adapter option ROM) provides a means for - sgabios (the Serial Graphics Adapter option ROM) provides a means for
legacy x86 software to communicate with an attached serial console as legacy x86 software to communicate with an attached serial console as

Binary file not shown.

@ -1 +1 @@
Subproject commit ba1ab360eebe6338bb8d7d83a9220ccf7e213af3 Subproject commit 7bfe584e321946771692711ff83ad2b5850daca7

View File

@ -201,6 +201,7 @@ typedef struct PowerPCCPUClass {
typedef struct PPCTimebase { typedef struct PPCTimebase {
uint64_t guest_timebase; uint64_t guest_timebase;
int64_t time_of_the_day_ns; int64_t time_of_the_day_ns;
bool runstate_paused;
} PPCTimebase; } PPCTimebase;
extern const VMStateDescription vmstate_ppc_timebase; extern const VMStateDescription vmstate_ppc_timebase;

View File

@ -591,7 +591,7 @@ enum {
#define FPSCR_XE 3 /* Floating-point inexact exception enable */ #define FPSCR_XE 3 /* Floating-point inexact exception enable */
#define FPSCR_NI 2 /* Floating-point non-IEEE mode */ #define FPSCR_NI 2 /* Floating-point non-IEEE mode */
#define FPSCR_RN1 1 #define FPSCR_RN1 1
#define FPSCR_RN 0 /* Floating-point rounding control */ #define FPSCR_RN0 0 /* Floating-point rounding control */
#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1) #define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1) #define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1) #define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
@ -614,7 +614,7 @@ enum {
#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1) #define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1)
#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1) #define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1)
#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1) #define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1)
#define fpscr_rn (((env->fpscr) >> FPSCR_RN) & 0x3) #define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3)
/* Invalid operation exception summary */ /* Invalid operation exception summary */
#define fpscr_ix ((env->fpscr) & ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \ #define fpscr_ix ((env->fpscr) & ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \
(1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \
@ -640,7 +640,7 @@ enum {
#define FP_VXZDZ (1ull << FPSCR_VXZDZ) #define FP_VXZDZ (1ull << FPSCR_VXZDZ)
#define FP_VXIMZ (1ull << FPSCR_VXIMZ) #define FP_VXIMZ (1ull << FPSCR_VXIMZ)
#define FP_VXVC (1ull << FPSCR_VXVC) #define FP_VXVC (1ull << FPSCR_VXVC)
#define FP_FR (1ull << FSPCR_FR) #define FP_FR (1ull << FPSCR_FR)
#define FP_FI (1ull << FPSCR_FI) #define FP_FI (1ull << FPSCR_FI)
#define FP_C (1ull << FPSCR_C) #define FP_C (1ull << FPSCR_C)
#define FP_FL (1ull << FPSCR_FL) #define FP_FL (1ull << FPSCR_FL)
@ -648,7 +648,7 @@ enum {
#define FP_FE (1ull << FPSCR_FE) #define FP_FE (1ull << FPSCR_FE)
#define FP_FU (1ull << FPSCR_FU) #define FP_FU (1ull << FPSCR_FU)
#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU) #define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU) #define FP_FPRF (FP_C | FP_FPCC)
#define FP_VXSOFT (1ull << FPSCR_VXSOFT) #define FP_VXSOFT (1ull << FPSCR_VXSOFT)
#define FP_VXSQRT (1ull << FPSCR_VXSQRT) #define FP_VXSQRT (1ull << FPSCR_VXSQRT)
#define FP_VXCVI (1ull << FPSCR_VXCVI) #define FP_VXCVI (1ull << FPSCR_VXCVI)
@ -659,7 +659,12 @@ enum {
#define FP_XE (1ull << FPSCR_XE) #define FP_XE (1ull << FPSCR_XE)
#define FP_NI (1ull << FPSCR_NI) #define FP_NI (1ull << FPSCR_NI)
#define FP_RN1 (1ull << FPSCR_RN1) #define FP_RN1 (1ull << FPSCR_RN1)
#define FP_RN (1ull << FPSCR_RN) #define FP_RN0 (1ull << FPSCR_RN0)
#define FP_RN (FP_RN1 | FP_RN0)
#define FP_MODE FP_RN
#define FP_ENABLES (FP_VE | FP_OE | FP_UE | FP_ZE | FP_XE)
#define FP_STATUS (FP_FR | FP_FI | FP_FPRF)
/* the exception bits which can be cleared by mcrfs - includes FX */ /* the exception bits which can be cleared by mcrfs - includes FX */
#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \ #define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
@ -1104,10 +1109,6 @@ struct CPUPPCState {
bool resume_as_sreset; bool resume_as_sreset;
#endif #endif
/* Those resources are used only during code translation */
/* opcode handlers */
opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];
/* Those resources are used only in QEMU core */ /* Those resources are used only in QEMU core */
target_ulong hflags; /* hflags is a MSR & HFLAGS_MASK */ target_ulong hflags; /* hflags is a MSR & HFLAGS_MASK */
target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */ target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */
@ -1191,6 +1192,10 @@ struct PowerPCCPU {
int32_t node_id; /* NUMA node this CPU belongs to */ int32_t node_id; /* NUMA node this CPU belongs to */
PPCHash64Options *hash64_opts; PPCHash64Options *hash64_opts;
/* Those resources are used only during code translation */
/* opcode handlers */
opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];
/* Fields related to migration compatibility hacks */ /* Fields related to migration compatibility hacks */
bool pre_2_8_migration; bool pre_2_8_migration;
target_ulong mig_msr_mask; target_ulong mig_msr_mask;
@ -1224,6 +1229,10 @@ struct PPCVirtualHypervisorClass {
void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry); void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp); target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
#ifndef CONFIG_USER_ONLY
void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
#endif
}; };
#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor" #define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
@ -1462,6 +1471,7 @@ typedef PowerPCCPU ArchCPU;
#define SPR_MPC_ICTRL (0x09E) #define SPR_MPC_ICTRL (0x09E)
#define SPR_MPC_BAR (0x09F) #define SPR_MPC_BAR (0x09F)
#define SPR_PSPB (0x09F) #define SPR_PSPB (0x09F)
#define SPR_DPDES (0x0B0)
#define SPR_DAWR (0x0B4) #define SPR_DAWR (0x0B4)
#define SPR_RPR (0x0BA) #define SPR_RPR (0x0BA)
#define SPR_CIABR (0x0BB) #define SPR_CIABR (0x0BB)

View File

@ -58,19 +58,35 @@ uint64_t helper_todouble(uint32_t arg)
uint64_t ret; uint64_t ret;
if (likely(abs_arg >= 0x00800000)) { if (likely(abs_arg >= 0x00800000)) {
/* Normalized operand, or Inf, or NaN. */ if (unlikely(extract32(arg, 23, 8) == 0xff)) {
ret = (uint64_t)extract32(arg, 30, 2) << 62; /* Inf or NAN. */
ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59; ret = (uint64_t)extract32(arg, 31, 1) << 63;
ret |= (uint64_t)extract32(arg, 0, 30) << 29; ret |= (uint64_t)0x7ff << 52;
ret |= (uint64_t)extract32(arg, 0, 23) << 29;
} else {
/* Normalized operand. */
ret = (uint64_t)extract32(arg, 30, 2) << 62;
ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
ret |= (uint64_t)extract32(arg, 0, 30) << 29;
}
} else { } else {
/* Zero or Denormalized operand. */ /* Zero or Denormalized operand. */
ret = (uint64_t)extract32(arg, 31, 1) << 63; ret = (uint64_t)extract32(arg, 31, 1) << 63;
if (unlikely(abs_arg != 0)) { if (unlikely(abs_arg != 0)) {
/* Denormalized operand. */ /*
int shift = clz32(abs_arg) - 9; * Denormalized operand.
int exp = -126 - shift + 1023; * Shift fraction so that the msb is in the implicit bit position.
* Thus, shift is in the range [1:23].
*/
int shift = clz32(abs_arg) - 8;
/*
* The first 3 terms compute the float64 exponent. We then bias
* this result by -1 so that we can swallow the implicit bit below.
*/
int exp = -126 - shift + 1023 - 1;
ret |= (uint64_t)exp << 52; ret |= (uint64_t)exp << 52;
ret |= abs_arg << (shift + 29); ret += (uint64_t)abs_arg << (52 - 23 + shift);
} }
} }
return ret; return ret;
@ -403,7 +419,7 @@ void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
if (prev == 1) { if (prev == 1) {
switch (bit) { switch (bit) {
case FPSCR_RN1: case FPSCR_RN1:
case FPSCR_RN: case FPSCR_RN0:
fpscr_set_rounding_mode(env); fpscr_set_rounding_mode(env);
break; break;
case FPSCR_VXSNAN: case FPSCR_VXSNAN:
@ -557,7 +573,7 @@ void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
} }
break; break;
case FPSCR_RN1: case FPSCR_RN1:
case FPSCR_RN: case FPSCR_RN0:
fpscr_set_rounding_mode(env); fpscr_set_rounding_mode(env);
break; break;
default: default:
@ -2871,10 +2887,14 @@ void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{ {
uint64_t result;
float_status tstat = env->fp_status; float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat); set_float_exception_flags(0, &tstat);
return (uint64_t)float64_to_float32(xb, &tstat) << 32; result = (uint64_t)float64_to_float32(xb, &tstat);
/* hardware replicates result to both words of the doubleword result. */
return (result << 32) | result;
} }
uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb) uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)

View File

@ -193,8 +193,6 @@ DEF_HELPER_2(vprtybw, void, avr, avr)
DEF_HELPER_2(vprtybd, void, avr, avr) DEF_HELPER_2(vprtybd, void, avr, avr)
DEF_HELPER_2(vprtybq, void, avr, avr) DEF_HELPER_2(vprtybq, void, avr, avr)
DEF_HELPER_3(vsubcuw, void, avr, avr, avr) DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
DEF_HELPER_2(lvsl, void, avr, tl)
DEF_HELPER_2(lvsr, void, avr, tl)
DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
@ -219,8 +217,6 @@ DEF_HELPER_3(vrlb, void, avr, avr, avr)
DEF_HELPER_3(vrlh, void, avr, avr, avr) DEF_HELPER_3(vrlh, void, avr, avr, avr)
DEF_HELPER_3(vrlw, void, avr, avr, avr) DEF_HELPER_3(vrlw, void, avr, avr, avr)
DEF_HELPER_3(vrld, void, avr, avr, avr) DEF_HELPER_3(vrld, void, avr, avr, avr)
DEF_HELPER_3(vsl, void, avr, avr, avr)
DEF_HELPER_3(vsr, void, avr, avr, avr)
DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32) DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32)
DEF_HELPER_3(vextractub, void, avr, avr, i32) DEF_HELPER_3(vextractub, void, avr, avr, i32)
DEF_HELPER_3(vextractuh, void, avr, avr, i32) DEF_HELPER_3(vextractuh, void, avr, avr, i32)
@ -314,8 +310,6 @@ DEF_HELPER_4(vctsxs, void, env, avr, avr, i32)
DEF_HELPER_2(vclzb, void, avr, avr) DEF_HELPER_2(vclzb, void, avr, avr)
DEF_HELPER_2(vclzh, void, avr, avr) DEF_HELPER_2(vclzh, void, avr, avr)
DEF_HELPER_2(vclzw, void, avr, avr)
DEF_HELPER_2(vclzd, void, avr, avr)
DEF_HELPER_2(vctzb, void, avr, avr) DEF_HELPER_2(vctzb, void, avr, avr)
DEF_HELPER_2(vctzh, void, avr, avr) DEF_HELPER_2(vctzh, void, avr, avr)
DEF_HELPER_2(vctzw, void, avr, avr) DEF_HELPER_2(vctzw, void, avr, avr)
@ -328,7 +322,6 @@ DEF_HELPER_1(vclzlsbb, tl, avr)
DEF_HELPER_1(vctzlsbb, tl, avr) DEF_HELPER_1(vctzlsbb, tl, avr)
DEF_HELPER_3(vbpermd, void, avr, avr, avr) DEF_HELPER_3(vbpermd, void, avr, avr, avr)
DEF_HELPER_3(vbpermq, void, avr, avr, avr) DEF_HELPER_3(vbpermq, void, avr, avr, avr)
DEF_HELPER_2(vgbbd, void, avr, avr)
DEF_HELPER_3(vpmsumb, void, avr, avr, avr) DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
DEF_HELPER_3(vpmsumh, void, avr, avr, avr) DEF_HELPER_3(vpmsumh, void, avr, avr, avr)
DEF_HELPER_3(vpmsumw, void, avr, avr, avr) DEF_HELPER_3(vpmsumw, void, avr, avr, avr)

View File

@ -459,24 +459,6 @@ SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT #undef SATCVT
#undef SATCVTU #undef SATCVTU
void helper_lvsl(ppc_avr_t *r, target_ulong sh)
{
int i, j = (sh & 0xf);
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
r->VsrB(i) = j++;
}
}
void helper_lvsr(ppc_avr_t *r, target_ulong sh)
{
int i, j = 0x10 - (sh & 0xf);
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
r->VsrB(i) = j++;
}
}
void helper_mtvscr(CPUPPCState *env, uint32_t vscr) void helper_mtvscr(CPUPPCState *env, uint32_t vscr)
{ {
env->vscr = vscr & ~(1u << VSCR_SAT); env->vscr = vscr & ~(1u << VSCR_SAT);
@ -1205,282 +1187,6 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#undef VBPERMQ_INDEX #undef VBPERMQ_INDEX
#undef VBPERMQ_DW #undef VBPERMQ_DW
static const uint64_t VGBBD_MASKS[256] = {
0x0000000000000000ull, /* 00 */
0x0000000000000080ull, /* 01 */
0x0000000000008000ull, /* 02 */
0x0000000000008080ull, /* 03 */
0x0000000000800000ull, /* 04 */
0x0000000000800080ull, /* 05 */
0x0000000000808000ull, /* 06 */
0x0000000000808080ull, /* 07 */
0x0000000080000000ull, /* 08 */
0x0000000080000080ull, /* 09 */
0x0000000080008000ull, /* 0A */
0x0000000080008080ull, /* 0B */
0x0000000080800000ull, /* 0C */
0x0000000080800080ull, /* 0D */
0x0000000080808000ull, /* 0E */
0x0000000080808080ull, /* 0F */
0x0000008000000000ull, /* 10 */
0x0000008000000080ull, /* 11 */
0x0000008000008000ull, /* 12 */
0x0000008000008080ull, /* 13 */
0x0000008000800000ull, /* 14 */
0x0000008000800080ull, /* 15 */
0x0000008000808000ull, /* 16 */
0x0000008000808080ull, /* 17 */
0x0000008080000000ull, /* 18 */
0x0000008080000080ull, /* 19 */
0x0000008080008000ull, /* 1A */
0x0000008080008080ull, /* 1B */
0x0000008080800000ull, /* 1C */
0x0000008080800080ull, /* 1D */
0x0000008080808000ull, /* 1E */
0x0000008080808080ull, /* 1F */
0x0000800000000000ull, /* 20 */
0x0000800000000080ull, /* 21 */
0x0000800000008000ull, /* 22 */
0x0000800000008080ull, /* 23 */
0x0000800000800000ull, /* 24 */
0x0000800000800080ull, /* 25 */
0x0000800000808000ull, /* 26 */
0x0000800000808080ull, /* 27 */
0x0000800080000000ull, /* 28 */
0x0000800080000080ull, /* 29 */
0x0000800080008000ull, /* 2A */
0x0000800080008080ull, /* 2B */
0x0000800080800000ull, /* 2C */
0x0000800080800080ull, /* 2D */
0x0000800080808000ull, /* 2E */
0x0000800080808080ull, /* 2F */
0x0000808000000000ull, /* 30 */
0x0000808000000080ull, /* 31 */
0x0000808000008000ull, /* 32 */
0x0000808000008080ull, /* 33 */
0x0000808000800000ull, /* 34 */
0x0000808000800080ull, /* 35 */
0x0000808000808000ull, /* 36 */
0x0000808000808080ull, /* 37 */
0x0000808080000000ull, /* 38 */
0x0000808080000080ull, /* 39 */
0x0000808080008000ull, /* 3A */
0x0000808080008080ull, /* 3B */
0x0000808080800000ull, /* 3C */
0x0000808080800080ull, /* 3D */
0x0000808080808000ull, /* 3E */
0x0000808080808080ull, /* 3F */
0x0080000000000000ull, /* 40 */
0x0080000000000080ull, /* 41 */
0x0080000000008000ull, /* 42 */
0x0080000000008080ull, /* 43 */
0x0080000000800000ull, /* 44 */
0x0080000000800080ull, /* 45 */
0x0080000000808000ull, /* 46 */
0x0080000000808080ull, /* 47 */
0x0080000080000000ull, /* 48 */
0x0080000080000080ull, /* 49 */
0x0080000080008000ull, /* 4A */
0x0080000080008080ull, /* 4B */
0x0080000080800000ull, /* 4C */
0x0080000080800080ull, /* 4D */
0x0080000080808000ull, /* 4E */
0x0080000080808080ull, /* 4F */
0x0080008000000000ull, /* 50 */
0x0080008000000080ull, /* 51 */
0x0080008000008000ull, /* 52 */
0x0080008000008080ull, /* 53 */
0x0080008000800000ull, /* 54 */
0x0080008000800080ull, /* 55 */
0x0080008000808000ull, /* 56 */
0x0080008000808080ull, /* 57 */
0x0080008080000000ull, /* 58 */
0x0080008080000080ull, /* 59 */
0x0080008080008000ull, /* 5A */
0x0080008080008080ull, /* 5B */
0x0080008080800000ull, /* 5C */
0x0080008080800080ull, /* 5D */
0x0080008080808000ull, /* 5E */
0x0080008080808080ull, /* 5F */
0x0080800000000000ull, /* 60 */
0x0080800000000080ull, /* 61 */
0x0080800000008000ull, /* 62 */
0x0080800000008080ull, /* 63 */
0x0080800000800000ull, /* 64 */
0x0080800000800080ull, /* 65 */
0x0080800000808000ull, /* 66 */
0x0080800000808080ull, /* 67 */
0x0080800080000000ull, /* 68 */
0x0080800080000080ull, /* 69 */
0x0080800080008000ull, /* 6A */
0x0080800080008080ull, /* 6B */
0x0080800080800000ull, /* 6C */
0x0080800080800080ull, /* 6D */
0x0080800080808000ull, /* 6E */
0x0080800080808080ull, /* 6F */
0x0080808000000000ull, /* 70 */
0x0080808000000080ull, /* 71 */
0x0080808000008000ull, /* 72 */
0x0080808000008080ull, /* 73 */
0x0080808000800000ull, /* 74 */
0x0080808000800080ull, /* 75 */
0x0080808000808000ull, /* 76 */
0x0080808000808080ull, /* 77 */
0x0080808080000000ull, /* 78 */
0x0080808080000080ull, /* 79 */
0x0080808080008000ull, /* 7A */
0x0080808080008080ull, /* 7B */
0x0080808080800000ull, /* 7C */
0x0080808080800080ull, /* 7D */
0x0080808080808000ull, /* 7E */
0x0080808080808080ull, /* 7F */
0x8000000000000000ull, /* 80 */
0x8000000000000080ull, /* 81 */
0x8000000000008000ull, /* 82 */
0x8000000000008080ull, /* 83 */
0x8000000000800000ull, /* 84 */
0x8000000000800080ull, /* 85 */
0x8000000000808000ull, /* 86 */
0x8000000000808080ull, /* 87 */
0x8000000080000000ull, /* 88 */
0x8000000080000080ull, /* 89 */
0x8000000080008000ull, /* 8A */
0x8000000080008080ull, /* 8B */
0x8000000080800000ull, /* 8C */
0x8000000080800080ull, /* 8D */
0x8000000080808000ull, /* 8E */
0x8000000080808080ull, /* 8F */
0x8000008000000000ull, /* 90 */
0x8000008000000080ull, /* 91 */
0x8000008000008000ull, /* 92 */
0x8000008000008080ull, /* 93 */
0x8000008000800000ull, /* 94 */
0x8000008000800080ull, /* 95 */
0x8000008000808000ull, /* 96 */
0x8000008000808080ull, /* 97 */
0x8000008080000000ull, /* 98 */
0x8000008080000080ull, /* 99 */
0x8000008080008000ull, /* 9A */
0x8000008080008080ull, /* 9B */
0x8000008080800000ull, /* 9C */
0x8000008080800080ull, /* 9D */
0x8000008080808000ull, /* 9E */
0x8000008080808080ull, /* 9F */
0x8000800000000000ull, /* A0 */
0x8000800000000080ull, /* A1 */
0x8000800000008000ull, /* A2 */
0x8000800000008080ull, /* A3 */
0x8000800000800000ull, /* A4 */
0x8000800000800080ull, /* A5 */
0x8000800000808000ull, /* A6 */
0x8000800000808080ull, /* A7 */
0x8000800080000000ull, /* A8 */
0x8000800080000080ull, /* A9 */
0x8000800080008000ull, /* AA */
0x8000800080008080ull, /* AB */
0x8000800080800000ull, /* AC */
0x8000800080800080ull, /* AD */
0x8000800080808000ull, /* AE */
0x8000800080808080ull, /* AF */
0x8000808000000000ull, /* B0 */
0x8000808000000080ull, /* B1 */
0x8000808000008000ull, /* B2 */
0x8000808000008080ull, /* B3 */
0x8000808000800000ull, /* B4 */
0x8000808000800080ull, /* B5 */
0x8000808000808000ull, /* B6 */
0x8000808000808080ull, /* B7 */
0x8000808080000000ull, /* B8 */
0x8000808080000080ull, /* B9 */
0x8000808080008000ull, /* BA */
0x8000808080008080ull, /* BB */
0x8000808080800000ull, /* BC */
0x8000808080800080ull, /* BD */
0x8000808080808000ull, /* BE */
0x8000808080808080ull, /* BF */
0x8080000000000000ull, /* C0 */
0x8080000000000080ull, /* C1 */
0x8080000000008000ull, /* C2 */
0x8080000000008080ull, /* C3 */
0x8080000000800000ull, /* C4 */
0x8080000000800080ull, /* C5 */
0x8080000000808000ull, /* C6 */
0x8080000000808080ull, /* C7 */
0x8080000080000000ull, /* C8 */
0x8080000080000080ull, /* C9 */
0x8080000080008000ull, /* CA */
0x8080000080008080ull, /* CB */
0x8080000080800000ull, /* CC */
0x8080000080800080ull, /* CD */
0x8080000080808000ull, /* CE */
0x8080000080808080ull, /* CF */
0x8080008000000000ull, /* D0 */
0x8080008000000080ull, /* D1 */
0x8080008000008000ull, /* D2 */
0x8080008000008080ull, /* D3 */
0x8080008000800000ull, /* D4 */
0x8080008000800080ull, /* D5 */
0x8080008000808000ull, /* D6 */
0x8080008000808080ull, /* D7 */
0x8080008080000000ull, /* D8 */
0x8080008080000080ull, /* D9 */
0x8080008080008000ull, /* DA */
0x8080008080008080ull, /* DB */
0x8080008080800000ull, /* DC */
0x8080008080800080ull, /* DD */
0x8080008080808000ull, /* DE */
0x8080008080808080ull, /* DF */
0x8080800000000000ull, /* E0 */
0x8080800000000080ull, /* E1 */
0x8080800000008000ull, /* E2 */
0x8080800000008080ull, /* E3 */
0x8080800000800000ull, /* E4 */
0x8080800000800080ull, /* E5 */
0x8080800000808000ull, /* E6 */
0x8080800000808080ull, /* E7 */
0x8080800080000000ull, /* E8 */
0x8080800080000080ull, /* E9 */
0x8080800080008000ull, /* EA */
0x8080800080008080ull, /* EB */
0x8080800080800000ull, /* EC */
0x8080800080800080ull, /* ED */
0x8080800080808000ull, /* EE */
0x8080800080808080ull, /* EF */
0x8080808000000000ull, /* F0 */
0x8080808000000080ull, /* F1 */
0x8080808000008000ull, /* F2 */
0x8080808000008080ull, /* F3 */
0x8080808000800000ull, /* F4 */
0x8080808000800080ull, /* F5 */
0x8080808000808000ull, /* F6 */
0x8080808000808080ull, /* F7 */
0x8080808080000000ull, /* F8 */
0x8080808080000080ull, /* F9 */
0x8080808080008000ull, /* FA */
0x8080808080008080ull, /* FB */
0x8080808080800000ull, /* FC */
0x8080808080800080ull, /* FD */
0x8080808080808000ull, /* FE */
0x8080808080808080ull, /* FF */
};
/*
 * vgbbd - Vector Gather Bits by Bytes by Doubleword.
 *
 * For each source byte b->u8[i], VGBBD_MASKS[b->u8[i]] is a 64-bit
 * pattern that spreads the bits of that byte into the most-significant
 * bit of each of eight bytes (see the table above: entry 0xC1 =
 * 0x8080000000000080 sets the MSB of the bytes matching bits 7, 6, 0).
 * Shifting that pattern right by the byte's lane within its doubleword
 * moves the bits into the matching bit column; OR-ing the eight
 * contributions of each doubleword into t[i >> 3] accumulates an
 * 8x8 bit-matrix transpose of that doubleword.
 */
void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
{
int i;
/* One accumulator per doubleword element of the result. */
uint64_t t[2] = { 0, 0 };
VECTOR_FOR_INORDER_I(i, u8) {
#if defined(HOST_WORDS_BIGENDIAN)
t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
#else
/*
 * On little-endian hosts u8[] lane numbering is reversed, so the
 * shift count is mirrored to land each byte in the same
 * architectural bit column.
 */
t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (7 - (i & 7));
#endif
}
r->u64[0] = t[0];
r->u64[1] = t[1];
}
#define PMSUM(name, srcfld, trgfld, trgtyp) \ #define PMSUM(name, srcfld, trgfld, trgtyp) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \ { \
@ -1758,41 +1464,6 @@ VEXTU_X_DO(vextuhrx, 16, 0)
VEXTU_X_DO(vextuwrx, 32, 0) VEXTU_X_DO(vextuwrx, 32, 0)
#undef VEXTU_X_DO #undef VEXTU_X_DO
/*
 * The specification says that the results are undefined if all of the
 * shift counts are not identical. We check to make sure that they
 * are to conform to what real hardware appears to do.
 */
/*
 * VSHIFT expands to helper_vsl (leftp = 1) and helper_vsr (leftp = 0):
 * a full 128-bit shift of vA by the count held in the low 3 bits of
 * byte 15 of vB, carrying bits across the two 64-bit halves.  The loop
 * verifies that every byte of vB carries the same 3-bit count; if not,
 * the destination is left unchanged (one legal "undefined" outcome).
 * The shift == 0 case is split out so the (64 - shift) carry shifts
 * never see a count of 64, which would be undefined behaviour in C.
 */
#define VSHIFT(suffix, leftp) \
void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int shift = b->VsrB(15) & 0x7; \
int doit = 1; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
doit = doit && ((b->u8[i] & 0x7) == shift); \
} \
if (doit) { \
if (shift == 0) { \
*r = *a; \
} else if (leftp) { \
uint64_t carry = a->VsrD(1) >> (64 - shift); \
\
r->VsrD(0) = (a->VsrD(0) << shift) | carry; \
r->VsrD(1) = a->VsrD(1) << shift; \
} else { \
uint64_t carry = a->VsrD(0) << (64 - shift); \
\
r->VsrD(1) = (a->VsrD(1) >> shift) | carry; \
r->VsrD(0) = a->VsrD(0) >> shift; \
} \
} \
}
/* Instantiate the left (vsl) and right (vsr) variants, then retire the macro. */
VSHIFT(l, 1)
VSHIFT(r, 0)
#undef VSHIFT
void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{ {
int i; int i;
@ -2148,18 +1819,12 @@ VUPK(lsw, s64, s32, UPKLO)
#define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8) #define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8)
#define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16) #define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16)
#define clzw(v) clz32((v))
#define clzd(v) clz64((v))
VGENERIC_DO(clzb, u8) VGENERIC_DO(clzb, u8)
VGENERIC_DO(clzh, u16) VGENERIC_DO(clzh, u16)
VGENERIC_DO(clzw, u32)
VGENERIC_DO(clzd, u64)
#undef clzb #undef clzb
#undef clzh #undef clzh
#undef clzw
#undef clzd
#define ctzb(v) ((v) ? ctz32(v) : 8) #define ctzb(v) ((v) ? ctz32(v) : 8)
#define ctzh(v) ((v) ? ctz32(v) : 16) #define ctzh(v) ((v) ? ctz32(v) : 16)

View File

@ -58,7 +58,6 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
}; };
static int cap_interrupt_unset; static int cap_interrupt_unset;
static int cap_interrupt_level;
static int cap_segstate; static int cap_segstate;
static int cap_booke_sregs; static int cap_booke_sregs;
static int cap_ppc_smt; static int cap_ppc_smt;
@ -89,25 +88,6 @@ static int cap_large_decr;
static uint32_t debug_inst_opcode; static uint32_t debug_inst_opcode;
/*
* XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
* takes but ignores it, goes to sleep and never gets notified that there's
* still an interrupt pending.
*
* As a quick workaround, let's just wake up again 20 ms after we injected
* an interrupt. That way we can assure that we're always reinjecting
* interrupts in case the guest swallowed them.
*/
static QEMUTimer *idle_timer;
static void kvm_kick_cpu(void *opaque)
{
PowerPCCPU *cpu = opaque;
qemu_cpu_kick(CPU(cpu));
}
/* /*
* Check whether we are running with KVM-PR (instead of KVM-HV). This * Check whether we are running with KVM-PR (instead of KVM-HV). This
* should only be used for fallback tests - generally we should use * should only be used for fallback tests - generally we should use
@ -127,7 +107,6 @@ static int kvmppc_get_dec_bits(void);
int kvm_arch_init(MachineState *ms, KVMState *s) int kvm_arch_init(MachineState *ms, KVMState *s)
{ {
cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ); cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE); cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS); cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE); cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
@ -163,9 +142,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
*/ */
cap_ppc_pvr_compat = false; cap_ppc_pvr_compat = false;
if (!cap_interrupt_level) { if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the " error_report("KVM: Host kernel doesn't have level irq capability");
"VM to stall at times!\n"); exit(1);
} }
kvm_ppc_register_host_cpu_type(ms); kvm_ppc_register_host_cpu_type(ms);
@ -493,8 +472,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
return ret; return ret;
} }
idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
switch (cenv->mmu_model) { switch (cenv->mmu_model) {
case POWERPC_MMU_BOOKE206: case POWERPC_MMU_BOOKE206:
/* This target supports access to KVM's guest TLB */ /* This target supports access to KVM's guest TLB */
@ -1334,7 +1311,7 @@ int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
return 0; return 0;
} }
if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) { if (!kvm_enabled() || !cap_interrupt_unset) {
return 0; return 0;
} }
@ -1351,49 +1328,7 @@ int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{ {
PowerPCCPU *cpu = POWERPC_CPU(cs); return;
CPUPPCState *env = &cpu->env;
int r;
unsigned irq;
qemu_mutex_lock_iothread();
/*
* PowerPC QEMU tracks the various core input pins (interrupt,
* critical interrupt, reset, etc) in PPC-specific
* env->irq_input_state.
*/
if (!cap_interrupt_level &&
run->ready_for_interrupt_injection &&
(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->irq_input_state & (1 << PPC_INPUT_INT)))
{
/*
* For now KVM disregards the 'irq' argument. However, in the
* future KVM could cache it in-kernel to avoid a heavyweight
* exit when reading the UIC.
*/
irq = KVM_INTERRUPT_SET;
trace_kvm_injected_interrupt(irq);
r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
if (r < 0) {
printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
}
/* Always wake up soon in case the interrupt was level based */
timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND / 50));
}
/*
* We don't know if there are more interrupts pending after
* this. However, the guest will return to userspace in the course
* of handling this one anyways, so we will get a chance to
* deliver the rest.
*/
qemu_mutex_unlock_iothread();
} }
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)

View File

@ -7845,6 +7845,7 @@ static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = cs->env_ptr; CPUPPCState *env = cs->env_ptr;
opc_handler_t **table, *handler; opc_handler_t **table, *handler;
@ -7862,7 +7863,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
opc3(ctx->opcode), opc4(ctx->opcode), opc3(ctx->opcode), opc4(ctx->opcode),
ctx->le_mode ? "little" : "big"); ctx->le_mode ? "little" : "big");
ctx->base.pc_next += 4; ctx->base.pc_next += 4;
table = env->opcodes; table = cpu->opcodes;
handler = table[opc1(ctx->opcode)]; handler = table[opc1(ctx->opcode)];
if (is_indirect_opcode(handler)) { if (is_indirect_opcode(handler)) {
table = ind_table(handler); table = ind_table(handler);

View File

@ -617,6 +617,28 @@ static void gen_mffs(DisasContext *ctx)
tcg_temp_free_i64(t0); tcg_temp_free_i64(t0);
} }
/* mffsl - Move From FPSCR Lightweight */
static void gen_mffsl(DisasContext *ctx)
{
    TCGv_i64 t0;

    /*
     * mffsl is an ISA 3.0 instruction; on older processors the same
     * opcode pattern decodes as a plain mffs, so fall back to that
     * handler.  (Plain "gen_mffs(ctx); return;" rather than
     * "return gen_mffs(ctx);" - returning an expression from a
     * function declared void is an ISO C constraint violation,
     * C11 6.8.6.4p1.)
     */
    if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
        gen_mffs(ctx);
        return;
    }

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }

    t0 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(t0, cpu_fpscr);
    /* Mask everything except mode, status, and enables. */
    tcg_gen_andi_i64(t0, t0, FP_MODE | FP_STATUS | FP_ENABLES);
    set_fpr(rD(ctx->opcode), t0);
    tcg_temp_free_i64(t0);
}
/* mtfsb0 */ /* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx) static void gen_mtfsb0(DisasContext *ctx)
{ {

View File

@ -104,7 +104,9 @@ GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207),
GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT), GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT), GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE),
GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT,
PPC2_ISA300),
GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT), GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT),

View File

@ -142,38 +142,6 @@ GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2); GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4); GEN_VR_STVE(wx, 0x07, 0x06, 4);
/*
 * lvsl - emit a call to the Load Vector for Shift Left helper.
 * Raises a VPU exception when AltiVec is disabled.
 */
static void gen_lvsl(DisasContext *ctx)
{
    TCGv addr;
    TCGv_ptr vrt;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    addr = tcg_temp_new();
    gen_addr_reg_index(ctx, addr);
    vrt = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_lvsl(vrt, addr);
    tcg_temp_free(addr);
    tcg_temp_free_ptr(vrt);
}
/*
 * lvsr - emit a call to the Load Vector for Shift Right helper.
 * Raises a VPU exception when AltiVec is disabled.
 */
static void gen_lvsr(DisasContext *ctx)
{
    TCGv addr;
    TCGv_ptr vrt;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    addr = tcg_temp_new();
    gen_addr_reg_index(ctx, addr);
    vrt = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_lvsr(vrt, addr);
    tcg_temp_free(addr);
    tcg_temp_free_ptr(vrt);
}
static void gen_mfvscr(DisasContext *ctx) static void gen_mfvscr(DisasContext *ctx)
{ {
TCGv_i32 t; TCGv_i32 t;
@ -316,6 +284,16 @@ static void glue(gen_, name)(DisasContext *ctx) \
tcg_temp_free_ptr(rd); \ tcg_temp_free_ptr(rd); \
} }
/*
 * GEN_VXFORM_TRANS - generate a VX-form front end that performs the
 * AltiVec-enabled check and then delegates to a trans_<name>() function
 * emitting TCG ops directly (no out-of-line helper call).  opc2/opc3
 * are unused here; they document the opcode slot for the handler table.
 */
#define GEN_VXFORM_TRANS(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
trans_##name(ctx); \
}
#define GEN_VXFORM_ENV(name, opc2, opc3) \ #define GEN_VXFORM_ENV(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \ static void glue(gen_, name)(DisasContext *ctx) \
{ \ { \
@ -515,6 +493,307 @@ static void gen_vmrgow(DisasContext *ctx)
tcg_temp_free_i64(avr); tcg_temp_free_i64(avr);
} }
/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28:31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
static void trans_lvsl(DisasContext *ctx)
{
int VT = rD(ctx->opcode);
TCGv_i64 result = tcg_temp_new_i64();
TCGv_i64 sh = tcg_temp_new_i64();
TCGv EA = tcg_temp_new();
/* Get sh (the low 4 bits of EA, i.e. EA[28:31]) by AND-ing EA with 0xf. */
gen_addr_reg_index(ctx, EA);
tcg_gen_extu_tl_i64(sh, EA);
tcg_gen_andi_i64(sh, sh, 0xfULL);
/*
 * Create bytes sh:sh+7 of X (from description) and place them in the
 * higher doubleword of vD: multiplying by 0x0101010101010101 replicates
 * sh into all eight byte lanes, then the add supplies the 0..7 ramp.
 */
tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
set_avr64(VT, result, true);
/*
 * Create bytes sh+8:sh+15 of X (from description) and place them in the
 * lower doubleword of vD.
 */
tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
set_avr64(VT, result, false);
tcg_temp_free_i64(result);
tcg_temp_free_i64(sh);
tcg_temp_free(EA);
}
/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28:31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
static void trans_lvsr(DisasContext *ctx)
{
int VT = rD(ctx->opcode);
TCGv_i64 result = tcg_temp_new_i64();
TCGv_i64 sh = tcg_temp_new_i64();
TCGv EA = tcg_temp_new();
/* Get sh (the low 4 bits of EA, i.e. EA[28:31]) by AND-ing EA with 0xf. */
gen_addr_reg_index(ctx, EA);
tcg_gen_extu_tl_i64(sh, EA);
tcg_gen_andi_i64(sh, sh, 0xfULL);
/*
 * Create bytes (16-sh):(23-sh) of X (from description) and place them in
 * the higher doubleword of vD: replicate sh into every byte lane, then
 * subtract the replicated value from the 0x10..0x17 ramp.
 */
tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
set_avr64(VT, result, true);
/*
 * Create bytes (24-sh):(31-sh) of X (from description) and place them in
 * the lower doubleword of vD.
 */
tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
set_avr64(VT, result, false);
tcg_temp_free_i64(result);
tcg_temp_free_i64(sh);
tcg_temp_free(EA);
}
/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shifting left 128 bit value of vA by value specified in bits 125-127 of vB.
 * Lowest 3 bits in each byte element of register vB must be identical or
 * result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword element of vA
     * in 'carry' and perform the shift on the lower doubleword.
     *
     * The carry is computed as (avr >> 32) >> (32 - sh) rather than
     * avr >> (64 - sh): TCG shift results are undefined for counts
     * outside 0..63, and the single-shift form produces a count of 64
     * when sh == 0, leaking junk into the high doubleword via the OR
     * below.  Each shift here is at most 32, and sh == 0 correctly
     * yields a zero carry.
     */
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform the shift on the higher doubleword element of vA and
     * replace its lowest 'sh' bits with the carry.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}
/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shifting right 128 bit value of vA by value specified in bits 125-127 of vB.
 * Lowest 3 bits in each byte element of register vB must be identical or
 * result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword element of vA
     * in 'carry' and perform the shift on the higher doubleword.
     *
     * The carry is computed as (avr << 32) << (32 - sh) rather than
     * avr << (64 - sh): TCG shift results are undefined for counts
     * outside 0..63, and the single-shift form produces a count of 64
     * when sh == 0, leaking junk into the low doubleword via the OR
     * below.  Each shift here is at most 32, and sh == 0 correctly
     * yields a zero carry.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);

    /*
     * Perform the shift on the lower doubleword element of vA and
     * replace its highest 'sh' bits with the carry.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}
/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * All ith bits (i in range 1 to 8) of each byte of a doubleword element in
 * the source register are concatenated and placed into the ith byte of the
 * corresponding doubleword element of the destination register - i.e. an
 * 8x8 bit-matrix transpose of each doubleword.
 *
 * The solution below is done for both doubleword elements of the source
 * register in parallel, to reduce the number of ops emitted (hence the
 * two-element arrays):
 * First, both doubleword elements of source register vB are placed in the
 * matching element of array avr. Bits are gathered in 2x8 iterations (two
 * for loops). In the first iteration bit 1 of byte 1, bit 2 of byte 2, ...
 * bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1} can
 * be AND-ed with tcg_mask (the diagonal mask 0x8040201008040201). For every
 * following iteration, avr[i] and tcg_mask have to be shifted right by 7
 * and 8 places respectively to bring bit 1 of byte 2, bit 2 of byte 3, ...
 * bit 7 of byte 8 into their final spots, so the shifted avr values (saved
 * in tmp) can be AND-ed with the new tcg_mask... After the first 8
 * iterations (first loop) all the first bits are in their final places,
 * all second bits except the second bit of the eighth byte are in their
 * places, and so on (only one eighth bit, from the eighth byte, is in its
 * place). In the second loop the same operations are performed
 * symmetrically (left shifts) to gather the other half of the bits.
 * Results for the two doubleword elements accumulate in result[0] and
 * result[1] and are finally written to the corresponding doubleword
 * elements of destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
int VT = rD(ctx->opcode);
int VB = rB(ctx->opcode);
TCGv_i64 tmp = tcg_temp_new_i64();
uint64_t mask = 0x8040201008040201ULL;
int i, j;
TCGv_i64 result[2];
result[0] = tcg_temp_new_i64();
result[1] = tcg_temp_new_i64();
TCGv_i64 avr[2];
avr[0] = tcg_temp_new_i64();
avr[1] = tcg_temp_new_i64();
TCGv_i64 tcg_mask = tcg_temp_new_i64();
tcg_gen_movi_i64(tcg_mask, mask);
/* Seed each result with the bits already on the transpose diagonal. */
for (j = 0; j < 2; j++) {
get_avr64(avr[j], VB, j);
tcg_gen_and_i64(result[j], avr[j], tcg_mask);
}
/* Gather the bits below the diagonal: shift data by 7, mask by 8, per step. */
for (i = 1; i < 8; i++) {
tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
for (j = 0; j < 2; j++) {
tcg_gen_shri_i64(tmp, avr[j], i * 7);
tcg_gen_and_i64(tmp, tmp, tcg_mask);
tcg_gen_or_i64(result[j], result[j], tmp);
}
}
/* Gather the bits above the diagonal symmetrically, with left shifts. */
for (i = 1; i < 8; i++) {
tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
for (j = 0; j < 2; j++) {
tcg_gen_shli_i64(tmp, avr[j], i * 7);
tcg_gen_and_i64(tmp, tmp, tcg_mask);
tcg_gen_or_i64(result[j], result[j], tmp);
}
}
for (j = 0; j < 2; j++) {
set_avr64(VT, result[j], j);
}
tcg_temp_free_i64(tmp);
tcg_temp_free_i64(tcg_mask);
tcg_temp_free_i64(result[0]);
tcg_temp_free_i64(result[1]);
tcg_temp_free_i64(avr[0]);
tcg_temp_free_i64(avr[1]);
}
/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Counting the number of leading zero bits of each word element in source
 * register and placing result in appropriate word element of destination
 * register.
 */
static void trans_vclzw(DisasContext *ctx)
{
int VT = rD(ctx->opcode);
int VB = rB(ctx->opcode);
TCGv_i32 tmp = tcg_temp_new_i32();
int i;
/* Perform count for every word element using tcg_gen_clzi_i32. */
for (i = 0; i < 4; i++) {
/*
 * Each 32-bit word is loaded/stored at a raw byte offset into the
 * vsr[] backing store (i * 4 past the first u64).
 * NOTE(review): word ordering within the vector therefore follows
 * host memory layout - confirm this maps word elements correctly on
 * big-endian hosts as well.
 */
tcg_gen_ld_i32(tmp, cpu_env,
offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
/* Third operand is the result for a zero input: all 32 bits clear. */
tcg_gen_clzi_i32(tmp, tmp, 32);
tcg_gen_st_i32(tmp, cpu_env,
offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
}
tcg_temp_free_i32(tmp);
}
/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Each doubleword element of vB has its leading-zero count written to
 * the corresponding doubleword element of vD.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int vrt = rD(ctx->opcode);
    int vrb = rB(ctx->opcode);
    TCGv_i64 elem = tcg_temp_new_i64();
    int dword;

    /* dword == 1 selects the high doubleword, dword == 0 the low one. */
    for (dword = 1; dword >= 0; dword--) {
        get_avr64(elem, vrb, dword);
        /* Third operand is the result for a zero input: 64. */
        tcg_gen_clzi_i64(elem, elem, 64);
        set_avr64(vrt, elem, dword);
    }
    tcg_temp_free_i64(elem);
}
GEN_VXFORM(vmuloub, 4, 0); GEN_VXFORM(vmuloub, 4, 0);
GEN_VXFORM(vmulouh, 4, 1); GEN_VXFORM(vmulouh, 4, 1);
GEN_VXFORM(vmulouw, 4, 2); GEN_VXFORM(vmulouw, 4, 2);
@ -627,11 +906,11 @@ GEN_VXFORM(vrld, 2, 3);
GEN_VXFORM(vrldmi, 2, 3); GEN_VXFORM(vrldmi, 2, 3);
GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \ GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
vrldmi, PPC_NONE, PPC2_ISA300) vrldmi, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vsl, 2, 7); GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM(vrldnm, 2, 7); GEN_VXFORM(vrldnm, 2, 7);
GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \ GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
vrldnm, PPC_NONE, PPC2_ISA300) vrldnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vsr, 2, 11); GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0); GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1); GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17); GEN_VXFORM_ENV(vpkudum, 7, 17);
@ -662,6 +941,8 @@ GEN_VXFORM_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
GEN_VXFORM_HETRO(vextubrx, 6, 28) GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29) GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30) GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, \ GEN_VXFORM_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, \
vextuwrx, PPC_NONE, PPC2_ISA300) vextuwrx, PPC_NONE, PPC2_ISA300)
@ -1028,8 +1309,8 @@ GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
GEN_VXFORM_NOA(vclzb, 1, 28) GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29) GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_NOA(vclzw, 1, 30) GEN_VXFORM_TRANS(vclzw, 1, 30)
GEN_VXFORM_NOA(vclzd, 1, 31) GEN_VXFORM_TRANS(vclzd, 1, 31)
GEN_VXFORM_NOA_2(vnegw, 1, 24, 6) GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
GEN_VXFORM_NOA_2(vnegd, 1, 24, 7) GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16) GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
@ -1057,7 +1338,7 @@ GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
vpopcntd, PPC_NONE, PPC2_ALTIVEC_207) vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23); GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21); GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_NOA(vgbbd, 6, 20); GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16) GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17) GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18) GEN_VXFORM(vpmsumw, 4, 18)

View File

@ -8196,6 +8196,18 @@ static void gen_spr_power8_pspb(CPUPPCState *env)
KVM_REG_PPC_PSPB, 0); KVM_REG_PPC_PSPB, 0);
} }
/*
 * Register the DPDES SPR (Directed Privileged Door-bell Exception State)
 * for POWER8-family CPUs.  Only meaningful with a full system emulation,
 * hence the !CONFIG_USER_ONLY guard; the value is synchronised with KVM
 * through the KVM_REG_PPC_DPDES one-reg id and starts at 0.
 */
static void gen_spr_power8_dpdes(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
/* Directed Privileged Door-bell Exception State, used for IPI */
/*
 * NOTE(review): the callback pairs appear to grant no user access,
 * read-only supervisor access, and read/write hypervisor access -
 * confirm against spr_register_kvm_hv()'s parameter order.
 */
spr_register_kvm_hv(env, SPR_DPDES, "DPDES",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_DPDES, 0x00000000);
#endif
}
static void gen_spr_power8_ic(CPUPPCState *env) static void gen_spr_power8_ic(CPUPPCState *env)
{ {
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
@ -8627,6 +8639,7 @@ static void init_proc_POWER8(CPUPPCState *env)
gen_spr_power8_pmu_user(env); gen_spr_power8_pmu_user(env);
gen_spr_power8_tm(env); gen_spr_power8_tm(env);
gen_spr_power8_pspb(env); gen_spr_power8_pspb(env);
gen_spr_power8_dpdes(env);
gen_spr_vtb(env); gen_spr_vtb(env);
gen_spr_power8_ic(env); gen_spr_power8_ic(env);
gen_spr_power8_book4(env); gen_spr_power8_book4(env);
@ -8815,6 +8828,7 @@ static void init_proc_POWER9(CPUPPCState *env)
gen_spr_power8_pmu_user(env); gen_spr_power8_pmu_user(env);
gen_spr_power8_tm(env); gen_spr_power8_tm(env);
gen_spr_power8_pspb(env); gen_spr_power8_pspb(env);
gen_spr_power8_dpdes(env);
gen_spr_vtb(env); gen_spr_vtb(env);
gen_spr_power8_ic(env); gen_spr_power8_ic(env);
gen_spr_power8_book4(env); gen_spr_power8_book4(env);
@ -9438,14 +9452,13 @@ static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
static void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp) static void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
{ {
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
opcode_t *opc; opcode_t *opc;
fill_new_table(env->opcodes, PPC_CPU_OPCODES_LEN); fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
if (((opc->handler.type & pcc->insns_flags) != 0) || if (((opc->handler.type & pcc->insns_flags) != 0) ||
((opc->handler.type2 & pcc->insns_flags2) != 0)) { ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
if (register_insn(env->opcodes, opc) < 0) { if (register_insn(cpu->opcodes, opc) < 0) {
error_setg(errp, "ERROR initializing PowerPC instruction " error_setg(errp, "ERROR initializing PowerPC instruction "
"0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2, "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
opc->opc3); opc->opc3);
@ -9453,7 +9466,7 @@ static void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
} }
} }
} }
fix_opcode_tables(env->opcodes); fix_opcode_tables(cpu->opcodes);
fflush(stdout); fflush(stdout);
fflush(stderr); fflush(stderr);
} }
@ -10021,7 +10034,6 @@ static void ppc_cpu_unrealize(DeviceState *dev, Error **errp)
{ {
PowerPCCPU *cpu = POWERPC_CPU(dev); PowerPCCPU *cpu = POWERPC_CPU(dev);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
Error *local_err = NULL; Error *local_err = NULL;
opc_handler_t **table, **table_2; opc_handler_t **table, **table_2;
int i, j, k; int i, j, k;
@ -10033,11 +10045,11 @@ static void ppc_cpu_unrealize(DeviceState *dev, Error **errp)
} }
for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) { for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
if (env->opcodes[i] == &invalid_handler) { if (cpu->opcodes[i] == &invalid_handler) {
continue; continue;
} }
if (is_indirect_opcode(env->opcodes[i])) { if (is_indirect_opcode(cpu->opcodes[i])) {
table = ind_table(env->opcodes[i]); table = ind_table(cpu->opcodes[i]);
for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) { for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
if (table[j] == &invalid_handler) { if (table[j] == &invalid_handler) {
continue; continue;
@ -10055,7 +10067,7 @@ static void ppc_cpu_unrealize(DeviceState *dev, Error **errp)
~PPC_INDIRECT)); ~PPC_INDIRECT));
} }
} }
g_free((opc_handler_t *)((uintptr_t)env->opcodes[i] & g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
~PPC_INDIRECT)); ~PPC_INDIRECT));
} }
} }
@ -10469,6 +10481,28 @@ static bool ppc_cpu_is_big_endian(CPUState *cs)
return !msr_le; return !msr_le;
} }
/*
 * Hook run when a vCPU enters the execution loop: forward the event to
 * the virtual hypervisor, if one is attached to this CPU.
 */
static void ppc_cpu_exec_enter(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    PPCVirtualHypervisorClass *vhc;

    if (!cpu->vhyp) {
        return;
    }
    vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    vhc->cpu_exec_enter(cpu->vhyp, cpu);
}
/*
 * Hook run when a vCPU leaves the execution loop: forward the event to
 * the virtual hypervisor, if one is attached to this CPU.
 */
static void ppc_cpu_exec_exit(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    PPCVirtualHypervisorClass *vhc;

    if (!cpu->vhyp) {
        return;
    }
    vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    vhc->cpu_exec_exit(cpu->vhyp, cpu);
}
#endif #endif
static void ppc_cpu_instance_init(Object *obj) static void ppc_cpu_instance_init(Object *obj)
@ -10622,6 +10656,11 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->tcg_initialize = ppc_translate_init; cc->tcg_initialize = ppc_translate_init;
cc->tlb_fill = ppc_cpu_tlb_fill; cc->tlb_fill = ppc_cpu_tlb_fill;
#endif #endif
#ifndef CONFIG_USER_ONLY
cc->cpu_exec_enter = ppc_cpu_exec_enter;
cc->cpu_exec_exit = ppc_cpu_exec_exit;
#endif
cc->disas_set_info = ppc_disas_set_info; cc->disas_set_info = ppc_disas_set_info;
dc->fw_name = "PowerPC,UNKNOWN"; dc->fw_name = "PowerPC,UNKNOWN";

16
vl.c
View File

@ -1557,6 +1557,20 @@ void qemu_system_reset(ShutdownCause reason)
cpu_synchronize_all_post_reset(); cpu_synchronize_all_post_reset();
} }
/*
 * Wake the VM after suspend.
 *
 * Invokes the machine model's wakeup hook, when the machine exists and
 * provides one; otherwise does nothing.
 */
static void qemu_system_wakeup(void)
{
    MachineClass *mc;

    if (!current_machine) {
        return;
    }
    mc = MACHINE_GET_CLASS(current_machine);
    if (mc->wakeup) {
        mc->wakeup(current_machine);
    }
}
void qemu_system_guest_panicked(GuestPanicInformation *info) void qemu_system_guest_panicked(GuestPanicInformation *info)
{ {
qemu_log_mask(LOG_GUEST_ERROR, "Guest crashed"); qemu_log_mask(LOG_GUEST_ERROR, "Guest crashed");
@ -1765,7 +1779,7 @@ static bool main_loop_should_exit(void)
} }
if (qemu_wakeup_requested()) { if (qemu_wakeup_requested()) {
pause_all_vcpus(); pause_all_vcpus();
qemu_system_reset(SHUTDOWN_CAUSE_NONE); qemu_system_wakeup();
notifier_list_notify(&wakeup_notifiers, &wakeup_reason); notifier_list_notify(&wakeup_notifiers, &wakeup_reason);
wakeup_reason = QEMU_WAKEUP_REASON_NONE; wakeup_reason = QEMU_WAKEUP_REASON_NONE;
resume_all_vcpus(); resume_all_vcpus();