ppc patch queue 2021-05-19

Next set of ppc related patches for qemu-6.1.  Highlights are:
  * Start of a significant softmmu cleanup from Richard Henderson
  * Further work towards allowing builds without CONFIG_TCG
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAmClBl0ACgkQbDjKyiDZ
 s5JKLhAAkqyFAs/KuyM57OE/CnkZh8ZOJgugTr+58UT1EnwQ4FJ9Veu+L7JhZwYb
 vsX7KQarIa8aC9bBKekKcNh4nJHziO/0pvjWzZUXyaGMDqSHyhYdvCjneZVO0pk3
 +dRZaCOv+qxq6z+JiFmHTg0H2O2wAFJfn2HhLS6ay7fo5on3rQTaONPrs22fy+9Z
 E5Pf330T4tRCZAcWHVudhS8DLqfuGay+chGBbbcMVwywHejtbeXwpdBC+FhxqAQe
 VhvwVPJjwjggjyEssq8mWEMxMnVeGDbzzXRlAaEUPTB0byU1JdRbC4nhKIuHMEyq
 KbSljzD0xepsK6wkE7aqOy1Dkj7U5hvlpqDrUL5L8/OFNRQIPKwwBSWoo+5Y40GQ
 EL+nJZ+7dRgB5EA0k84bXDn0NUtqIBlX+1iPG/UOjuOIkdxtdqPJfscYiUGqjZyj
 fYcdQNqj7f+OlN2RCkpIpCwbYbJvjwKYXU558ULD7tM1v6gaB7DBXS0wrb5+vf99
 NmBhn+9E/ATNM6h4KuBrXg0yKC87vTqwHOh0d/PY9ezF81RTWGwEwkS3cO+5G1tw
 Xyud6O6Ld9Xmnzaf5wFCKK65hHC/Gm+OM9Gh7+sgabRUZ/sc992PmGQ7Z3oCnrt0
 fws/iW3EX1R4YEy9hQcTd7fCZj0BzvmjnHQwnGOR7Xa5qUGo7CQ=
 =FI4M
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dg-gitlab/tags/ppc-for-6.1-20210519' into staging

ppc patch queue 2021-05-19

Next set of ppc related patches for qemu-6.1.  Highlights are:
 * Start of a significant softmmu cleanup from Richard Henderson
 * Further work towards allowing builds without CONFIG_TCG

# gpg: Signature made Wed 19 May 2021 13:36:45 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dg-gitlab/tags/ppc-for-6.1-20210519: (48 commits)
  target/ppc: Remove type argument for mmubooke206_get_physical_address
  target/ppc: Remove type argument from mmubooke206_check_tlb
  target/ppc: Remove type argument from mmubooke_get_physical_address
  target/ppc: Remove type argument from mmubooke_check_tlb
  target/ppc: Remove type argument from mmu40x_get_physical_address
  target/ppc: Remove type argument from get_bat_6xx_tlb
  target/ppc: Remove type argument from ppc6xx_tlb_check
  target/ppc: Remove type argument from ppc6xx_tlb_pte_check
  target/ppc: Remove type argument from check_prot
  target/ppc: Use MMUAccessType in mmu_helper.c
  target/ppc: Rename access_type to type in mmu_helper.c
  target/ppc: Use MMUAccessType in mmu-hash32.c
  target/ppc: Use MMUAccessType in mmu-hash64.c
  target/ppc: Use MMUAccessType in mmu-radix64.c
  target/ppc: Introduce prot_for_access_type
  target/ppc: Fix load endianness for lxvwsx/lxvdsx
  target/ppc: Use translator_loop_temp_check
  target/ppc: Mark helper_raise_exception* as noreturn
  target/ppc: Tidy exception vs exit_tb
  target/ppc: Move single-step check to ppc_tr_tb_stop
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 9aa9197a35 by Peter Maydell, 2021-05-19 21:00:33 +01:00
27 changed files with 3000 additions and 2816 deletions
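A number of the target/ppc commits in this queue ("Use MMUAccessType in ...", "Remove type argument from ...", "Introduce prot_for_access_type") replace the old bare-integer rwx argument (0 = data load, 1 = data store, 2 = instruction fetch) with the generic MMUAccessType enum and a shared helper. The following is a minimal stand-alone sketch of that pattern; the enum and PAGE_* values are stand-ins rather than the real QEMU headers:

#include <assert.h>
#include <stdio.h>

/* Stand-ins for QEMU's definitions; the real ones live in the QEMU headers. */
enum { PAGE_READ = 0x1, PAGE_WRITE = 0x2, PAGE_EXEC = 0x4 };
typedef enum { MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH } MMUAccessType;

/* Old style: a bare int "rwx" indexes a per-function lookup table. */
static int need_prot_old(int rwx)
{
    static const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };
    assert(rwx >= 0 && rwx <= 2);
    return need_prot[rwx];
}

/* New style: a self-describing enum, as in prot_for_access_type() below. */
static int need_prot_new(MMUAccessType access_type)
{
    switch (access_type) {
    case MMU_DATA_LOAD:
        return PAGE_READ;
    case MMU_DATA_STORE:
        return PAGE_WRITE;
    case MMU_INST_FETCH:
        return PAGE_EXEC;
    }
    return 0;
}

int main(void)
{
    /* Both styles agree; the enum just makes the call sites self-documenting. */
    printf("%d %d\n", need_prot_old(1), need_prot_new(MMU_DATA_STORE));
    return 0;
}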


@ -29,6 +29,9 @@ ppc_ss.add(when: 'CONFIG_PSERIES', if_true: files(
'spapr_numa.c',
'pef.c',
))
ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_TCG'], if_true: files(
'spapr_softmmu.c',
))
ppc_ss.add(when: 'CONFIG_SPAPR_RNG', if_true: files('spapr_rng.c'))
ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_LINUX'], if_true: files(
'spapr_pci_vfio.c',


@ -196,7 +196,7 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
_FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
_FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
if (env->spr_cb[SPR_PURR].oea_read) {
if (ppc_has_spr(cpu, SPR_PURR)) {
_FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
}


@ -703,10 +703,10 @@ static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
_FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
_FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
if (env->spr_cb[SPR_PURR].oea_read) {
if (ppc_has_spr(cpu, SPR_PURR)) {
_FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
}
if (env->spr_cb[SPR_SPURR].oea_read) {
if (ppc_has_spr(cpu, SPR_SPURR)) {
_FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
}
@ -979,6 +979,7 @@ static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
*/
val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
val[3] = 0x00; /* Hash */
spapr_check_mmu_mode(false);
} else if (kvm_enabled()) {
if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
val[3] = 0x80; /* OV5_MMU_BOTH */
@ -1556,6 +1557,22 @@ void spapr_setup_hpt(SpaprMachineState *spapr)
}
}
void spapr_check_mmu_mode(bool guest_radix)
{
if (guest_radix) {
if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
error_report("Guest requested unavailable MMU mode (radix).");
exit(EXIT_FAILURE);
}
} else {
if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
&& !kvmppc_has_cap_mmu_hash_v3()) {
error_report("Guest requested unavailable MMU mode (hash).");
exit(EXIT_FAILURE);
}
}
}
static void spapr_machine_reset(MachineState *machine)
{
SpaprMachineState *spapr = SPAPR_MACHINE(machine);


@ -371,6 +371,65 @@ static bool spapr_pagesize_cb(void *opaque, uint32_t seg_pshift,
return true;
}
static void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
bool (*cb)(void *, uint32_t, uint32_t),
void *opaque)
{
PPCHash64Options *opts = cpu->hash64_opts;
int i;
int n = 0;
bool ci_largepage = false;
assert(opts);
n = 0;
for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
PPCHash64SegmentPageSizes *sps = &opts->sps[i];
int j;
int m = 0;
assert(n <= i);
if (!sps->page_shift) {
break;
}
for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
PPCHash64PageSize *ps = &sps->enc[j];
assert(m <= j);
if (!ps->page_shift) {
break;
}
if (cb(opaque, sps->page_shift, ps->page_shift)) {
if (ps->page_shift >= 16) {
ci_largepage = true;
}
sps->enc[m++] = *ps;
}
}
/* Clear rest of the row */
for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
}
if (m) {
n++;
}
}
/* Clear the rest of the table */
for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
}
if (!ci_largepage) {
opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
}
}
static void cap_hpt_maxpagesize_cpu_apply(SpaprMachineState *spapr,
PowerPCCPU *cpu,
uint8_t val, Error **errp)


@ -20,24 +20,7 @@
#include "mmu-book3s-v3.h"
#include "hw/mem/memory-device.h"
static bool has_spr(PowerPCCPU *cpu, int spr)
{
/* We can test whether the SPR is defined by checking for a valid name */
return cpu->env.spr_cb[spr].name != NULL;
}
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
/*
* hash value/pteg group index is normalized by HPT mask
*/
if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
return false;
}
return true;
}
static bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
{
MachineState *machine = MACHINE(spapr);
DeviceMemoryState *dms = machine->device_memory;
@ -53,355 +36,6 @@ static bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
return false;
}
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong flags = args[0];
target_ulong ptex = args[1];
target_ulong pteh = args[2];
target_ulong ptel = args[3];
unsigned apshift;
target_ulong raddr;
target_ulong slot;
const ppc_hash_pte64_t *hptes;
apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
if (!apshift) {
/* Bad page size encoding */
return H_PARAMETER;
}
raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
if (is_ram_address(spapr, raddr)) {
/* Regular RAM - should have WIMG=0010 */
if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
return H_PARAMETER;
}
} else {
target_ulong wimg_flags;
/* Looks like an IO address */
/* FIXME: What WIMG combinations could be sensible for IO?
* For now we allow WIMG=010x, but are there others? */
/* FIXME: Should we check against registered IO addresses? */
wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));
if (wimg_flags != HPTE64_R_I &&
wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
return H_PARAMETER;
}
}
pteh &= ~0x60ULL;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
slot = ptex & 7ULL;
ptex = ptex & ~7ULL;
if (likely((flags & H_EXACT) == 0)) {
hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
for (slot = 0; slot < 8; slot++) {
if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
break;
}
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
if (slot == 8) {
return H_PTEG_FULL;
}
} else {
hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
return H_PTEG_FULL;
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
}
spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
args[0] = ptex + slot;
return H_SUCCESS;
}
typedef enum {
REMOVE_SUCCESS = 0,
REMOVE_NOT_FOUND = 1,
REMOVE_PARM = 2,
REMOVE_HW = 3,
} RemoveResult;
static RemoveResult remove_hpte(PowerPCCPU *cpu
, target_ulong ptex,
target_ulong avpn,
target_ulong flags,
target_ulong *vp, target_ulong *rp)
{
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
if (!valid_ptex(cpu, ptex)) {
return REMOVE_PARM;
}
hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
v = ppc_hash64_hpte0(cpu, hptes, 0);
r = ppc_hash64_hpte1(cpu, hptes, 0);
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
if ((v & HPTE64_V_VALID) == 0 ||
((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
((flags & H_ANDCOND) && (v & avpn) != 0)) {
return REMOVE_NOT_FOUND;
}
*vp = v;
*rp = r;
spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
return REMOVE_SUCCESS;
}
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUPPCState *env = &cpu->env;
target_ulong flags = args[0];
target_ulong ptex = args[1];
target_ulong avpn = args[2];
RemoveResult ret;
ret = remove_hpte(cpu, ptex, avpn, flags,
&args[0], &args[1]);
switch (ret) {
case REMOVE_SUCCESS:
check_tlb_flush(env, true);
return H_SUCCESS;
case REMOVE_NOT_FOUND:
return H_NOT_FOUND;
case REMOVE_PARM:
return H_PARAMETER;
case REMOVE_HW:
return H_HARDWARE;
}
g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
#define H_BULK_REMOVE_END 0xc000000000000000ULL
#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
#define H_BULK_REMOVE_HW 0x3000000000000000ULL
#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH 4
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUPPCState *env = &cpu->env;
int i;
target_ulong rc = H_SUCCESS;
for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
target_ulong *tsh = &args[i*2];
target_ulong tsl = args[i*2 + 1];
target_ulong v, r, ret;
if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
break;
} else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
return H_PARAMETER;
}
*tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
*tsh |= H_BULK_REMOVE_RESPONSE;
if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
*tsh |= H_BULK_REMOVE_PARM;
return H_PARAMETER;
}
ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
(*tsh & H_BULK_REMOVE_FLAGS) >> 26,
&v, &r);
*tsh |= ret << 60;
switch (ret) {
case REMOVE_SUCCESS:
*tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
break;
case REMOVE_PARM:
rc = H_PARAMETER;
goto exit;
case REMOVE_HW:
rc = H_HARDWARE;
goto exit;
}
}
exit:
check_tlb_flush(env, true);
return rc;
}
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUPPCState *env = &cpu->env;
target_ulong flags = args[0];
target_ulong ptex = args[1];
target_ulong avpn = args[2];
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
v = ppc_hash64_hpte0(cpu, hptes, 0);
r = ppc_hash64_hpte1(cpu, hptes, 0);
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
if ((v & HPTE64_V_VALID) == 0 ||
((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
return H_NOT_FOUND;
}
r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
r |= (flags << 55) & HPTE64_R_PP0;
r |= (flags << 48) & HPTE64_R_KEY_HI;
r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
spapr_store_hpte(cpu, ptex,
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
/* Flush the tlb */
check_tlb_flush(env, true);
/* Don't need a memory barrier, due to qemu's global lock */
spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
return H_SUCCESS;
}
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong flags = args[0];
target_ulong ptex = args[1];
int i, ridx, n_entries = 1;
const ppc_hash_pte64_t *hptes;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
if (flags & H_READ_4) {
/* Clear the two low order bits */
ptex &= ~(3ULL);
n_entries = 4;
}
hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
for (i = 0, ridx = 0; i < n_entries; i++) {
args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);
return H_SUCCESS;
}
struct SpaprPendingHpt {
/* These fields are read-only after initialization */
int shift;
QemuThread thread;
/* These fields are protected by the BQL */
bool complete;
/* These fields are private to the preparation thread if
* !complete, otherwise protected by the BQL */
int ret;
void *hpt;
};
static void free_pending_hpt(SpaprPendingHpt *pending)
{
if (pending->hpt) {
qemu_vfree(pending->hpt);
}
g_free(pending);
}
static void *hpt_prepare_thread(void *opaque)
{
SpaprPendingHpt *pending = opaque;
size_t size = 1ULL << pending->shift;
pending->hpt = qemu_try_memalign(size, size);
if (pending->hpt) {
memset(pending->hpt, 0, size);
pending->ret = H_SUCCESS;
} else {
pending->ret = H_NO_MEM;
}
qemu_mutex_lock_iothread();
if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
/* Ready to go */
pending->complete = true;
} else {
/* We've been cancelled, clean ourselves up */
free_pending_hpt(pending);
}
qemu_mutex_unlock_iothread();
return NULL;
}
/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
SpaprPendingHpt *pending = spapr->pending_hpt;
/* Let the thread know it's cancelled */
spapr->pending_hpt = NULL;
if (!pending) {
/* Nothing to do */
return;
}
if (!pending->complete) {
/* thread will clean itself up */
return;
}
free_pending_hpt(pending);
}
/* Convert a return code from the KVM ioctl()s implementing resize HPT
* into a PAPR hypercall return code */
static target_ulong resize_hpt_convert_rc(int ret)
@ -447,7 +81,6 @@ static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
{
target_ulong flags = args[0];
int shift = args[1];
SpaprPendingHpt *pending = spapr->pending_hpt;
uint64_t current_ram_size;
int rc;
@ -484,182 +117,11 @@ static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
return resize_hpt_convert_rc(rc);
}
if (pending) {
/* something already in progress */
if (pending->shift == shift) {
/* and it's suitable */
if (pending->complete) {
return pending->ret;
} else {
return H_LONG_BUSY_ORDER_100_MSEC;
}
}
/* not suitable, cancel and replace */
cancel_hpt_prepare(spapr);
}
if (!shift) {
/* nothing to do */
return H_SUCCESS;
}
/* start new prepare */
pending = g_new0(SpaprPendingHpt, 1);
pending->shift = shift;
pending->ret = H_HARDWARE;
qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);
spapr->pending_hpt = pending;
/* In theory we could estimate the time more accurately based on
* the new size, but there's not much point */
return H_LONG_BUSY_ORDER_100_MSEC;
}
static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
uint8_t *addr = htab;
addr += pteg * HASH_PTEG_SIZE_64;
addr += slot * HASH_PTE_SIZE_64;
return ldq_p(addr);
}
static void new_hpte_store(void *htab, uint64_t pteg, int slot,
uint64_t pte0, uint64_t pte1)
{
uint8_t *addr = htab;
addr += pteg * HASH_PTEG_SIZE_64;
addr += slot * HASH_PTE_SIZE_64;
stq_p(addr, pte0);
stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
}
static int rehash_hpte(PowerPCCPU *cpu,
const ppc_hash_pte64_t *hptes,
void *old_hpt, uint64_t oldsize,
void *new_hpt, uint64_t newsize,
uint64_t pteg, int slot)
{
uint64_t old_hash_mask = (oldsize >> 7) - 1;
uint64_t new_hash_mask = (newsize >> 7) - 1;
target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
target_ulong pte1;
uint64_t avpn;
unsigned base_pg_shift;
uint64_t hash, new_pteg, replace_pte0;
if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
return H_SUCCESS;
}
pte1 = ppc_hash64_hpte1(cpu, hptes, slot);
base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);
if (pte0 & HPTE64_V_SECONDARY) {
pteg = ~pteg;
}
if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
uint64_t offset, vsid;
/* We only have 28 - 23 bits of offset in avpn */
offset = (avpn & 0x1f) << 23;
vsid = avpn >> 5;
/* We can find more bits from the pteg value */
if (base_pg_shift < 23) {
offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
}
hash = vsid ^ (offset >> base_pg_shift);
} else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
uint64_t offset, vsid;
/* We only have 40 - 23 bits of seg_off in avpn */
offset = (avpn & 0x1ffff) << 23;
vsid = avpn >> 17;
if (base_pg_shift < 23) {
offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
<< base_pg_shift;
}
hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
} else {
error_report("rehash_pte: Bad segment size in HPTE");
if (kvm_enabled()) {
return H_HARDWARE;
}
new_pteg = hash & new_hash_mask;
if (pte0 & HPTE64_V_SECONDARY) {
assert(~pteg == (hash & old_hash_mask));
new_pteg = ~new_pteg;
} else {
assert(pteg == (hash & old_hash_mask));
}
assert((oldsize != newsize) || (pteg == new_pteg));
replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
/*
* Strictly speaking, we don't need all these tests, since we only
* ever rehash bolted HPTEs. We might in future handle non-bolted
* HPTEs, though so make the logic correct for those cases as
* well.
*/
if (replace_pte0 & HPTE64_V_VALID) {
assert(newsize < oldsize);
if (replace_pte0 & HPTE64_V_BOLTED) {
if (pte0 & HPTE64_V_BOLTED) {
/* Bolted collision, nothing we can do */
return H_PTEG_FULL;
} else {
/* Discard this hpte */
return H_SUCCESS;
}
}
}
new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
return H_SUCCESS;
}
static int rehash_hpt(PowerPCCPU *cpu,
void *old_hpt, uint64_t oldsize,
void *new_hpt, uint64_t newsize)
{
uint64_t n_ptegs = oldsize >> 7;
uint64_t pteg;
int slot;
int rc;
for (pteg = 0; pteg < n_ptegs; pteg++) {
hwaddr ptex = pteg * HPTES_PER_GROUP;
const ppc_hash_pte64_t *hptes
= ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
if (!hptes) {
return H_HARDWARE;
}
for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
pteg, slot);
if (rc != H_SUCCESS) {
ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
return rc;
}
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
}
return H_SUCCESS;
return softmmu_resize_hpt_prepare(cpu, spapr, shift);
}
static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
@ -675,7 +137,7 @@ static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
}
}
static void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
{
CPUState *cs;
@ -700,9 +162,7 @@ static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
{
target_ulong flags = args[0];
target_ulong shift = args[1];
SpaprPendingHpt *pending = spapr->pending_hpt;
int rc;
size_t newsize;
if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
return H_AUTHORITY;
@ -725,43 +185,15 @@ static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
return rc;
}
if (flags != 0) {
return H_PARAMETER;
if (kvm_enabled()) {
return H_HARDWARE;
}
if (!pending || (pending->shift != shift)) {
/* no matching prepare */
return H_CLOSED;
}
if (!pending->complete) {
/* prepare has not completed */
return H_BUSY;
}
/* Shouldn't have got past PREPARE without an HPT */
g_assert(spapr->htab_shift);
newsize = 1ULL << pending->shift;
rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
pending->hpt, newsize);
if (rc == H_SUCCESS) {
qemu_vfree(spapr->htab);
spapr->htab = pending->hpt;
spapr->htab_shift = pending->shift;
push_sregs_to_kvm_pr(spapr);
pending->hpt = NULL; /* so it's not free()d */
}
/* Clean up */
spapr->pending_hpt = NULL;
free_pending_hpt(pending);
return rc;
return softmmu_resize_hpt_commit(cpu, spapr, flags, shift);
}
static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
@ -774,12 +206,12 @@ static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
if (!has_spr(cpu, SPR_DABR)) {
if (!ppc_has_spr(cpu, SPR_DABR)) {
return H_HARDWARE; /* DABR register not available */
}
cpu_synchronize_state(CPU(cpu));
if (has_spr(cpu, SPR_DABRX)) {
if (ppc_has_spr(cpu, SPR_DABRX)) {
cpu->env.spr[SPR_DABRX] = 0x3; /* Use Problem and Privileged state */
} else if (!(args[0] & 0x4)) { /* Breakpoint Translation set? */
return H_RESERVED_DABR;
@ -794,7 +226,7 @@ static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
{
target_ulong dabrx = args[1];
if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
if (!ppc_has_spr(cpu, SPR_DABR) || !ppc_has_spr(cpu, SPR_DABRX)) {
return H_HARDWARE;
}
@ -1760,18 +1192,8 @@ target_ulong do_client_architecture_support(PowerPCCPU *cpu,
spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
spapr_ovec_cleanup(ov5_guest);
if (guest_radix) {
if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
error_report("Guest requested unavailable MMU mode (radix).");
exit(EXIT_FAILURE);
}
} else {
if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
&& !kvmppc_has_cap_mmu_hash_v3()) {
error_report("Guest requested unavailable MMU mode (hash).");
exit(EXIT_FAILURE);
}
}
spapr_check_mmu_mode(guest_radix);
spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00);
spapr_ovec_cleanup(ov1_guest);
@ -2023,16 +1445,34 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
return H_FUNCTION;
}
static void hypercall_register_types(void)
#ifndef CONFIG_TCG
static target_ulong h_softmmu(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
g_assert_not_reached();
}
static void hypercall_register_softmmu(void)
{
/* hcall-pft */
spapr_register_hypercall(H_ENTER, h_enter);
spapr_register_hypercall(H_REMOVE, h_remove);
spapr_register_hypercall(H_PROTECT, h_protect);
spapr_register_hypercall(H_READ, h_read);
spapr_register_hypercall(H_ENTER, h_softmmu);
spapr_register_hypercall(H_REMOVE, h_softmmu);
spapr_register_hypercall(H_PROTECT, h_softmmu);
spapr_register_hypercall(H_READ, h_softmmu);
/* hcall-bulk */
spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
spapr_register_hypercall(H_BULK_REMOVE, h_softmmu);
}
#else
static void hypercall_register_softmmu(void)
{
/* DO NOTHING */
}
#endif
static void hypercall_register_types(void)
{
hypercall_register_softmmu();
/* hcall-hpt-resize */
spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare);

hw/ppc/spapr_softmmu.c (new file, 627 lines)

@ -0,0 +1,627 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr_ovec.h"
#include "mmu-book3s-v3.h"
#include "hw/mem/memory-device.h"
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
/*
* hash value/pteg group index is normalized by HPT mask
*/
if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
return false;
}
return true;
}
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong flags = args[0];
target_ulong ptex = args[1];
target_ulong pteh = args[2];
target_ulong ptel = args[3];
unsigned apshift;
target_ulong raddr;
target_ulong slot;
const ppc_hash_pte64_t *hptes;
apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
if (!apshift) {
/* Bad page size encoding */
return H_PARAMETER;
}
raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
if (is_ram_address(spapr, raddr)) {
/* Regular RAM - should have WIMG=0010 */
if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
return H_PARAMETER;
}
} else {
target_ulong wimg_flags;
/* Looks like an IO address */
/* FIXME: What WIMG combinations could be sensible for IO?
* For now we allow WIMG=010x, but are there others? */
/* FIXME: Should we check against registered IO addresses? */
wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));
if (wimg_flags != HPTE64_R_I &&
wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
return H_PARAMETER;
}
}
pteh &= ~0x60ULL;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
slot = ptex & 7ULL;
ptex = ptex & ~7ULL;
if (likely((flags & H_EXACT) == 0)) {
hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
for (slot = 0; slot < 8; slot++) {
if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
break;
}
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
if (slot == 8) {
return H_PTEG_FULL;
}
} else {
hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
return H_PTEG_FULL;
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
}
spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
args[0] = ptex + slot;
return H_SUCCESS;
}
typedef enum {
REMOVE_SUCCESS = 0,
REMOVE_NOT_FOUND = 1,
REMOVE_PARM = 2,
REMOVE_HW = 3,
} RemoveResult;
static RemoveResult remove_hpte(PowerPCCPU *cpu
, target_ulong ptex,
target_ulong avpn,
target_ulong flags,
target_ulong *vp, target_ulong *rp)
{
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
if (!valid_ptex(cpu, ptex)) {
return REMOVE_PARM;
}
hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
v = ppc_hash64_hpte0(cpu, hptes, 0);
r = ppc_hash64_hpte1(cpu, hptes, 0);
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
if ((v & HPTE64_V_VALID) == 0 ||
((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
((flags & H_ANDCOND) && (v & avpn) != 0)) {
return REMOVE_NOT_FOUND;
}
*vp = v;
*rp = r;
spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
return REMOVE_SUCCESS;
}
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUPPCState *env = &cpu->env;
target_ulong flags = args[0];
target_ulong ptex = args[1];
target_ulong avpn = args[2];
RemoveResult ret;
ret = remove_hpte(cpu, ptex, avpn, flags,
&args[0], &args[1]);
switch (ret) {
case REMOVE_SUCCESS:
check_tlb_flush(env, true);
return H_SUCCESS;
case REMOVE_NOT_FOUND:
return H_NOT_FOUND;
case REMOVE_PARM:
return H_PARAMETER;
case REMOVE_HW:
return H_HARDWARE;
}
g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
#define H_BULK_REMOVE_END 0xc000000000000000ULL
#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
#define H_BULK_REMOVE_HW 0x3000000000000000ULL
#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH 4
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUPPCState *env = &cpu->env;
int i;
target_ulong rc = H_SUCCESS;
for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
target_ulong *tsh = &args[i*2];
target_ulong tsl = args[i*2 + 1];
target_ulong v, r, ret;
if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
break;
} else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
return H_PARAMETER;
}
*tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
*tsh |= H_BULK_REMOVE_RESPONSE;
if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
*tsh |= H_BULK_REMOVE_PARM;
return H_PARAMETER;
}
ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
(*tsh & H_BULK_REMOVE_FLAGS) >> 26,
&v, &r);
*tsh |= ret << 60;
switch (ret) {
case REMOVE_SUCCESS:
*tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
break;
case REMOVE_PARM:
rc = H_PARAMETER;
goto exit;
case REMOVE_HW:
rc = H_HARDWARE;
goto exit;
}
}
exit:
check_tlb_flush(env, true);
return rc;
}
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUPPCState *env = &cpu->env;
target_ulong flags = args[0];
target_ulong ptex = args[1];
target_ulong avpn = args[2];
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
v = ppc_hash64_hpte0(cpu, hptes, 0);
r = ppc_hash64_hpte1(cpu, hptes, 0);
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
if ((v & HPTE64_V_VALID) == 0 ||
((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
return H_NOT_FOUND;
}
r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
r |= (flags << 55) & HPTE64_R_PP0;
r |= (flags << 48) & HPTE64_R_KEY_HI;
r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
spapr_store_hpte(cpu, ptex,
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
/* Flush the tlb */
check_tlb_flush(env, true);
/* Don't need a memory barrier, due to qemu's global lock */
spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
return H_SUCCESS;
}
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong flags = args[0];
target_ulong ptex = args[1];
int i, ridx, n_entries = 1;
const ppc_hash_pte64_t *hptes;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
if (flags & H_READ_4) {
/* Clear the two low order bits */
ptex &= ~(3ULL);
n_entries = 4;
}
hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
for (i = 0, ridx = 0; i < n_entries; i++) {
args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);
return H_SUCCESS;
}
struct SpaprPendingHpt {
/* These fields are read-only after initialization */
int shift;
QemuThread thread;
/* These fields are protected by the BQL */
bool complete;
/* These fields are private to the preparation thread if
* !complete, otherwise protected by the BQL */
int ret;
void *hpt;
};
static void free_pending_hpt(SpaprPendingHpt *pending)
{
if (pending->hpt) {
qemu_vfree(pending->hpt);
}
g_free(pending);
}
static void *hpt_prepare_thread(void *opaque)
{
SpaprPendingHpt *pending = opaque;
size_t size = 1ULL << pending->shift;
pending->hpt = qemu_try_memalign(size, size);
if (pending->hpt) {
memset(pending->hpt, 0, size);
pending->ret = H_SUCCESS;
} else {
pending->ret = H_NO_MEM;
}
qemu_mutex_lock_iothread();
if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
/* Ready to go */
pending->complete = true;
} else {
/* We've been cancelled, clean ourselves up */
free_pending_hpt(pending);
}
qemu_mutex_unlock_iothread();
return NULL;
}
/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
SpaprPendingHpt *pending = spapr->pending_hpt;
/* Let the thread know it's cancelled */
spapr->pending_hpt = NULL;
if (!pending) {
/* Nothing to do */
return;
}
if (!pending->complete) {
/* thread will clean itself up */
return;
}
free_pending_hpt(pending);
}
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong shift)
{
SpaprPendingHpt *pending = spapr->pending_hpt;
if (pending) {
/* something already in progress */
if (pending->shift == shift) {
/* and it's suitable */
if (pending->complete) {
return pending->ret;
} else {
return H_LONG_BUSY_ORDER_100_MSEC;
}
}
/* not suitable, cancel and replace */
cancel_hpt_prepare(spapr);
}
if (!shift) {
/* nothing to do */
return H_SUCCESS;
}
/* start new prepare */
pending = g_new0(SpaprPendingHpt, 1);
pending->shift = shift;
pending->ret = H_HARDWARE;
qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);
spapr->pending_hpt = pending;
/* In theory we could estimate the time more accurately based on
* the new size, but there's not much point */
return H_LONG_BUSY_ORDER_100_MSEC;
}
static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
uint8_t *addr = htab;
addr += pteg * HASH_PTEG_SIZE_64;
addr += slot * HASH_PTE_SIZE_64;
return ldq_p(addr);
}
static void new_hpte_store(void *htab, uint64_t pteg, int slot,
uint64_t pte0, uint64_t pte1)
{
uint8_t *addr = htab;
addr += pteg * HASH_PTEG_SIZE_64;
addr += slot * HASH_PTE_SIZE_64;
stq_p(addr, pte0);
stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
}
static int rehash_hpte(PowerPCCPU *cpu,
const ppc_hash_pte64_t *hptes,
void *old_hpt, uint64_t oldsize,
void *new_hpt, uint64_t newsize,
uint64_t pteg, int slot)
{
uint64_t old_hash_mask = (oldsize >> 7) - 1;
uint64_t new_hash_mask = (newsize >> 7) - 1;
target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
target_ulong pte1;
uint64_t avpn;
unsigned base_pg_shift;
uint64_t hash, new_pteg, replace_pte0;
if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
return H_SUCCESS;
}
pte1 = ppc_hash64_hpte1(cpu, hptes, slot);
base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);
if (pte0 & HPTE64_V_SECONDARY) {
pteg = ~pteg;
}
if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
uint64_t offset, vsid;
/* We only have 28 - 23 bits of offset in avpn */
offset = (avpn & 0x1f) << 23;
vsid = avpn >> 5;
/* We can find more bits from the pteg value */
if (base_pg_shift < 23) {
offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
}
hash = vsid ^ (offset >> base_pg_shift);
} else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
uint64_t offset, vsid;
/* We only have 40 - 23 bits of seg_off in avpn */
offset = (avpn & 0x1ffff) << 23;
vsid = avpn >> 17;
if (base_pg_shift < 23) {
offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
<< base_pg_shift;
}
hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
} else {
error_report("rehash_pte: Bad segment size in HPTE");
return H_HARDWARE;
}
new_pteg = hash & new_hash_mask;
if (pte0 & HPTE64_V_SECONDARY) {
assert(~pteg == (hash & old_hash_mask));
new_pteg = ~new_pteg;
} else {
assert(pteg == (hash & old_hash_mask));
}
assert((oldsize != newsize) || (pteg == new_pteg));
replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
/*
* Strictly speaking, we don't need all these tests, since we only
* ever rehash bolted HPTEs. We might in future handle non-bolted
* HPTEs, though so make the logic correct for those cases as
* well.
*/
if (replace_pte0 & HPTE64_V_VALID) {
assert(newsize < oldsize);
if (replace_pte0 & HPTE64_V_BOLTED) {
if (pte0 & HPTE64_V_BOLTED) {
/* Bolted collision, nothing we can do */
return H_PTEG_FULL;
} else {
/* Discard this hpte */
return H_SUCCESS;
}
}
}
new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
return H_SUCCESS;
}
static int rehash_hpt(PowerPCCPU *cpu,
void *old_hpt, uint64_t oldsize,
void *new_hpt, uint64_t newsize)
{
uint64_t n_ptegs = oldsize >> 7;
uint64_t pteg;
int slot;
int rc;
for (pteg = 0; pteg < n_ptegs; pteg++) {
hwaddr ptex = pteg * HPTES_PER_GROUP;
const ppc_hash_pte64_t *hptes
= ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
if (!hptes) {
return H_HARDWARE;
}
for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
pteg, slot);
if (rc != H_SUCCESS) {
ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
return rc;
}
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
}
return H_SUCCESS;
}
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong flags,
target_ulong shift)
{
SpaprPendingHpt *pending = spapr->pending_hpt;
int rc;
size_t newsize;
if (flags != 0) {
return H_PARAMETER;
}
if (!pending || (pending->shift != shift)) {
/* no matching prepare */
return H_CLOSED;
}
if (!pending->complete) {
/* prepare has not completed */
return H_BUSY;
}
/* Shouldn't have got past PREPARE without an HPT */
g_assert(spapr->htab_shift);
newsize = 1ULL << pending->shift;
rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
pending->hpt, newsize);
if (rc == H_SUCCESS) {
qemu_vfree(spapr->htab);
spapr->htab = pending->hpt;
spapr->htab_shift = pending->shift;
push_sregs_to_kvm_pr(spapr);
pending->hpt = NULL; /* so it's not free()d */
}
/* Clean up */
spapr->pending_hpt = NULL;
free_pending_hpt(pending);
return rc;
}
static void hypercall_register_types(void)
{
/* hcall-pft */
spapr_register_hypercall(H_ENTER, h_enter);
spapr_register_hypercall(H_REMOVE, h_remove);
spapr_register_hypercall(H_PROTECT, h_protect);
spapr_register_hypercall(H_READ, h_read);
/* hcall-bulk */
spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}
type_init(hypercall_register_types)
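For context, softmmu_resize_hpt_prepare() and softmmu_resize_hpt_commit() above implement a two-phase protocol: PREPARE starts a background thread that allocates and zeroes the new table, returning H_LONG_BUSY_ORDER_100_MSEC until it is ready, and COMMIT then rehashes the bolted entries and installs the new HPT. A rough guest-side sketch of that flow follows; the constants and hypercall wrappers here are invented stand-ins for illustration, not the QEMU or Linux API:

#include <stdio.h>

/* Stand-in hcall return values; the real ones come from PAPR / QEMU's spapr.h. */
enum { H_SUCCESS = 0, H_LONG_BUSY_ORDER_100_MSEC = 9902 };

/* Dummy hypervisor stubs so the sketch compiles stand-alone; a real guest
 * would issue actual hypercalls here. */
static long h_resize_hpt_prepare(unsigned long flags, unsigned long shift)
{
    (void)flags; (void)shift;
    return H_SUCCESS;
}

static long h_resize_hpt_commit(unsigned long flags, unsigned long shift)
{
    (void)flags; (void)shift;
    return H_SUCCESS;
}

/* Guest-side view of the two-phase resize implemented above. */
static long guest_resize_hpt(unsigned long shift)
{
    long rc;

    /* Phase 1: PREPARE allocates and zeroes the new table in a background
     * thread; poll until it stops reporting "long busy" (a real guest would
     * sleep ~100ms between retries). */
    do {
        rc = h_resize_hpt_prepare(0, shift);
    } while (rc == H_LONG_BUSY_ORDER_100_MSEC);
    if (rc != H_SUCCESS) {
        return rc;
    }

    /* Phase 2: COMMIT rehashes the bolted entries and installs the new HPT. */
    return h_resize_hpt_commit(0, shift);
}

int main(void)
{
    printf("resize rc = %ld\n", guest_resize_hpt(30));
    return 0;
}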


@ -582,6 +582,12 @@ typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
target_ulong *args);
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong shift);
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong flags, target_ulong shift);
bool is_ram_address(SpaprMachineState *spapr, hwaddr addr);
void push_sregs_to_kvm_pr(SpaprMachineState *spapr);
/* Virtual Processor Area structure constants */
#define VPA_MIN_SIZE 640
@ -821,6 +827,7 @@ void spapr_dt_events(SpaprMachineState *sm, void *fdt);
void close_htab_fd(SpaprMachineState *spapr);
void spapr_setup_hpt(SpaprMachineState *spapr);
void spapr_free_hpt(SpaprMachineState *spapr);
void spapr_check_mmu_mode(bool guest_radix);
SpaprTceTable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn);
void spapr_tce_table_enable(SpaprTceTable *tcet,
uint32_t page_shift, uint64_t bus_offset,


@ -423,12 +423,6 @@ void cpu_loop(CPUPPCState *env)
cpu_abort(cs, "Maintenance exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_STOP: /* stop translation */
/* We did invalidate the instruction cache. Go on */
break;
case POWERPC_EXCP_BRANCH: /* branch instruction: */
/* We just stopped because of a branch. Go on */
break;
case POWERPC_EXCP_SYSCALL_USER:
/* system call in user-mode emulation */
/* WARNING:


@ -17,7 +17,6 @@
#include "elf.h"
#include "sysemu/dump.h"
#include "sysemu/kvm.h"
#include "exec/helper-proto.h"
#ifdef TARGET_PPC64
#define ELFCLASS ELFCLASS64
@ -176,7 +175,7 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
vmxregset->avr[i].u64[1] = avr->u64[1];
}
}
vmxregset->vscr.u32[3] = cpu_to_dump32(s, helper_mfvscr(&cpu->env));
vmxregset->vscr.u32[3] = cpu_to_dump32(s, ppc_get_vscr(&cpu->env));
}
static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)


@ -20,6 +20,10 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-models.h"
#include "cpu-qom.h"
#include "exec/log.h"
#include "fpu/softfloat-helpers.h"
#include "mmu-hash64.h"
target_ulong cpu_read_xer(CPUPPCState *env)
{
@ -45,3 +49,46 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer)
(1ul << XER_OV) | (1ul << XER_CA) |
(1ul << XER_OV32) | (1ul << XER_CA32));
}
void ppc_store_vscr(CPUPPCState *env, uint32_t vscr)
{
env->vscr = vscr & ~(1u << VSCR_SAT);
/* Which bit we set is completely arbitrary, but clear the rest. */
env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
env->vscr_sat.u64[1] = 0;
set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
}
uint32_t ppc_get_vscr(CPUPPCState *env)
{
uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
return env->vscr | (sat << VSCR_SAT);
}
#ifdef CONFIG_SOFTMMU
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
PowerPCCPU *cpu = env_archcpu(env);
qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
assert(!cpu->vhyp);
#if defined(TARGET_PPC64)
if (mmu_is_64bit(env->mmu_model)) {
target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
target_ulong htabsize = value & SDR_64_HTABSIZE;
if (value & ~sdr_mask) {
error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
value & ~sdr_mask);
value &= sdr_mask;
}
if (htabsize > 28) {
error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
htabsize);
return;
}
}
#endif /* defined(TARGET_PPC64) */
/* FIXME: Should check for valid HTABMASK values in 32-bit case */
env->spr[SPR_SDR1] = value;
}
#endif /* CONFIG_SOFTMMU */
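The new ppc_store_vscr()/ppc_get_vscr() helpers above keep the sticky SAT bit out of env->vscr, parking it in a separate vector-sized field and folding it back in on read. A stand-alone sketch of that round trip, with stand-in types and bit numbers (the real helper also updates the flush-to-zero float status for VSCR_NJ):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the QEMU definitions; bit numbers are illustrative. */
#define VSCR_SAT 0
#define VSCR_NJ  16

typedef struct { uint32_t vscr; uint64_t vscr_sat[2]; } ToyEnv;

/* Mirrors ppc_store_vscr(): SAT is stripped from vscr and parked in vscr_sat. */
static void toy_store_vscr(ToyEnv *env, uint32_t vscr)
{
    env->vscr = vscr & ~(1u << VSCR_SAT);
    env->vscr_sat[0] = vscr & (1u << VSCR_SAT);
    env->vscr_sat[1] = 0;
}

/* Mirrors ppc_get_vscr(): any nonzero vscr_sat word folds SAT back in. */
static uint32_t toy_get_vscr(ToyEnv *env)
{
    uint32_t sat = (env->vscr_sat[0] | env->vscr_sat[1]) != 0;
    return env->vscr | (sat << VSCR_SAT);
}

int main(void)
{
    ToyEnv env;
    toy_store_vscr(&env, (1u << VSCR_NJ) | (1u << VSCR_SAT));
    printf("0x%08x\n", toy_get_vscr(&env));   /* prints 0x00010001 */
    return 0;
}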


@ -131,11 +131,7 @@ enum {
POWERPC_EXCP_SYSCALL_VECTORED = 102, /* scv exception */
/* EOL */
POWERPC_EXCP_NB = 103,
/* QEMU exceptions: used internally during code translation */
POWERPC_EXCP_STOP = 0x200, /* stop translation */
POWERPC_EXCP_BRANCH = 0x201, /* branch instruction */
/* QEMU exceptions: special cases we want to stop translation */
POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */
POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
};
@ -1297,6 +1293,7 @@ void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
void ppc_store_ptcr(CPUPPCState *env, target_ulong value);
#endif /* !defined(CONFIG_USER_ONLY) */
void ppc_store_msr(CPUPPCState *env, target_ulong value);
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
void ppc_cpu_list(void);
@ -2641,7 +2638,15 @@ static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i)
return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i));
}
static inline bool ppc_has_spr(PowerPCCPU *cpu, int spr)
{
/* We can test whether the SPR is defined by checking for a valid name */
return cpu->env.spr_cb[spr].name != NULL;
}
void dump_mmu(CPUPPCState *env);
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
void ppc_store_vscr(CPUPPCState *env, uint32_t vscr);
uint32_t ppc_get_vscr(CPUPPCState *env);
#endif /* PPC_CPU_H */

File diff suppressed because it is too large.


@ -498,7 +498,7 @@ static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n)
return 16;
}
if (n == 32) {
gdb_get_reg32(buf, helper_mfvscr(env));
gdb_get_reg32(buf, ppc_get_vscr(env));
mem_buf = gdb_get_reg_ptr(buf, 4);
ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
@ -529,7 +529,7 @@ static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
}
if (n == 32) {
ppc_maybe_bswap_register(env, mem_buf, 4);
helper_mtvscr(env, ldl_p(mem_buf));
ppc_store_vscr(env, ldl_p(mem_buf));
return 4;
}
if (n == 33) {


@ -1,5 +1,5 @@
DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, void, env, i32, i32)
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, noreturn, env, i32, i32)
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32)
DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)


@ -462,17 +462,12 @@ SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
void helper_mtvscr(CPUPPCState *env, uint32_t vscr)
{
env->vscr = vscr & ~(1u << VSCR_SAT);
/* Which bit we set is completely arbitrary, but clear the rest. */
env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
env->vscr_sat.u64[1] = 0;
set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
ppc_store_vscr(env, vscr);
}
uint32_t helper_mfvscr(CPUPPCState *env)
{
uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
return env->vscr | (sat << VSCR_SAT);
return ppc_get_vscr(env);
}
static inline void set_vscr_sat(CPUPPCState *env)


@ -228,4 +228,23 @@ void destroy_ppc_opcodes(PowerPCCPU *cpu);
void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *ppc);
gchar *ppc_gdb_arch_name(CPUState *cs);
/**
* prot_for_access_type:
* @access_type: Access type
*
* Return the protection bit required for the given access type.
*/
static inline int prot_for_access_type(MMUAccessType access_type)
{
switch (access_type) {
case MMU_INST_FETCH:
return PAGE_EXEC;
case MMU_DATA_LOAD:
return PAGE_READ;
case MMU_DATA_STORE:
return PAGE_WRITE;
}
g_assert_not_reached();
}
#endif /* PPC_INTERNAL_H */


@ -8,7 +8,6 @@
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"
static void post_load_update_msr(CPUPPCState *env)
{
@ -107,7 +106,7 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
ppc_store_sdr1(env, sdr1);
}
qemu_get_be32s(f, &vscr);
helper_mtvscr(env, vscr);
ppc_store_vscr(env, vscr);
qemu_get_be64s(f, &env->spe_acc);
qemu_get_be32s(f, &env->spe_fscr);
qemu_get_betls(f, &env->msr_mask);
@ -456,7 +455,7 @@ static int get_vscr(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field)
{
PowerPCCPU *cpu = opaque;
helper_mtvscr(&cpu->env, qemu_get_be32(f));
ppc_store_vscr(&cpu->env, qemu_get_be32(f));
return 0;
}
@ -464,7 +463,7 @@ static int put_vscr(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field, JSONWriter *vmdesc)
{
PowerPCCPU *cpu = opaque;
qemu_put_be32(f, helper_mfvscr(&cpu->env));
qemu_put_be32(f, ppc_get_vscr(&cpu->env));
return 0;
}


@ -2,6 +2,7 @@ ppc_ss = ss.source_set()
ppc_ss.add(files(
'cpu-models.c',
'cpu.c',
'cpu_init.c',
'dfp_helper.c',
'excp_helper.c',
'fpu_helper.c',


@ -261,6 +261,16 @@ void ppc_store_msr(CPUPPCState *env, target_ulong value)
hreg_store_msr(env, value, 0);
}
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
/* The gtse bit affects hflags */
hreg_compute_hflags(env);
}
/*
* This code is lifted from MacOnLinux. It is called whenever THRM1,2
or 3 is read and fixes up the values in such a way that will make


@ -24,6 +24,7 @@
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "internal.h"
#include "mmu-hash32.h"
#include "exec/log.h"
@ -152,16 +153,17 @@ static int hash32_bat_601_prot(PowerPCCPU *cpu,
return ppc_hash32_pp_prot(key, pp, 0);
}
static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
int *prot)
static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
MMUAccessType access_type, int *prot)
{
CPUPPCState *env = &cpu->env;
target_ulong *BATlt, *BATut;
bool ifetch = access_type == MMU_INST_FETCH;
int i;
LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
rwx == 2 ? 'I' : 'D', ea);
if (rwx == 2) {
ifetch ? 'I' : 'D', ea);
if (ifetch) {
BATlt = env->IBAT[1];
BATut = env->IBAT[0];
} else {
@ -180,7 +182,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
}
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n", __func__,
type == ACCESS_CODE ? 'I' : 'D', i, ea, batu, batl);
ifetch ? 'I' : 'D', i, ea, batu, batl);
if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
hwaddr raddr = (batl & mask) | (ea & ~mask);
@ -208,7 +210,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
TARGET_FMT_lx " " TARGET_FMT_lx "\n",
__func__, type == ACCESS_CODE ? 'I' : 'D', i, ea,
__func__, ifetch ? 'I' : 'D', i, ea,
*BATu, *BATl, BEPIu, BEPIl, bl);
}
}
@ -218,7 +220,8 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
}
static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
target_ulong eaddr, int rwx,
target_ulong eaddr,
MMUAccessType access_type,
hwaddr *raddr, int *prot)
{
CPUState *cs = CPU(cpu);
@ -239,7 +242,7 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
return 0;
}
if (rwx == 2) {
if (access_type == MMU_INST_FETCH) {
/* No code fetch is allowed in direct-store areas */
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
@ -260,7 +263,7 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
/* lwarx, ldarx or srwcx. */
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x06000000;
} else {
env->spr[SPR_DSISR] = 0x04000000;
@ -280,7 +283,7 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x06100000;
} else {
env->spr[SPR_DSISR] = 0x04100000;
@ -290,14 +293,15 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
cpu_abort(cs, "ERROR: instruction should not need "
"address translation\n");
}
if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) {
if ((access_type == MMU_DATA_STORE || key != 1) &&
(access_type == MMU_DATA_LOAD || key != 0)) {
*raddr = eaddr;
return 0;
} else {
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x0a000000;
} else {
env->spr[SPR_DSISR] = 0x08000000;
@ -421,13 +425,16 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
hwaddr pte_offset;
ppc_hash_pte32_t pte;
int prot;
const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
int need_prot;
MMUAccessType access_type;
hwaddr raddr;
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
access_type = rwx;
need_prot = prot_for_access_type(access_type);
/* 1. Handle real mode accesses */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
/* Translation is off */
raddr = eaddr;
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
@ -438,17 +445,17 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* 2. Check Block Address Translation entries (BATs) */
if (env->nb_BATs != 0) {
raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot);
raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, &prot);
if (raddr != -1) {
if (need_prot[rwx] & ~prot) {
if (rwx == 2) {
if (need_prot & ~prot) {
if (access_type == MMU_INST_FETCH) {
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
} else {
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x0a000000;
} else {
env->spr[SPR_DSISR] = 0x08000000;
@ -469,7 +476,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* 4. Handle direct store segments */
if (sr & SR32_T) {
if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx,
if (ppc_hash32_direct_store(cpu, sr, eaddr, access_type,
&raddr, &prot) == 0) {
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
raddr & TARGET_PAGE_MASK, prot, mmu_idx,
@ -481,7 +488,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
}
/* 5. Check for segment level no-execute violation */
if ((rwx == 2) && (sr & SR32_NX)) {
if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) {
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
return 1;
@ -490,14 +497,14 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* 6. Locate the PTE in the hash table */
pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
if (access_type == MMU_INST_FETCH) {
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x40000000;
} else {
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x42000000;
} else {
env->spr[SPR_DSISR] = 0x40000000;
@ -513,17 +520,17 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
prot = ppc_hash32_pte_prot(cpu, sr, pte);
if (need_prot[rwx] & ~prot) {
if (need_prot & ~prot) {
/* Access right violation */
qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
if (rwx == 2) {
if (access_type == MMU_INST_FETCH) {
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
} else {
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x0a000000;
} else {
env->spr[SPR_DSISR] = 0x08000000;
@ -540,7 +547,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
}
if (!(pte.pte1 & HPTE32_R_C)) {
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
} else {
/*


@ -29,6 +29,7 @@
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"
@ -876,10 +877,12 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
hwaddr ptex;
ppc_hash_pte64_t pte;
int exec_prot, pp_prot, amr_prot, prot;
const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
MMUAccessType access_type;
int need_prot;
hwaddr raddr;
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
access_type = rwx;
/*
* Note on LPCR usage: 970 uses HID4, but our special variant of
@ -890,7 +893,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
*/
/* 1. Handle real mode accesses */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
/*
* Translation is supposedly "off", but in real mode the top 4
* effective address bits are (mostly) ignored
@ -923,14 +926,19 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
/* Emulated old-style RMO mode, bounds check against RMLS */
if (raddr >= limit) {
if (rwx == 2) {
switch (access_type) {
case MMU_INST_FETCH:
ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
} else {
int dsisr = DSISR_PROTFAULT;
if (rwx == 1) {
dsisr |= DSISR_ISSTORE;
}
ppc_hash64_set_dsi(cs, eaddr, dsisr);
break;
case MMU_DATA_LOAD:
ppc_hash64_set_dsi(cs, eaddr, DSISR_PROTFAULT);
break;
case MMU_DATA_STORE:
ppc_hash64_set_dsi(cs, eaddr,
DSISR_PROTFAULT | DSISR_ISSTORE);
break;
default:
g_assert_not_reached();
}
return 1;
}
@ -953,13 +961,19 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
exit(1);
}
/* Segment still not found, generate the appropriate interrupt */
if (rwx == 2) {
switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = POWERPC_EXCP_ISEG;
env->error_code = 0;
} else {
break;
case MMU_DATA_LOAD:
case MMU_DATA_STORE:
cs->exception_index = POWERPC_EXCP_DSEG;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
break;
default:
g_assert_not_reached();
}
return 1;
}
@ -967,7 +981,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
skip_slb_search:
/* 3. Check for segment level no-execute violation */
if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
return 1;
}
@ -975,14 +989,18 @@ skip_slb_search:
/* 4. Locate the PTE in the hash table */
ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
if (ptex == -1) {
if (rwx == 2) {
switch (access_type) {
case MMU_INST_FETCH:
ppc_hash64_set_isi(cs, SRR1_NOPTE);
} else {
int dsisr = DSISR_NOPTE;
if (rwx == 1) {
dsisr |= DSISR_ISSTORE;
}
ppc_hash64_set_dsi(cs, eaddr, dsisr);
break;
case MMU_DATA_LOAD:
ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE);
break;
case MMU_DATA_STORE:
ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
break;
default:
g_assert_not_reached();
}
return 1;
}
@ -996,10 +1014,11 @@ skip_slb_search:
amr_prot = ppc_hash64_amr_prot(cpu, pte);
prot = exec_prot & pp_prot & amr_prot;
if ((need_prot[rwx] & ~prot) != 0) {
need_prot = prot_for_access_type(access_type);
if (need_prot & ~prot) {
/* Access right violation */
qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
if (rwx == 2) {
if (access_type == MMU_INST_FETCH) {
int srr1 = 0;
if (PAGE_EXEC & ~exec_prot) {
srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
@ -1012,13 +1031,13 @@ skip_slb_search:
ppc_hash64_set_isi(cs, srr1);
} else {
int dsisr = 0;
if (need_prot[rwx] & ~pp_prot) {
if (need_prot & ~pp_prot) {
dsisr |= DSISR_PROTFAULT;
}
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
dsisr |= DSISR_ISSTORE;
}
if (need_prot[rwx] & ~amr_prot) {
if (need_prot & ~amr_prot) {
dsisr |= DSISR_AMR;
}
ppc_hash64_set_dsi(cs, eaddr, dsisr);
@ -1034,7 +1053,7 @@ skip_slb_search:
ppc_hash64_set_r(cpu, ptex, pte.pte1);
}
if (!(pte.pte1 & HPTE64_R_C)) {
if (rwx == 1) {
if (access_type == MMU_DATA_STORE) {
ppc_hash64_set_c(cpu, ptex, pte.pte1);
} else {
/*
@ -1120,16 +1139,6 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
/* The gtse bit affects hflags */
hreg_compute_hflags(env);
}
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
PowerPCCPU *cpu = env_archcpu(env);
@ -1200,61 +1209,4 @@ const PPCHash64Options ppc_hash64_opts_POWER7 = {
}
};
void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
bool (*cb)(void *, uint32_t, uint32_t),
void *opaque)
{
PPCHash64Options *opts = cpu->hash64_opts;
int i;
int n = 0;
bool ci_largepage = false;
assert(opts);
n = 0;
for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
PPCHash64SegmentPageSizes *sps = &opts->sps[i];
int j;
int m = 0;
assert(n <= i);
if (!sps->page_shift) {
break;
}
for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
PPCHash64PageSize *ps = &sps->enc[j];
assert(m <= j);
if (!ps->page_shift) {
break;
}
if (cb(opaque, sps->page_shift, ps->page_shift)) {
if (ps->page_shift >= 16) {
ci_largepage = true;
}
sps->enc[m++] = *ps;
}
}
/* Clear rest of the row */
for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
}
if (m) {
n++;
}
}
/* Clear the rest of the table */
for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
}
if (!ci_largepage) {
opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
}
}
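A note on the bare access_type = rwx assignment near the top of this file's changes (and the identical one in mmu-radix64.c below): it is only safe because the legacy ppc rwx encoding and the generic MMUAccessType enum from QEMU's core CPU headers use the same numeric values, which is what the retained assert documents:

/* MMUAccessType as declared in include/hw/core/cpu.h; the values line up
 * with the old rwx encoding, so no translation table is needed. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,   /* old rwx == 0 */
    MMU_DATA_STORE = 1,   /* old rwx == 1 */
    MMU_INST_FETCH = 2,   /* old rwx == 2 */
} MMUAccessType;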

View File

@ -15,12 +15,8 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
target_ulong pte0, target_ulong pte1);
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
uint64_t pte0, uint64_t pte1);
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
void ppc_hash64_init(PowerPCCPU *cpu);
void ppc_hash64_finalize(PowerPCCPU *cpu);
void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
bool (*cb)(void *, uint32_t, uint32_t),
void *opaque);
#endif
/*

View File

@ -25,6 +25,7 @@
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"
@ -74,71 +75,94 @@ static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
return true;
}
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
vaddr eaddr)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
if (rwx == 2) { /* Instruction Segment Interrupt */
switch (access_type) {
case MMU_INST_FETCH:
/* Instruction Segment Interrupt */
cs->exception_index = POWERPC_EXCP_ISEG;
} else { /* Data Segment Interrupt */
break;
case MMU_DATA_STORE:
case MMU_DATA_LOAD:
/* Data Segment Interrupt */
cs->exception_index = POWERPC_EXCP_DSEG;
env->spr[SPR_DAR] = eaddr;
break;
default:
g_assert_not_reached();
}
env->error_code = 0;
}
static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
uint32_t cause)
static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
vaddr eaddr, uint32_t cause)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
if (rwx == 2) { /* Instruction Storage Interrupt */
switch (access_type) {
case MMU_INST_FETCH:
/* Instruction Storage Interrupt */
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = cause;
} else { /* Data Storage Interrupt */
break;
case MMU_DATA_STORE:
cause |= DSISR_ISSTORE;
/* fall through */
case MMU_DATA_LOAD:
/* Data Storage Interrupt */
cs->exception_index = POWERPC_EXCP_DSI;
if (rwx == 1) { /* Write -> Store */
cause |= DSISR_ISSTORE;
}
env->spr[SPR_DSISR] = cause;
env->spr[SPR_DAR] = eaddr;
env->error_code = 0;
break;
default:
g_assert_not_reached();
}
}
static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, int rwx, vaddr eaddr,
hwaddr g_raddr, uint32_t cause)
static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
if (rwx == 2) { /* H Instruction Storage Interrupt */
switch (access_type) {
case MMU_INST_FETCH:
/* H Instruction Storage Interrupt */
cs->exception_index = POWERPC_EXCP_HISI;
env->spr[SPR_ASDR] = g_raddr;
env->error_code = cause;
} else { /* H Data Storage Interrupt */
break;
case MMU_DATA_STORE:
cause |= DSISR_ISSTORE;
/* fall through */
case MMU_DATA_LOAD:
/* H Data Storage Interrupt */
cs->exception_index = POWERPC_EXCP_HDSI;
if (rwx == 1) { /* Write -> Store */
cause |= DSISR_ISSTORE;
}
env->spr[SPR_HDSISR] = cause;
env->spr[SPR_HDAR] = eaddr;
env->spr[SPR_ASDR] = g_raddr;
env->error_code = 0;
break;
default:
g_assert_not_reached();
}
}
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
int *fault_cause, int *prot,
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
uint64_t pte, int *fault_cause, int *prot,
bool partition_scoped)
{
CPUPPCState *env = &cpu->env;
const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };
int need_prot;
/* Check Page Attributes (pte58:59) */
if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
/*
* Radix PTE entries with the non-idempotent I/O attribute are treated
* as guarded storage
@ -158,7 +182,8 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
}
/* Check if requested access type is allowed */
if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
need_prot = prot_for_access_type(access_type);
if (need_prot & ~*prot) { /* Page Protected for that Access */
*fault_cause |= DSISR_PROTFAULT;
return true;
}
@ -166,15 +191,15 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
return false;
}
static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
hwaddr pte_addr, int *prot)
static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
uint64_t pte, hwaddr pte_addr, int *prot)
{
CPUState *cs = CPU(cpu);
uint64_t npte;
npte = pte | R_PTE_R; /* Always set reference bit */
if (rwx == 1) { /* Store/Write */
if (access_type == MMU_DATA_STORE) { /* Store/Write */
npte |= R_PTE_C; /* Set change bit */
} else {
/*
@ -269,7 +294,8 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
return true;
}
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx,
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
MMUAccessType access_type,
vaddr eaddr, hwaddr g_raddr,
ppc_v3_pate_t pate,
hwaddr *h_raddr, int *h_prot,
@ -285,24 +311,25 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx,
if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
&pte, &fault_cause, &pte_addr) ||
ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, h_prot, true)) {
ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, h_prot, true)) {
if (pde_addr) { /* address being translated was that of a guest pde */
fault_cause |= DSISR_PRTABLE_FAULT;
}
if (guest_visible) {
ppc_radix64_raise_hsi(cpu, rwx, eaddr, g_raddr, fault_cause);
ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
}
return 1;
}
if (guest_visible) {
ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, h_prot);
ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
}
return 0;
}
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
MMUAccessType access_type,
vaddr eaddr, uint64_t pid,
ppc_v3_pate_t pate, hwaddr *g_raddr,
int *g_prot, int *g_page_size,
@ -321,7 +348,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
if (offset >= size) {
/* offset exceeds size of the process table */
if (guest_visible) {
ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
}
return 1;
}
@ -362,7 +389,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
if (ret) {
/* No valid PTE */
if (guest_visible) {
ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
}
return ret;
}
@ -391,7 +418,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
if (ret) {
/* No valid pte */
if (guest_visible) {
ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
}
return ret;
}
@ -405,16 +432,16 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
*g_raddr = (rpn & ~mask) | (eaddr & mask);
}
if (ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot, false)) {
if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, g_prot, false)) {
/* Access denied due to protection */
if (guest_visible) {
ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
}
return 1;
}
if (guest_visible) {
ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
}
return 0;
@ -437,7 +464,8 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
* | = On | Process Scoped | Scoped |
* +-------------+----------------+---------------+
*/
static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr,
MMUAccessType access_type,
bool relocation,
hwaddr *raddr, int *psizep, int *protp,
bool guest_visible)
@ -451,7 +479,7 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* Virtual Mode Access - get the fully qualified address */
if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
if (guest_visible) {
ppc_radix64_raise_segi(cpu, rwx, eaddr);
ppc_radix64_raise_segi(cpu, access_type, eaddr);
}
return 1;
}
@ -464,13 +492,13 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
} else {
if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
if (guest_visible) {
ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
}
return 1;
}
if (!validate_pate(cpu, lpid, &pate)) {
if (guest_visible) {
ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
}
return 1;
}
@ -488,7 +516,7 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
* - Translates an effective address to a guest real address.
*/
if (relocation) {
int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
pate, &g_raddr, &prot,
&psize, guest_visible);
if (ret) {
@ -511,9 +539,10 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
if (lpid || !msr_hv) {
int ret;
ret = ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, g_raddr,
pate, raddr, &prot, &psize,
false, guest_visible);
ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
g_raddr, pate, raddr,
&prot, &psize, false,
guest_visible);
if (ret) {
return ret;
}
@ -534,12 +563,14 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
CPUPPCState *env = &cpu->env;
int page_size, prot;
bool relocation;
MMUAccessType access_type;
hwaddr raddr;
assert(!(msr_hv && cpu->vhyp));
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
access_type = rwx;
relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1));
relocation = (access_type == MMU_INST_FETCH ? msr_ir : msr_dr);
/* HV or virtual hypervisor Real Mode Access */
if (!relocation && (msr_hv || cpu->vhyp)) {
/* In real mode top 4 effective addr bits (mostly) ignored */
@ -568,7 +599,7 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
}
/* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
if (ppc_radix64_xlate(cpu, eaddr, access_type, relocation, &raddr,
&page_size, &prot, true)) {
return 1;
}
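Since every ppc_radix64_raise_* call in this file is now gated on guest_visible, ppc_radix64_xlate() can also be used for side-effect-free lookups. As a hedged illustration (the name radix64_debug_translate and the details are hypothetical, not the actual get_phys_page_debug code), a debug-style caller would look roughly like this:

/* Illustrative sketch: translate eaddr without touching interrupt state,
 * e.g. for the monitor/gdbstub.  guest_visible = false keeps the
 * ppc_radix64_raise_segi/si/hsi helpers from firing on failure. */
static hwaddr radix64_debug_translate(PowerPCCPU *cpu, vaddr eaddr)
{
    CPUPPCState *env = &cpu->env;
    int psize, prot;
    hwaddr raddr;
    bool relocation = msr_dr;   /* treat it as a data access */

    if (ppc_radix64_xlate(cpu, eaddr, MMU_DATA_LOAD, relocation,
                          &raddr, &psize, &prot, false)) {
        return -1;
    }
    return raddr;
}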

View File

@ -32,6 +32,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
@ -126,36 +127,14 @@ static int pp_check(int key, int pp, int nx)
return access;
}
static int check_prot(int prot, int rw, int access_type)
static int check_prot(int prot, MMUAccessType access_type)
{
int ret;
if (access_type == ACCESS_CODE) {
if (prot & PAGE_EXEC) {
ret = 0;
} else {
ret = -2;
}
} else if (rw) {
if (prot & PAGE_WRITE) {
ret = 0;
} else {
ret = -2;
}
} else {
if (prot & PAGE_READ) {
ret = 0;
} else {
ret = -2;
}
}
return ret;
return prot & prot_for_access_type(access_type) ? 0 : -2;
}
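With the lookup folded into prot_for_access_type(), the rewritten one-liner behaves exactly like the three branches it replaces. A few concrete cases (illustrative values only; 0 means the access is allowed, -2 is the protection-violation code the callers expect):

assert(check_prot(PAGE_READ | PAGE_WRITE, MMU_DATA_STORE) == 0);
assert(check_prot(PAGE_READ,              MMU_DATA_STORE) == -2);
assert(check_prot(PAGE_READ | PAGE_EXEC,  MMU_INST_FETCH) == 0);
assert(check_prot(PAGE_WRITE,             MMU_DATA_LOAD)  == -2);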
static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
target_ulong pte1, int h,
int rw, int type)
static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
target_ulong pte1, int h,
MMUAccessType access_type)
{
target_ulong ptem, mmask;
int access, ret, pteh, ptev, pp;
@ -182,7 +161,7 @@ static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
/* Keep the matching PTE information */
ctx->raddr = pte1;
ctx->prot = access;
ret = check_prot(ctx->prot, rw, type);
ret = check_prot(ctx->prot, access_type);
if (ret == 0) {
/* Access granted */
qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
@ -197,7 +176,7 @@ static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
}
static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
int ret, int rw)
int ret, MMUAccessType access_type)
{
int store = 0;
@ -208,7 +187,7 @@ static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
store = 1;
}
if (!(*pte1p & 0x00000080)) {
if (rw == 1 && ret == 0) {
if (access_type == MMU_DATA_STORE && ret == 0) {
/* Update changed flag */
*pte1p |= 0x00000080;
store = 1;
@ -308,8 +287,8 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
env->last_way = way;
}
static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int access_type)
static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, MMUAccessType access_type)
{
ppc6xx_tlb_t *tlb;
int nr, best, way;
@ -318,8 +297,7 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
best = -1;
ret = -1; /* No TLB found */
for (way = 0; way < env->nb_ways; way++) {
nr = ppc6xx_tlb_getnum(env, eaddr, way,
access_type == ACCESS_CODE ? 1 : 0);
nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
tlb = &env->tlb.tlb6[nr];
/* This test "emulates" the PTE index match for hardware TLBs */
if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
@ -333,9 +311,10 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, eaddr, tlb->pte1,
rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
access_type == MMU_DATA_STORE ? 'S' : 'L',
access_type == MMU_INST_FETCH ? 'I' : 'D');
switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
0, rw, access_type)) {
0, access_type)) {
case -3:
/* TLB inconsistency */
return -1;
@ -366,7 +345,7 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
/* Update page flags */
pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
}
return ret;
@ -400,24 +379,22 @@ static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
}
static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong virtual, int rw, int type)
target_ulong virtual, MMUAccessType access_type)
{
target_ulong *BATlt, *BATut, *BATu, *BATl;
target_ulong BEPIl, BEPIu, bl;
int i, valid, prot;
int ret = -1;
bool ifetch = access_type == MMU_INST_FETCH;
LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
type == ACCESS_CODE ? 'I' : 'D', virtual);
switch (type) {
case ACCESS_CODE:
ifetch ? 'I' : 'D', virtual);
if (ifetch) {
BATlt = env->IBAT[1];
BATut = env->IBAT[0];
break;
default:
} else {
BATlt = env->DBAT[1];
BATut = env->DBAT[0];
break;
}
for (i = 0; i < env->nb_BATs; i++) {
BATu = &BATut[i];
@ -427,7 +404,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n", __func__,
type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
if ((virtual & 0xF0000000) == BEPIu &&
((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
/* BAT matches */
@ -438,7 +415,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
(virtual & 0x0001F000);
/* Compute access rights */
ctx->prot = prot;
ret = check_prot(ctx->prot, rw, type);
ret = check_prot(ctx->prot, access_type);
if (ret == 0) {
LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
@ -461,7 +438,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
TARGET_FMT_lx " " TARGET_FMT_lx "\n",
__func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
__func__, ifetch ? 'I' : 'D', i, virtual,
*BATu, *BATl, BEPIu, BEPIl, bl);
}
}
@ -472,8 +449,9 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
}
/* Perform segment based translation */
static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int type)
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, MMUAccessType access_type,
int type)
{
PowerPCCPU *cpu = env_archcpu(env);
hwaddr hash;
@ -497,7 +475,7 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
" nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
" ir=%d dr=%d pr=%d %d t=%d\n",
eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
(int)msr_dr, pr != 0 ? 1 : 0, rw, type);
(int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type);
pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
hash = vsid ^ pgidx;
ctx->ptem = (vsid << 7) | (pgidx >> 10);
@ -520,7 +498,7 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
/* Initialize real address with an invalid value */
ctx->raddr = (hwaddr)-1ULL;
/* Software TLB search */
ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
if (qemu_loglevel_mask(CPU_LOG_MMU)) {
CPUState *cs = env_cpu(env);
@ -603,7 +581,8 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
"address translation\n");
return -4;
}
if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
(access_type == MMU_DATA_LOAD || ctx->key != 0)) {
ctx->raddr = eaddr;
ret = 2;
} else {
@ -682,8 +661,8 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong address, int rw,
int access_type)
target_ulong address,
MMUAccessType access_type)
{
ppcemb_tlb_t *tlb;
hwaddr raddr;
@ -700,8 +679,8 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
}
zsel = (tlb->attr >> 4) & 0xF;
zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
__func__, i, zsel, zpr, rw, tlb->attr);
LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
__func__, i, zsel, zpr, access_type, tlb->attr);
/* Check execute enable bit */
switch (zpr) {
case 0x2:
@ -727,7 +706,7 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
check_perms:
/* Check from TLB entry */
ctx->prot = tlb->prot;
ret = check_prot(ctx->prot, rw, access_type);
ret = check_prot(ctx->prot, access_type);
if (ret == -2) {
env->spr[SPR_40x_ESR] = 0;
}
@ -757,12 +736,11 @@ void store_40x_sler(CPUPPCState *env, uint32_t val)
env->spr[SPR_405_SLER] = val;
}
static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
hwaddr *raddr, int *prot,
target_ulong address, int rw,
int access_type, int i)
static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
hwaddr *raddr, int *prot, target_ulong address,
MMUAccessType access_type, int i)
{
int ret, prot2;
int prot2;
if (ppcemb_tlb_check(env, tlb, raddr, address,
env->spr[SPR_BOOKE_PID],
@ -794,42 +772,24 @@ found_tlb:
}
/* Check the address space */
if (access_type == ACCESS_CODE) {
if (msr_ir != (tlb->attr & 1)) {
LOG_SWTLB("%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & PAGE_EXEC) {
LOG_SWTLB("%s: good TLB!\n", __func__);
return 0;
}
LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
ret = -3;
} else {
if (msr_dr != (tlb->attr & 1)) {
LOG_SWTLB("%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
LOG_SWTLB("%s: found TLB!\n", __func__);
return 0;
}
LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
ret = -2;
if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
LOG_SWTLB("%s: AS doesn't match\n", __func__);
return -1;
}
return ret;
*prot = prot2;
if (prot2 & prot_for_access_type(access_type)) {
LOG_SWTLB("%s: good TLB!\n", __func__);
return 0;
}
LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
return access_type == MMU_INST_FETCH ? -3 : -2;
}
static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong address, int rw,
int access_type)
target_ulong address,
MMUAccessType access_type)
{
ppcemb_tlb_t *tlb;
hwaddr raddr;
@ -839,7 +799,7 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
raddr = (hwaddr)-1ULL;
for (i = 0; i < env->nb_tlb; i++) {
tlb = &env->tlb.tlbe[i];
ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
access_type, i);
if (ret != -1) {
break;
@ -938,10 +898,10 @@ static bool is_epid_mmu(int mmu_idx)
return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}
static uint32_t mmubooke206_esr(int mmu_idx, bool rw)
static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
uint32_t esr = 0;
if (rw) {
if (access_type == MMU_DATA_STORE) {
esr |= ESR_ST;
}
if (is_epid_mmu(mmu_idx)) {
@ -983,10 +943,9 @@ static bool mmubooke206_get_as(CPUPPCState *env,
/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
hwaddr *raddr, int *prot,
target_ulong address, int rw,
int access_type, int mmu_idx)
target_ulong address,
MMUAccessType access_type, int mmu_idx)
{
int ret;
int prot2 = 0;
uint32_t epid;
bool as, pr;
@ -1043,44 +1002,31 @@ found_tlb:
}
/* Check the address space and permissions */
if (access_type == ACCESS_CODE) {
if (access_type == MMU_INST_FETCH) {
/* There is no way to fetch code using epid load */
assert(!use_epid);
if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
LOG_SWTLB("%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & PAGE_EXEC) {
LOG_SWTLB("%s: good TLB!\n", __func__);
return 0;
}
LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
ret = -3;
} else {
if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
LOG_SWTLB("%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
LOG_SWTLB("%s: found TLB!\n", __func__);
return 0;
}
LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
ret = -2;
as = msr_ir;
}
return ret;
if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
LOG_SWTLB("%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & prot_for_access_type(access_type)) {
LOG_SWTLB("%s: good TLB!\n", __func__);
return 0;
}
LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
return access_type == MMU_INST_FETCH ? -3 : -2;
}
static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong address, int rw,
int access_type, int mmu_idx)
target_ulong address,
MMUAccessType access_type,
int mmu_idx)
{
ppcmas_tlb_t *tlb;
hwaddr raddr;
@ -1098,7 +1044,7 @@ static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
continue;
}
ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
rw, access_type, mmu_idx);
access_type, mmu_idx);
if (ret != -1) {
goto found_tlb;
}
@ -1361,8 +1307,8 @@ void dump_mmu(CPUPPCState *env)
}
}
static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw)
static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
MMUAccessType access_type)
{
int in_plb, ret;
@ -1393,7 +1339,7 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
if (in_plb ^ msr_px) {
/* Access in protected area */
if (rw == 1) {
if (access_type == MMU_DATA_STORE) {
/* Access is not allowed */
ret = -2;
}
@ -1413,28 +1359,28 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
return ret;
}
static int get_physical_address_wtlb(
CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int access_type,
int mmu_idx)
static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr,
MMUAccessType access_type, int type,
int mmu_idx)
{
int ret = -1;
bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
|| (access_type != ACCESS_CODE && msr_dr == 0);
bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
|| (type != ACCESS_CODE && msr_dr == 0);
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
case POWERPC_MMU_SOFT_74xx:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
ret = check_physical(env, ctx, eaddr, access_type);
} else {
/* Try to find a BAT */
if (env->nb_BATs != 0) {
ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type);
ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
}
if (ret < 0) {
/* We didn't match any BAT entry or don't have BATs */
ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type);
ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
}
}
break;
@ -1442,19 +1388,17 @@ static int get_physical_address_wtlb(
case POWERPC_MMU_SOFT_4xx:
case POWERPC_MMU_SOFT_4xx_Z:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
ret = check_physical(env, ctx, eaddr, access_type);
} else {
ret = mmu40x_get_physical_address(env, ctx, eaddr,
rw, access_type);
ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
}
break;
case POWERPC_MMU_BOOKE:
ret = mmubooke_get_physical_address(env, ctx, eaddr,
rw, access_type);
ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
break;
case POWERPC_MMU_BOOKE206:
ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
access_type, mmu_idx);
ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
mmu_idx);
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
@ -1462,7 +1406,7 @@ static int get_physical_address_wtlb(
break;
case POWERPC_MMU_REAL:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
ret = check_physical(env, ctx, eaddr, access_type);
} else {
cpu_abort(env_cpu(env),
"PowerPC in real mode do not do any translation\n");
@ -1476,11 +1420,11 @@ static int get_physical_address_wtlb(
return ret;
}
static int get_physical_address(
CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int access_type)
static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, MMUAccessType access_type,
int type)
{
return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0);
return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
}
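One subtlety of the mmu_helper.c conversion: the translation entry points now carry two different classifications of an access. access_type is the generic MMUAccessType (load/store/fetch), while the int type parameter keeps the older ppc-specific ACCESS_* kind (ACCESS_INT, ACCESS_FLOAT, ACCESS_CODE, ...) taken from env->access_type, which the direct-store (-4) cases later in this file still need. An illustrative call, with hypothetical values:

/* Hypothetical example: a floating-point store going through translation.
 * MMU_DATA_STORE says what the access does; ACCESS_FLOAT says which kind of
 * instruction issued it (used to pick the direct-store exception). */
ret = get_physical_address_wtlb(env, &ctx, eaddr,
                                MMU_DATA_STORE,  /* MMUAccessType */
                                ACCESS_FLOAT,    /* legacy ACCESS_* type */
                                mmu_idx);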
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@ -1508,14 +1452,15 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
;
}
if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
if (unlikely(get_physical_address(env, &ctx, addr, MMU_DATA_LOAD,
ACCESS_INT) != 0)) {
/*
* Some MMUs have separate TLBs for code and data. If we only
* try an ACCESS_INT, we may not be able to read instructions
* mapped by code TLBs, so we also try a ACCESS_CODE.
*/
if (unlikely(get_physical_address(env, &ctx, addr, 0,
if (unlikely(get_physical_address(env, &ctx, addr, MMU_INST_FETCH,
ACCESS_CODE) != 0)) {
return -1;
}
@ -1525,13 +1470,14 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
int rw, int mmu_idx)
MMUAccessType access_type, int mmu_idx)
{
uint32_t epid;
bool as, pr;
uint32_t missed_tid = 0;
bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
if (rw == 2) {
if (access_type == MMU_INST_FETCH) {
as = msr_ir;
}
env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
@ -1579,24 +1525,23 @@ static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
/* Perform address translation */
static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
int rw, int mmu_idx)
MMUAccessType access_type, int mmu_idx)
{
CPUState *cs = env_cpu(env);
PowerPCCPU *cpu = POWERPC_CPU(cs);
mmu_ctx_t ctx;
int access_type;
int type;
int ret = 0;
if (rw == 2) {
if (access_type == MMU_INST_FETCH) {
/* code access */
rw = 0;
access_type = ACCESS_CODE;
type = ACCESS_CODE;
} else {
/* data access */
access_type = env->access_type;
type = env->access_type;
}
ret = get_physical_address_wtlb(env, &ctx, address, rw,
access_type, mmu_idx);
ret = get_physical_address_wtlb(env, &ctx, address, access_type,
type, mmu_idx);
if (ret == 0) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
@ -1604,7 +1549,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
ret = 0;
} else if (ret < 0) {
LOG_MMU_STATE(cs);
if (access_type == ACCESS_CODE) {
if (type == ACCESS_CODE) {
switch (ret) {
case -1:
/* No matches in page tables or TLB */
@ -1632,7 +1577,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
cs->exception_index = POWERPC_EXCP_ITLB;
env->error_code = 0;
env->spr[SPR_BOOKE_DEAR] = address;
env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0);
env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
return -1;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
@ -1674,7 +1619,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
/* No matches in page tables or TLB */
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
if (rw == 1) {
if (access_type == MMU_DATA_STORE) {
cs->exception_index = POWERPC_EXCP_DSTLB;
env->error_code = 1 << 16;
} else {
@ -1691,7 +1636,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
get_pteg_offset32(cpu, ctx.hash[1]);
break;
case POWERPC_MMU_SOFT_74xx:
if (rw == 1) {
if (access_type == MMU_DATA_STORE) {
cs->exception_index = POWERPC_EXCP_DSTLB;
} else {
cs->exception_index = POWERPC_EXCP_DLTLB;
@ -1708,7 +1653,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
cs->exception_index = POWERPC_EXCP_DTLB;
env->error_code = 0;
env->spr[SPR_40x_DEAR] = address;
if (rw) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_40x_ESR] = 0x00800000;
} else {
env->spr[SPR_40x_ESR] = 0x00000000;
@ -1719,13 +1664,13 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE206:
booke206_update_mas_tlb_miss(env, address, rw, mmu_idx);
booke206_update_mas_tlb_miss(env, address, access_type, mmu_idx);
/* fall through */
case POWERPC_MMU_BOOKE:
cs->exception_index = POWERPC_EXCP_DTLB;
env->error_code = 0;
env->spr[SPR_BOOKE_DEAR] = address;
env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw);
env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
return -1;
case POWERPC_MMU_REAL:
cpu_abort(cs, "PowerPC in real mode should never raise "
@ -1743,16 +1688,16 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
if (env->mmu_model == POWERPC_MMU_SOFT_4xx
|| env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
env->spr[SPR_40x_DEAR] = address;
if (rw) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_40x_ESR] |= 0x00800000;
}
} else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
(env->mmu_model == POWERPC_MMU_BOOKE206)) {
env->spr[SPR_BOOKE_DEAR] = address;
env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw);
env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
} else {
env->spr[SPR_DAR] = address;
if (rw == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x0A000000;
} else {
env->spr[SPR_DSISR] = 0x08000000;
@ -1761,7 +1706,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
break;
case -4:
/* Direct store exception */
switch (access_type) {
switch (type) {
case ACCESS_FLOAT:
/* Floating point load/store */
cs->exception_index = POWERPC_EXCP_ALIGN;
@ -1773,7 +1718,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = address;
if (rw == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x06000000;
} else {
env->spr[SPR_DSISR] = 0x04000000;
@ -1784,7 +1729,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = address;
if (rw == 1) {
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_DSISR] = 0x06100000;
} else {
env->spr[SPR_DSISR] = 0x04100000;
@ -2085,32 +2030,6 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
/*****************************************************************************/
/* Special registers manipulation */
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
PowerPCCPU *cpu = env_archcpu(env);
qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
assert(!cpu->vhyp);
#if defined(TARGET_PPC64)
if (mmu_is_64bit(env->mmu_model)) {
target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
target_ulong htabsize = value & SDR_64_HTABSIZE;
if (value & ~sdr_mask) {
error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
value & ~sdr_mask);
value &= sdr_mask;
}
if (htabsize > 28) {
error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
htabsize);
return;
}
}
#endif /* defined(TARGET_PPC64) */
/* FIXME: Should check for valid HTABMASK values in 32-bit case */
env->spr[SPR_SDR1] = value;
}
#if defined(TARGET_PPC64)
void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
{

target/ppc/spr_tcg.h (new file, 136 lines)
View File

@ -0,0 +1,136 @@
/*
* PowerPC emulation for qemu: read/write callbacks for SPRs
*
* Copyright (C) 2021 Instituto de Pesquisas Eldorado
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SPR_TCG_H
#define SPR_TCG_H
#define SPR_NOACCESS (&spr_noaccess)
/* prototypes for readers and writers for SPRs */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn);
void spr_read_generic(DisasContext *ctx, int gprn, int sprn);
void spr_write_generic(DisasContext *ctx, int sprn, int gprn);
void spr_read_xer(DisasContext *ctx, int gprn, int sprn);
void spr_write_xer(DisasContext *ctx, int sprn, int gprn);
void spr_read_lr(DisasContext *ctx, int gprn, int sprn);
void spr_write_lr(DisasContext *ctx, int sprn, int gprn);
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn);
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn);
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn);
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn);
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn);
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn);
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn);
void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn);
void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn);
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn);
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn);
#ifndef CONFIG_USER_ONLY
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn);
void spr_write_clear(DisasContext *ctx, int sprn, int gprn);
void spr_access_nop(DisasContext *ctx, int sprn, int gprn);
void spr_read_decr(DisasContext *ctx, int gprn, int sprn);
void spr_write_decr(DisasContext *ctx, int sprn, int gprn);
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn);
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn);
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn);
void spr_write_atbu(DisasContext *ctx, int sprn, int gprn);
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn);
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn);
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn);
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn);
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn);
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn);
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn);
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn);
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn);
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn);
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn);
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn);
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn);
void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn);
void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn);
void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn);
void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn);
void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn);
void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn);
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn);
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn);
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn);
void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn);
void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn);
void spr_write_pir(DisasContext *ctx, int sprn, int gprn);
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn);
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn);
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn);
void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn);
void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn);
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn);
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn);
void spr_write_mas73(DisasContext *ctx, int sprn, int gprn);
void spr_read_mas73(DisasContext *ctx, int gprn, int sprn);
#ifdef TARGET_PPC64
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn);
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn);
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn);
void spr_read_purr(DisasContext *ctx, int gprn, int sprn);
void spr_write_purr(DisasContext *ctx, int sprn, int gprn);
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn);
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn);
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn);
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn);
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn);
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn);
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn);
void spr_read_hior(DisasContext *ctx, int gprn, int sprn);
void spr_write_hior(DisasContext *ctx, int sprn, int gprn);
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn);
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn);
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn);
void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn);
void spr_write_amr(DisasContext *ctx, int sprn, int gprn);
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn);
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn);
#endif
#endif
#ifdef TARGET_PPC64
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn);
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn);
void spr_read_tar(DisasContext *ctx, int gprn, int sprn);
void spr_write_tar(DisasContext *ctx, int sprn, int gprn);
void spr_read_tm(DisasContext *ctx, int gprn, int sprn);
void spr_write_tm(DisasContext *ctx, int sprn, int gprn);
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn);
void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn);
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn);
void spr_write_ebb(DisasContext *ctx, int sprn, int gprn);
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn);
void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn);
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn);
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn);
#endif
#endif
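The new spr_tcg.h only carries prototypes; the callback bodies remain in translate.c. As a rough idea of the shape such callbacks take, here is a hedged sketch of a generic read/write pair (modelled loosely on spr_read_generic/spr_write_generic, simplified and not the verbatim upstream code; cpu_gpr[] and cpu_env are the TCG globals set up by translate.c):

/* Sketch only: move CPUPPCState.spr[sprn] to/from the guest GPR. */
static void spr_read_generic_sketch(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState, spr[sprn]));
}

static void spr_write_generic_sketch(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_st_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState, spr[sprn]));
}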

File diff suppressed because it is too large

View File

@ -139,7 +139,7 @@ static void gen_lxvwsx(DisasContext *ctx)
gen_addr_reg_index(ctx, EA);
data = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, MO_TEUL);
tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
tcg_temp_free(EA);
@ -162,7 +162,7 @@ static void gen_lxvdsx(DisasContext *ctx)
gen_addr_reg_index(ctx, EA);
data = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, MO_TEQ);
tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
tcg_temp_free(EA);
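These last two hunks are the lxvwsx/lxvdsx endianness fix: MO_TEUL/MO_TEQ always use the build-time target byte order, whereas DEF_MEMOP() folds in the DisasContext's runtime byte-order mask, so the element load now follows the current MSR[LE] setting. Assuming the usual definition in target/ppc/translate.c, the macro is roughly:

/* Roughly: apply the translation-time default byte order (derived from
 * MSR[LE]) on top of a size-only memop such as MO_UL or MO_Q. */
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)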