mirror of https://github.com/xemu-project/xemu.git
Merge tag 'pull-target-arm-20230717' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * hw/arm/sbsa-ref: set 'slots' property of xhci
 * linux-user: Remove pointless NULL check in clock_adjtime handling
 * ptw: Fix S1_ptw_translate() debug path
 * ptw: Account for FEAT_RME when applying {N}SW, SA bits
 * accel/tcg: Zero-pad PC in TCG CPU exec trace lines
 * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmS1OEUZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3syoEACBj2B+btKASbWs6c7iUF9R
# bsMhVVZbeNrW7try7fIjAcvRQV2X7cdqHMGeX0yP9M5EcqBfz4ptxDbxcmEsgm0h
# kZJudG8RuZ/gnw7wbwQ1TfJf4KgsBh49yZjlom2s8CgVStpbuFO4xz7ZucR65uhl
# PwLCgW0/DJR4SQTvDLnCOTTNbY/cuWCKK1CmuLMOE9IgozMNOxxW5wkryrvdllKs
# hYSCWM1jy9fJ4TRlhDJy8JI7+t4TEZN9ESwYGE6QDly8r3GoGMFj5Z9okUbGp3/V
# MYfkbz7l2/C5QxcpY5d0mJUR1HlP7McO7rWhtQjqmCPGpDVqMUu4/DClu6Q/2Ob3
# GRQcgztZ8a9wgVa6b4g1UBkqCnloT7WtU3wLVVmZGF3DO4k+oz53XPHb2zFtI3Xx
# pQ9LyABIoKCM5ql+/WaA3thtTC1qH6lZnjMBqVBx8+d0zKYWSG4wlnbihy70GOpw
# V5n0fQlTXr5WV4tZT/euP17odvnkictH7Vmj6zHUFkHdqHxwFwG0OCw1ZjBrMbzl
# 7kY9DxGA+5iKEZoTwHpxXYny70MnpdRIrUhpZ/4PNq68hzIAQ5Dqm29DtKjodM60
# M49CIo+O9E3+0xpcGPDtcuJ7bVPd/95o3usVjapDdBREGWcJsPS6PHK3MuAxgkHo
# B0y1egitacJYp3x91gYIRA==
# =JPpH
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 17 Jul 2023 01:47:01 PM BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]

* tag 'pull-target-arm-20230717' of https://git.linaro.org/people/pmaydell/qemu-arm:
  hw/nvram: Avoid unnecessary Xilinx eFuse backstore write
  accel/tcg: Zero-pad PC in TCG CPU exec trace lines
  target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits
  target/arm: Fix S1_ptw_translate() debug path
  target/arm/ptw.c: Add comments to S1Translate struct fields
  linux-user: Remove pointless NULL check in clock_adjtime handling
  hw/arm/sbsa-ref: set 'slots' property of xhci

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 08572022e5
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -298,7 +298,7 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));
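The format-string change above pads the guest PC to a fixed 16 hex digits, so consecutive trace lines stay column-aligned instead of varying in width with the PC value. A minimal standalone sketch of the difference (it assumes VADDR_PRIx expands to PRIx64; the addresses are made up for illustration):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t pcs[] = { 0x400080, 0xffff800010001234 };

    for (int i = 0; i < 2; i++) {
        printf("old: [%" PRIx64 "]\n", pcs[i]);     /* width follows the value */
        printf("new: [%016" PRIx64 "]\n", pcs[i]);  /* always 16 digits, zero-padded */
    }
    return 0;
}

The same substitution is applied to the other two CPU_LOG_EXEC call sites in the next two hunks.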
@@ -487,7 +487,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
         if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
             vaddr pc = log_pc(cpu, last_tb);
             if (qemu_log_in_addr_range(pc)) {
-                qemu_log("Stopped execution of TB chain before %p [%"
+                qemu_log("Stopped execution of TB chain before %p [%016"
                          VADDR_PRIx "] %s\n",
                          last_tb->tc.ptr, pc, lookup_symbol(pc));
             }
@@ -637,7 +637,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                      VADDR_PRIx "\n", pc);
         }
     }
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -611,6 +611,7 @@ static void create_xhci(const SBSAMachineState *sms)
     hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
     int irq = sbsa_ref_irqmap[SBSA_XHCI];
    DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
 
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
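The added qdev_prop_set_uint32() call sits before sysbus_realize_and_unref(), presumably because the slot count is consumed when the device is realized. A toy stand-in for that configure-then-realize ordering (invented types and helpers, not QEMU APIs; XHCI_MAXSLOTS is modelled as 64 here):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy device: properties are latched into "hardware" state at realize. */
typedef struct {
    unsigned slots;      /* configurable property */
    bool realized;
    unsigned hw_slots;   /* value the modelled controller advertises */
} ToyXhci;

static void toy_prop_set_slots(ToyXhci *d, unsigned v)
{
    assert(!d->realized);   /* properties must be set before realize */
    d->slots = v;
}

static void toy_realize(ToyXhci *d)
{
    d->realized = true;
    d->hw_slots = d->slots; /* configuration is consumed here */
}

int main(void)
{
    ToyXhci dev = { 0 };

    toy_prop_set_slots(&dev, 64);   /* analogous to setting "slots" */
    toy_realize(&dev);
    printf("slots advertised: %u\n", dev.hw_slots);
    return 0;
}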
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -143,6 +143,8 @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
 
 bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
 {
+    uint32_t set, *row;
+
     if (efuse_ro_bits_find(s, bit)) {
         g_autofree char *path = object_get_canonical_path(OBJECT(s));
 
@@ -152,8 +154,13 @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
         return false;
     }
 
-    s->fuse32[bit / 32] |= 1 << (bit % 32);
-    efuse_bdrv_sync(s, bit);
+    /* Avoid back-end write unless there is a real update */
+    row = &s->fuse32[bit / 32];
+    set = 1 << (bit % 32);
+    if (!(set & *row)) {
+        *row |= set;
+        efuse_bdrv_sync(s, bit);
+    }
     return true;
 }
 
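With the rewrite, efuse_bdrv_sync() (the write-back to the eFuse backing store) runs only when the bit actually flips from 0 to 1; programming an already-set fuse becomes a no-op. A standalone sketch of the same guard, with a counter standing in for the block-device sync (toy code, not the QEMU device model):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fuse32[64];   /* toy fuse word array */
static unsigned sync_calls;   /* stands in for efuse_bdrv_sync() */

static bool toy_efuse_set_bit(unsigned bit)
{
    uint32_t set = 1u << (bit % 32);
    uint32_t *row = &fuse32[bit / 32];

    if (!(set & *row)) {      /* only write back on a real update */
        *row |= set;
        sync_calls++;
    }
    return true;
}

int main(void)
{
    toy_efuse_set_bit(5);
    toy_efuse_set_bit(5);     /* already programmed: no backing-store write */
    toy_efuse_set_bit(40);
    printf("backing-store writes: %u\n", sync_calls);   /* prints 2 */
    return 0;
}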
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -11190,16 +11190,14 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
     case TARGET_NR_clock_adjtime:
         {
-            struct timex htx, *phtx = &htx;
+            struct timex htx;
 
-            if (target_to_host_timex(phtx, arg2) != 0) {
+            if (target_to_host_timex(&htx, arg2) != 0) {
                 return -TARGET_EFAULT;
             }
-            ret = get_errno(clock_adjtime(arg1, phtx));
-            if (!is_error(ret) && phtx) {
-                if (host_to_target_timex(arg2, phtx) != 0) {
-                    return -TARGET_EFAULT;
-                }
+            ret = get_errno(clock_adjtime(arg1, &htx));
+            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+                return -TARGET_EFAULT;
             }
         }
         return ret;
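After the cleanup the handler has the usual linux-user shape: convert the guest struct in, make the host call, convert the result back out, and fail with -TARGET_EFAULT if either conversion faults. A rough standalone model of that control flow (stub helpers with invented names, not the real target_to_host_timex()/host_to_target_timex()):

#include <stdio.h>

#define TOY_EFAULT (-14)               /* stand-in for -TARGET_EFAULT */

struct toy_timex { long offset; };

static int toy_copy_in(struct toy_timex *host, const struct toy_timex *guest)
{
    if (!guest) { return TOY_EFAULT; } /* guest pointer faulted */
    *host = *guest;
    return 0;
}

static int toy_copy_out(struct toy_timex *guest, const struct toy_timex *host)
{
    if (!guest) { return TOY_EFAULT; }
    *guest = *host;
    return 0;
}

static long toy_clock_adjtime(struct toy_timex *guest_tx)
{
    struct toy_timex htx;

    if (toy_copy_in(&htx, guest_tx) != 0) {
        return TOY_EFAULT;
    }
    htx.offset += 1;                   /* pretend the host syscall adjusted the clock */
    long ret = 0;                      /* pretend it succeeded */
    if (ret >= 0 && toy_copy_out(guest_tx, &htx) != 0) {
        return TOY_EFAULT;
    }
    return ret;
}

int main(void)
{
    struct toy_timex tx = { .offset = 41 };
    printf("ret=%ld offset=%ld\n", toy_clock_adjtime(&tx), tx.offset);
    return 0;
}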
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -19,10 +19,50 @@
 #endif
 
 typedef struct S1Translate {
+    /*
+     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
+     * Together with in_space, specifies the architectural translation regime.
+     */
     ARMMMUIdx in_mmu_idx;
+    /*
+     * in_ptw_idx: specifies which mmuidx to use for the actual
+     * page table descriptor load operations. This will be one of the
+     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     ARMMMUIdx in_ptw_idx;
+    /*
+     * in_space: the security space for this walk. This plus
+     * the in_mmu_idx specify the architectural translation regime.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     *
+     * Note that the security space for the in_ptw_idx may be different
+     * from that for the in_mmu_idx. We do not need to explicitly track
+     * the in_ptw_idx security space because:
+     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+     *    itself specifies the security space
+     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+     *    space used for ptw reads is the same as that of the security
+     *    space of the stage 1 translation for all cases except where
+     *    stage 1 is Secure; in that case the only possibilities for
+     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
+     *    value being Stage2 vs Stage2_S distinguishes those.
+     */
     ARMSecuritySpace in_space;
+    /*
+     * in_secure: whether the translation regime is a Secure one.
+     * This is always equal to arm_space_is_secure(in_space).
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     bool in_secure;
+    /*
+     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+     * accesses will not update the guest page table access flags
+     * and will not change the state of the softmmu TLBs.
+     */
     bool in_debug;
     /*
      * If this is stage 2 of a stage 1+2 page table walk, then this must
@@ -445,11 +485,39 @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
     }
 }
 
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+                                          ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * Return the security space to use for stage 2 when doing
+     * the S1 page table descriptor load.
+     */
+    if (regime_is_stage2(s2_mmu_idx)) {
+        /*
+         * The security space for ptw reads is almost always the same
+         * as that of the security space of the stage 1 translation.
+         * The only exception is when stage 1 is Secure; in that case
+         * the ptw read might be to the Secure or the NonSecure space
+         * (but never Realm or Root), and the s2_mmu_idx tells us which.
+         * Root translations are always single-stage.
+         */
+        if (s1_space == ARMSS_Secure) {
+            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+        } else {
+            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+            assert(s1_space != ARMSS_Root);
+            return s1_space;
+        }
+    } else {
+        /* ptw loads are from phys: the mmu idx itself says which space */
+        return arm_phys_to_space(s2_mmu_idx);
+    }
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
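The new S2_security_space() helper encodes the rule described by the S1Translate comments above: stage-2 descriptor loads use the stage-1 security space, except that a Secure stage 1 may read from Secure or NonSecure memory (Stage2_S vs Stage2 decides which), while Phys indexes name their space directly. A standalone model of that decision table, using local enums in place of the QEMU types (regime_is_stage2() and arm_phys_to_space() are folded into the switch, and the asserts on impossible combinations are dropped):

#include <stdio.h>

typedef enum { SS_Secure, SS_NonSecure, SS_Root, SS_Realm } SecSpace;
typedef enum { IDX_Stage2, IDX_Stage2_S,
               IDX_Phys_S, IDX_Phys_NS, IDX_Phys_Root, IDX_Phys_Realm } PtwIdx;

static const char *name[] = { "Secure", "NonSecure", "Root", "Realm" };

static SecSpace s2_security_space(SecSpace s1_space, PtwIdx s2_idx)
{
    switch (s2_idx) {
    case IDX_Stage2:
    case IDX_Stage2_S:
        /*
         * Stage-2 walk: same space as stage 1, except that a Secure
         * stage 1 may load descriptors from Secure or NonSecure memory,
         * and the index (Stage2_S vs Stage2) says which.
         */
        if (s1_space == SS_Secure) {
            return s2_idx == IDX_Stage2_S ? SS_Secure : SS_NonSecure;
        }
        return s1_space;
    case IDX_Phys_S:     return SS_Secure;
    case IDX_Phys_NS:    return SS_NonSecure;
    case IDX_Phys_Root:  return SS_Root;
    case IDX_Phys_Realm: return SS_Realm;
    }
    return SS_NonSecure;  /* unreachable */
}

int main(void)
{
    printf("Secure s1, Stage2    -> %s\n", name[s2_security_space(SS_Secure, IDX_Stage2)]);
    printf("Secure s1, Stage2_S  -> %s\n", name[s2_security_space(SS_Secure, IDX_Stage2_S)]);
    printf("Realm  s1, Stage2    -> %s\n", name[s2_security_space(SS_Realm, IDX_Stage2)]);
    printf("Realm  s1, Phys_Root -> %s\n", name[s2_security_space(SS_Realm, IDX_Phys_Root)]);
    return 0;
}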
@@ -462,13 +530,12 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
+        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
-                         : space == ARMSS_Realm ? ARMSS_Realm
-                         : ARMSS_NonSecure),
+            .in_secure = arm_space_is_secure(s2_space),
+            .in_space = s2_space,
             .in_debug = true,
         };
         GetPhysAddrResult s2 = { };
|
||||||
hwaddr ipa;
|
hwaddr ipa;
|
||||||
int s1_prot, s1_lgpgsz;
|
int s1_prot, s1_lgpgsz;
|
||||||
bool is_secure = ptw->in_secure;
|
bool is_secure = ptw->in_secure;
|
||||||
|
ARMSecuritySpace in_space = ptw->in_space;
|
||||||
bool ret, ipa_secure;
|
bool ret, ipa_secure;
|
||||||
ARMCacheAttrs cacheattrs1;
|
ARMCacheAttrs cacheattrs1;
|
||||||
ARMSecuritySpace ipa_space;
|
ARMSecuritySpace ipa_space;
|
||||||
|
@ -3133,11 +3201,13 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
|
||||||
* Check if IPA translates to secure or non-secure PA space.
|
* Check if IPA translates to secure or non-secure PA space.
|
||||||
* Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
|
* Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
|
||||||
*/
|
*/
|
||||||
result->f.attrs.secure =
|
if (in_space == ARMSS_Secure) {
|
||||||
(is_secure
|
result->f.attrs.secure =
|
||||||
&& !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
|
!(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
|
||||||
&& (ipa_secure
|
&& (ipa_secure
|
||||||
|| !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
|
|| !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
|
||||||
|
result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
|
||||||
|
}
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
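The guarded version keeps the documented rule, "VSTCR overrides VTCR and {N}SW overrides {N}SA", but applies it only when the incoming walk is in the Secure space, so with FEAT_RME a Realm or Root translation keeps whatever space stage 2 already produced, and attrs.space stays consistent with attrs.secure. A standalone sketch of the committed logic with the register fields reduced to booleans (toy model, not QEMU code):

#include <stdbool.h>
#include <stdio.h>

typedef enum { SS_Secure, SS_NonSecure, SS_Root, SS_Realm } SecSpace;
static const char *name[] = { "Secure", "NonSecure", "Root", "Realm" };

/*
 * Only a Secure stage-1 walk re-evaluates the output space from
 * VSTCR_EL2.{SA,SW} and VTCR_EL2.{NSA,NSW}; other spaces keep the
 * result of the stage-2 walk.
 */
static SecSpace output_space(SecSpace in_space, SecSpace s2_space, bool ipa_secure,
                             bool vstcr_sa_or_sw, bool vtcr_nsa_or_nsw)
{
    if (in_space == SS_Secure) {
        bool secure = !vstcr_sa_or_sw && (ipa_secure || !vtcr_nsa_or_nsw);
        return secure ? SS_Secure : SS_NonSecure;
    }
    return s2_space;
}

int main(void)
{
    /* Secure walk, secure IPA, no override bits: stays Secure. */
    printf("%s\n", name[output_space(SS_Secure, SS_Secure, true, false, false)]);
    /* Secure walk with VSTCR.SW/SA set: forced to NonSecure. */
    printf("%s\n", name[output_space(SS_Secure, SS_Secure, true, true, false)]);
    /* Realm walk: untouched by the {N}SW/SA logic. */
    printf("%s\n", name[output_space(SS_Realm, SS_Realm, false, false, false)]);
    return 0;
}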