Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180615' into staging

target-arm and miscellaneous queue:
 * fix KVM state save/restore for GICv3 priority registers for high IRQ numbers
 * hw/arm/mps2-tz: Put ethernet controller behind PPC
 * hw/sh/sh7750: Convert away from old_mmio
 * hw/m68k/mcf5206: Convert away from old_mmio
 * hw/block/pflash_cfi02: Convert away from old_mmio
 * hw/watchdog/wdt_i6300esb: Convert away from old_mmio
 * hw/input/pckbd: Convert away from old_mmio
 * hw/char/parallel: Convert away from old_mmio
 * armv7m: refactor to get rid of armv7m_init() function
 * arm: Don't crash if user tries to use a Cortex-M CPU without an NVIC
 * hw/core/or-irq: Support more than 16 inputs to an OR gate
 * cpu-defs.h: Document CPUIOTLBEntry 'addr' field
 * cputlb: Pass cpu_transaction_failed() the correct physaddr
 * CODING_STYLE: Define our preferred form for multiline comments
 * Add and use new stn_*_p() and ldn_*_p() memory access functions
 * target/arm: More parts of the upcoming SVE support
 * aspeed_scu: Implement RNG register
 * m25p80: add support for two bytes WRSR for Macronix chips
 * exec.c: Handle IOMMUs being in the path of TCG CPU memory accesses
 * target/arm: Allow ARMv6-M Thumb2 instructions

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJbI8wDAAoJEDwlJe0UNgze8acP/i3q3aGcb4b+FwWogktqVC+h
+eY+T0dQC0ohGjWMvR8jk4h7Gh75zpvH5CO0Oh7u8JqOBTEvPllvCatcqYJ36Vik
+spqdNr94yW7M5GKMl1+vJdQNv5SejuUg5EdwVdk2bJlObuVIfQDCAAi0DaW443B
iJaUrIxVP06DeVjPvSG+WVgP+N3BualHURXBdH6h23gZeftWZ3iDMPELPYuLbUOF
LF1N/w/rtvx0qOzr7VBdsSEdEyFFli5B5Pv4+xiCfgKTBu9cm+ima0NXPPv4Wcaw
322LQvvQBDEVT3AWP6oPLfXmpNjmCe73G73ncInWiN8pESe8O6T0aJk6vAk6WP5A
UDGtHGw4H6jCDFn28W/Sl/vHVV6qdd8PFDAlomEBNhy2j4jqMW4xPPu2mMprLV1L
6aYsENCxgg7kIBmJlMiO710MdLvIBcVL1eWILTG82fvKmb4qwbJ1r4PInGBOxM6s
LGB14JSvgfsKqlxZPjD6oNXOyxbTdeU72YIBMCUhBD5/8B1zFk8MKofhVy2Z50QA
yxwjBlJL/g1xLMM0WdED9F7SeZKCjaJK+VZF+5qwvvmA0VL9QPsxXq+djTlF/mkZ
agu/jS7L7dFdSeionRRKTdc0jXfimoSE9QFj7tpxdh3VehSGRBBp7vTYQAp+Bc9s
5ghD5oektvRQslM/qLWu
=McgN
-----END PGP SIGNATURE-----

# gpg: Signature made Fri 15 Jun 2018 15:24:03 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20180615: (43 commits)
  target/arm: Allow ARMv6-M Thumb2 instructions
  exec.c: Handle IOMMUs in address_space_translate_for_iotlb()
  iommu: Add IOMMU index argument to translate method
  iommu: Add IOMMU index argument to notifier APIs
  iommu: Add IOMMU index concept to IOMMU API
  m25p80: add support for two bytes WRSR for Macronix chips
  aspeed_scu: Implement RNG register
  target/arm: Implement SVE Floating Point Arithmetic - Unpredicated Group
  target/arm: Implement SVE Integer Wide Immediate - Unpredicated Group
  target/arm: Implement FDUP/DUP
  target/arm: Implement SVE Integer Compare - Scalars Group
  target/arm: Implement SVE Predicate Count Group
  target/arm: Implement SVE Partition Break Group
  target/arm: Implement SVE Integer Compare - Immediate Group
  target/arm: Implement SVE Integer Compare - Vectors Group
  target/arm: Implement SVE Select Vectors Group
  target/arm: Implement SVE vector splice (predicated)
  target/arm: Implement SVE reverse within elements
  target/arm: Implement SVE copy to vector (predicated)
  target/arm: Implement SVE conditionally broadcast/extract element
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 81d3864796

CODING_STYLE | 17
@@ -124,6 +124,23 @@ We use traditional C-style /* */ comments and avoid // comments.
 Rationale: The // form is valid in C99, so this is purely a matter of
 consistency of style. The checkpatch script will warn you about this.
 
+Multiline comment blocks should have a row of stars on the left,
+and the initial /* and terminating */ both on their own lines:
+    /*
+     * like
+     * this
+     */
+This is the same format required by the Linux kernel coding style.
+
+(Some of the existing comments in the codebase use the GNU Coding
+Standards form which does not have stars on the left, or other
+variations; avoid these when writing new comments, but don't worry
+about converting to the preferred form unless you're editing that
+comment anyway.)
+
+Rationale: Consistency, and ease of visually picking out a multiline
+comment from the surrounding code.
+
 8. trace-events style
 
 8.1 0x prefix
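As a quick illustration of the rule added above (an informal example, not part of the patch), the preferred and the discouraged comment forms look like this:

/*
 * Preferred: a row of stars on the left, with the opening and closing
 * markers on their own lines, matching the Linux kernel style.
 */
int preferred_example(void);

/* Discouraged GNU-style block comment:
   no stars on the left, and the closing marker
   shares a line with the last sentence.  */
int discouraged_example(void);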
accel/tcg/cputlb.c
@@ -632,7 +632,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     }
 
     sz = size;
-    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
+    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz,
+                                                attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);
 
     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
@@ -664,6 +665,18 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
 
     /* refill the tlb */
+    /*
+     * At this point iotlb contains a physical section number in the lower
+     * TARGET_PAGE_BITS, and either
+     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
+     *  + the offset within section->mr of the page base (otherwise)
+     * We subtract the vaddr (which is page aligned and thus won't
+     * disturb the low bits) to give an offset which can be added to the
+     * (non-page-aligned) vaddr of the eventual memory access to get
+     * the MemoryRegion offset for the access. Note that the vaddr we
+     * subtract here is that of the page base, and not the same as the
+     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
+     */
     env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
     env->iotlb[mmu_idx][index].attrs = attrs;
 
@@ -765,13 +778,16 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          target_ulong addr, uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
-    hwaddr physaddr = iotlbentry->addr;
-    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    hwaddr mr_offset;
+    MemoryRegionSection *section;
+    MemoryRegion *mr;
     uint64_t val;
     bool locked = false;
     MemTxResult r;
 
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    mr = section->mr;
+    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
@@ -783,9 +799,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_read(mr, physaddr,
+    r = memory_region_dispatch_read(mr, mr_offset,
                                     &val, size, iotlbentry->attrs);
     if (r != MEMTX_OK) {
+        hwaddr physaddr = mr_offset +
+            section->offset_within_address_space -
+            section->offset_within_region;
+
         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
@@ -802,12 +822,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                       uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
-    hwaddr physaddr = iotlbentry->addr;
-    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    hwaddr mr_offset;
+    MemoryRegionSection *section;
+    MemoryRegion *mr;
    bool locked = false;
     MemTxResult r;
 
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    mr = section->mr;
+    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -818,9 +841,13 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_write(mr, physaddr,
+    r = memory_region_dispatch_write(mr, mr_offset,
                                      val, size, iotlbentry->attrs);
     if (r != MEMTX_OK) {
+        hwaddr physaddr = mr_offset +
+            section->offset_within_address_space -
+            section->offset_within_region;
+
         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
@@ -868,12 +895,13 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
  */
 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 {
-    int mmu_idx, index, pd;
+    int mmu_idx, index;
     void *p;
     MemoryRegion *mr;
+    MemoryRegionSection *section;
     CPUState *cpu = ENV_GET_CPU(env);
     CPUIOTLBEntry *iotlbentry;
-    hwaddr physaddr;
+    hwaddr physaddr, mr_offset;
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = cpu_mmu_index(env, true);
@@ -884,8 +912,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
         }
     }
     iotlbentry = &env->iotlb[mmu_idx][index];
-    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
-    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    mr = section->mr;
     if (memory_region_is_unassigned(mr)) {
         qemu_mutex_lock_iothread();
         if (memory_region_request_mmio_ptr(mr, addr)) {
@@ -906,7 +934,10 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
      * and use the MemTXResult it produced). However it is the
      * simplest place we have currently available for the check.
      */
-    physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    physaddr = mr_offset +
+        section->offset_within_address_space -
+        section->offset_within_region;
     cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
                            iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
 
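To make the address arithmetic in io_readx()/io_writex() above concrete, here is a small standalone sketch (all numbers are invented for illustration; this is not QEMU code) of how the MemoryRegion offset and the guest physical address reported to cpu_transaction_failed() are recovered from a cached iotlb entry:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~((uint64_t)(1 << TARGET_PAGE_BITS) - 1))

int main(void)
{
    /* At TLB fill time, tlb_set_page_with_attrs() caches iotlb - vaddr,
     * where iotlb is the page-aligned offset of the page base within
     * section->mr and vaddr is the page base virtual address.
     */
    uint64_t iotlb_page_offset = 0x3000;   /* offset of page base in mr */
    uint64_t vaddr_page_base   = 0x7000;   /* guest vaddr of page base  */
    uint64_t cached_addr = iotlb_page_offset - vaddr_page_base;

    /* Later, an access hits the same page at an unaligned address. */
    uint64_t access_vaddr = 0x7abc;

    /* io_readx()/io_writex() recompute the offset into section->mr... */
    uint64_t mr_offset = (cached_addr & TARGET_PAGE_MASK) + access_vaddr;

    /* ...and, only if the transaction fails, turn it back into a guest
     * physical address using the section's placement in the address
     * space (both offsets invented here).
     */
    uint64_t offset_within_address_space = 0x40000000;
    uint64_t offset_within_region        = 0x0;
    uint64_t physaddr = mr_offset + offset_within_address_space
                        - offset_within_region;

    printf("mr_offset = 0x%" PRIx64 ", physaddr = 0x%" PRIx64 "\n",
           mr_offset, physaddr);   /* prints 0x3abc and 0x40003abc */
    return 0;
}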
docs/devel/loads-stores.rst
@@ -53,9 +53,24 @@ The ``_{endian}`` infix is omitted for target-endian accesses.
 The target endian accessors are only available to source
 files which are built per-target.
 
+There are also functions which take the size as an argument:
+
+load: ``ldn{endian}_p(ptr, sz)``
+
+which performs an unsigned load of ``sz`` bytes from ``ptr``
+as an ``{endian}`` order value and returns it in a uint64_t.
+
+store: ``stn{endian}_p(ptr, sz, val)``
+
+which stores ``val`` to ``ptr`` as an ``{endian}`` order value
+of size ``sz`` bytes.
+
+
 Regexes for git grep
  - ``\<ldf\?[us]\?[bwlq]\(_[hbl]e\)\?_p\>``
  - ``\<stf\?[bwlq]\(_[hbl]e\)\?_p\>``
+ - ``\<ldn_\([hbl]e\)?_p\>``
+ - ``\<stn_\([hbl]e\)?_p\>``
 
 ``cpu_{ld,st}_*``
 ~~~~~~~~~~~~~~~~~
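As a usage illustration of the sized accessors documented above, here is a minimal self-contained sketch of their semantics (the example_* helpers re-implement the documented behaviour for the little-endian case; they are not QEMU's code, and the buffer contents are invented):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-ins for stn_le_p()/ldn_le_p(): store or load an unsigned value of
 * 'sz' bytes (1, 2, 4 or 8) at 'ptr' in little-endian byte order.
 */
static void example_stn_le_p(void *ptr, int sz, uint64_t val)
{
    uint8_t *p = ptr;
    for (int i = 0; i < sz; i++) {
        p[i] = val >> (8 * i);
    }
}

static uint64_t example_ldn_le_p(const void *ptr, int sz)
{
    const uint8_t *p = ptr;
    uint64_t val = 0;
    for (int i = 0; i < sz; i++) {
        val |= (uint64_t)p[i] << (8 * i);
    }
    return val;
}

int main(void)
{
    uint8_t buf[8] = { 0 };

    example_stn_le_p(buf, 4, 0x11223344);   /* buf = 44 33 22 11 00 ... */
    printf("0x%" PRIx64 "\n", example_ldn_le_p(buf, 2));   /* 0x3344 */
    return 0;
}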
exec.c | 261
@@ -501,8 +501,15 @@ static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
     do {
         hwaddr addr = *xlat;
         IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
-        IOMMUTLBEntry iotlb = imrc->translate(iommu_mr, addr, is_write ?
-                                              IOMMU_WO : IOMMU_RO);
+        int iommu_idx = 0;
+        IOMMUTLBEntry iotlb;
+
+        if (imrc->attrs_to_index) {
+            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
+        }
+
+        iotlb = imrc->translate(iommu_mr, addr, is_write ?
+                                IOMMU_WO : IOMMU_RO, iommu_idx);
 
         if (!(iotlb.perm & (1 << is_write))) {
             goto unassigned;
@@ -646,18 +653,144 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
     return mr;
 }
 
+typedef struct TCGIOMMUNotifier {
+    IOMMUNotifier n;
+    MemoryRegion *mr;
+    CPUState *cpu;
+    int iommu_idx;
+    bool active;
+} TCGIOMMUNotifier;
+
+static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);
+
+    if (!notifier->active) {
+        return;
+    }
+    tlb_flush(notifier->cpu);
+    notifier->active = false;
+    /* We leave the notifier struct on the list to avoid reallocating it later.
+     * Generally the number of IOMMUs a CPU deals with will be small.
+     * In any case we can't unregister the iommu notifier from a notify
+     * callback.
+     */
+}
+
+static void tcg_register_iommu_notifier(CPUState *cpu,
+                                        IOMMUMemoryRegion *iommu_mr,
+                                        int iommu_idx)
+{
+    /* Make sure this CPU has an IOMMU notifier registered for this
+     * IOMMU/IOMMU index combination, so that we can flush its TLB
+     * when the IOMMU tells us the mappings we've cached have changed.
+     */
+    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
+    TCGIOMMUNotifier *notifier;
+    int i;
+
+    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
+        notifier = &g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier, i);
+        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
+            break;
+        }
+    }
+    if (i == cpu->iommu_notifiers->len) {
+        /* Not found, add a new entry at the end of the array */
+        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
+        notifier = &g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier, i);
+
+        notifier->mr = mr;
+        notifier->iommu_idx = iommu_idx;
+        notifier->cpu = cpu;
+        /* Rather than trying to register interest in the specific part
+         * of the iommu's address space that we've accessed and then
+         * expand it later as subsequent accesses touch more of it, we
+         * just register interest in the whole thing, on the assumption
+         * that iommu reconfiguration will be rare.
+         */
+        iommu_notifier_init(&notifier->n,
+                            tcg_iommu_unmap_notify,
+                            IOMMU_NOTIFIER_UNMAP,
+                            0,
+                            HWADDR_MAX,
+                            iommu_idx);
+        memory_region_register_iommu_notifier(notifier->mr, &notifier->n);
+    }
+
+    if (!notifier->active) {
+        notifier->active = true;
+    }
+}
+
+static void tcg_iommu_free_notifier_list(CPUState *cpu)
+{
+    /* Destroy the CPU's notifier list */
+    int i;
+    TCGIOMMUNotifier *notifier;
+
+    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
+        notifier = &g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier, i);
+        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
+    }
+    g_array_free(cpu->iommu_notifiers, true);
+}
+
 /* Called from RCU critical section */
 MemoryRegionSection *
 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
-                                  hwaddr *xlat, hwaddr *plen)
+                                  hwaddr *xlat, hwaddr *plen,
+                                  MemTxAttrs attrs, int *prot)
 {
     MemoryRegionSection *section;
+    IOMMUMemoryRegion *iommu_mr;
+    IOMMUMemoryRegionClass *imrc;
+    IOMMUTLBEntry iotlb;
+    int iommu_idx;
     AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
 
-    section = address_space_translate_internal(d, addr, xlat, plen, false);
+    for (;;) {
+        section = address_space_translate_internal(d, addr, &addr, plen, false);
+
+        iommu_mr = memory_region_get_iommu(section->mr);
+        if (!iommu_mr) {
+            break;
+        }
+
+        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
+
+        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
+        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
+        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
+         * doesn't short-cut its translation table walk.
+         */
+        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
+        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+                | (addr & iotlb.addr_mask));
+        /* Update the caller's prot bits to remove permissions the IOMMU
+         * is giving us a failure response for. If we get down to no
+         * permissions left at all we can give up now.
+         */
+        if (!(iotlb.perm & IOMMU_RO)) {
+            *prot &= ~(PAGE_READ | PAGE_EXEC);
+        }
+        if (!(iotlb.perm & IOMMU_WO)) {
+            *prot &= ~PAGE_WRITE;
+        }
+
+        if (!*prot) {
+            goto translate_fail;
+        }
+
+        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
+    }
 
     assert(!memory_region_is_iommu(section->mr));
+    *xlat = addr;
     return section;
+
+translate_fail:
+    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
 }
 #endif
 
@@ -816,6 +949,9 @@ void cpu_exec_unrealizefn(CPUState *cpu)
     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
         vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
     }
+#ifndef CONFIG_USER_ONLY
+    tcg_iommu_free_notifier_list(cpu);
+#endif
 }
 
 Property cpu_common_props[] = {
@@ -863,6 +999,8 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
     if (cc->vmsd != NULL) {
         vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
     }
+
+    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier));
 #endif
 }
 
@@ -2544,22 +2682,7 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
     memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
                          ram_addr, size);
 
-    switch (size) {
-    case 1:
-        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
-        break;
-    case 2:
-        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
-        break;
-    case 4:
-        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
-        break;
-    case 8:
-        stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
-        break;
-    default:
-        abort();
-    }
+    stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
     memory_notdirty_write_complete(&ndi);
 }
 
@@ -2739,22 +2862,8 @@ static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
     if (res) {
         return res;
     }
-    switch (len) {
-    case 1:
-        *data = ldub_p(buf);
-        return MEMTX_OK;
-    case 2:
-        *data = lduw_p(buf);
-        return MEMTX_OK;
-    case 4:
-        *data = ldl_p(buf);
-        return MEMTX_OK;
-    case 8:
-        *data = ldq_p(buf);
-        return MEMTX_OK;
-    default:
-        abort();
-    }
+    *data = ldn_p(buf, len);
+    return MEMTX_OK;
 }
 
 static MemTxResult subpage_write(void *opaque, hwaddr addr,
@@ -2768,22 +2877,7 @@ static MemTxResult subpage_write(void *opaque, hwaddr addr,
            " value %"PRIx64"\n",
            __func__, subpage, len, addr, value);
 #endif
-    switch (len) {
-    case 1:
-        stb_p(buf, value);
-        break;
-    case 2:
-        stw_p(buf, value);
-        break;
-    case 4:
-        stl_p(buf, value);
-        break;
-    case 8:
-        stq_p(buf, value);
-        break;
-    default:
-        abort();
-    }
+    stn_p(buf, len, value);
     return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
 }
 
@@ -2897,14 +2991,15 @@ static const MemoryRegionOps readonly_mem_ops = {
     },
 };
 
-MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
+MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+                                      hwaddr index, MemTxAttrs attrs)
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
     AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
     MemoryRegionSection *sections = d->map.sections;
 
-    return sections[index & ~TARGET_PAGE_MASK].mr;
+    return &sections[index & ~TARGET_PAGE_MASK];
 }
 
 static void io_mem_init(void)
@@ -3128,34 +3223,8 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
             l = memory_access_size(mr, l, addr1);
             /* XXX: could force current_cpu to NULL to avoid
                potential bugs */
-            switch (l) {
-            case 8:
-                /* 64 bit write access */
-                val = ldq_p(buf);
-                result |= memory_region_dispatch_write(mr, addr1, val, 8,
-                                                       attrs);
-                break;
-            case 4:
-                /* 32 bit write access */
-                val = (uint32_t)ldl_p(buf);
-                result |= memory_region_dispatch_write(mr, addr1, val, 4,
-                                                       attrs);
-                break;
-            case 2:
-                /* 16 bit write access */
-                val = lduw_p(buf);
-                result |= memory_region_dispatch_write(mr, addr1, val, 2,
-                                                       attrs);
-                break;
-            case 1:
-                /* 8 bit write access */
-                val = ldub_p(buf);
-                result |= memory_region_dispatch_write(mr, addr1, val, 1,
-                                                       attrs);
-                break;
-            default:
-                abort();
-            }
+            val = ldn_p(buf, l);
+            result |= memory_region_dispatch_write(mr, addr1, val, l, attrs);
         } else {
             /* RAM case */
             ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
@@ -3216,34 +3285,8 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
             /* I/O case */
             release_lock |= prepare_mmio_access(mr);
             l = memory_access_size(mr, l, addr1);
-            switch (l) {
-            case 8:
-                /* 64 bit read access */
-                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
-                                                      attrs);
-                stq_p(buf, val);
-                break;
-            case 4:
-                /* 32 bit read access */
-                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
-                                                      attrs);
-                stl_p(buf, val);
-                break;
-            case 2:
-                /* 16 bit read access */
-                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
-                                                      attrs);
-                stw_p(buf, val);
-                break;
-            case 1:
-                /* 8 bit read access */
-                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
-                                                      attrs);
-                stb_p(buf, val);
-                break;
-            default:
-                abort();
-            }
+            result |= memory_region_dispatch_read(mr, addr1, &val, l, attrs);
+            stn_p(buf, l, val);
         } else {
             /* RAM case */
             ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
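For background on the iommu_idx plumbing visible above, here is a hedged sketch of how a hypothetical IOMMU model could use the new IOMMU index concept to keep separate translation contexts for secure and non-secure transactions. The function names, the two-index split, and the identity translation are all invented for illustration; only the callback signatures follow the API changed by this series.

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* Report how many translation contexts this IOMMU distinguishes. */
static int example_iommu_num_indexes(IOMMUMemoryRegion *iommu)
{
    return 2;   /* index 0: non-secure, index 1: secure */
}

/* Map the attributes of a transaction onto one of those contexts. */
static int example_iommu_attrs_to_index(IOMMUMemoryRegion *iommu,
                                        MemTxAttrs attrs)
{
    return attrs.secure ? 1 : 0;
}

/* The translate callback now receives the index chosen above. */
static IOMMUTLBEntry example_iommu_translate(IOMMUMemoryRegion *iommu,
                                             hwaddr addr,
                                             IOMMUAccessFlags flag,
                                             int iommu_idx)
{
    /* A real model would walk the translation table selected by
     * iommu_idx; this sketch just returns an identity mapping.
     */
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr & ~0xfffULL,
        .translated_addr = addr & ~0xfffULL,
        .addr_mask = 0xfff,
        .perm = IOMMU_RW,
    };
    return ret;
}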
hw/alpha/typhoon.c
@@ -666,7 +666,8 @@ static bool window_translate(TyphoonWindow *win, hwaddr addr,
    Pchip and generate a machine check interrupt.  */
 static IOMMUTLBEntry typhoon_translate_iommu(IOMMUMemoryRegion *iommu,
                                              hwaddr addr,
-                                             IOMMUAccessFlags flag)
+                                             IOMMUAccessFlags flag,
+                                             int iommu_idx)
 {
     TyphoonPchip *pchip = container_of(iommu, TyphoonPchip, iommu);
     IOMMUTLBEntry ret;
hw/arm/armv7m.c
@@ -178,6 +178,12 @@ static void armv7m_realize(DeviceState *dev, Error **errp)
             return;
         }
     }
+
+    /* Tell the CPU where the NVIC is; it will fail realize if it doesn't
+     * have one.
+     */
+    s->cpu->env.nvic = &s->nvic;
+
     object_property_set_bool(OBJECT(s->cpu), true, "realized", &err);
     if (err != NULL) {
         error_propagate(errp, err);
@@ -202,7 +208,6 @@ static void armv7m_realize(DeviceState *dev, Error **errp)
     sbd = SYS_BUS_DEVICE(&s->nvic);
     sysbus_connect_irq(sbd, 0,
                        qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));
-    s->cpu->env.nvic = &s->nvic;
 
     memory_region_add_subregion(&s->container, 0xe000e000,
                                 sysbus_mmio_get_region(sbd, 0));
@@ -261,27 +266,6 @@ static void armv7m_reset(void *opaque)
     cpu_reset(CPU(cpu));
 }
 
-/* Init CPU and memory for a v7-M based board.
-   mem_size is in bytes.
-   Returns the ARMv7M device.  */
-
-DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
-                      const char *kernel_filename, const char *cpu_type)
-{
-    DeviceState *armv7m;
-
-    armv7m = qdev_create(NULL, TYPE_ARMV7M);
-    qdev_prop_set_uint32(armv7m, "num-irq", num_irq);
-    qdev_prop_set_string(armv7m, "cpu-type", cpu_type);
-    object_property_set_link(OBJECT(armv7m), OBJECT(get_system_memory()),
-                             "memory", &error_abort);
-    /* This will exit with an error if the user passed us a bad cpu_type */
-    qdev_init_nofail(armv7m);
-
-    armv7m_load_kernel(ARM_CPU(first_cpu), kernel_filename, mem_size);
-    return armv7m;
-}
-
 void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size)
 {
     int image_size;
hw/arm/mps2-tz.c
@@ -74,12 +74,13 @@ typedef struct {
     UnimplementedDeviceState spi[5];
     UnimplementedDeviceState i2c[4];
     UnimplementedDeviceState i2s_audio;
-    UnimplementedDeviceState gpio[5];
+    UnimplementedDeviceState gpio[4];
     UnimplementedDeviceState dma[4];
     UnimplementedDeviceState gfx;
     CMSDKAPBUART uart[5];
     SplitIRQ sec_resp_splitter;
     qemu_or_irq uart_irq_orgate;
+    DeviceState *lan9118;
 } MPS2TZMachineState;
 
 #define TYPE_MPS2TZ_MACHINE "mps2tz"
@@ -224,6 +225,26 @@ static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque,
     return sysbus_mmio_get_region(SYS_BUS_DEVICE(fpgaio), 0);
 }
 
+static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
+                                  const char *name, hwaddr size)
+{
+    SysBusDevice *s;
+    DeviceState *iotkitdev = DEVICE(&mms->iotkit);
+    NICInfo *nd = &nd_table[0];
+
+    /* In hardware this is a LAN9220; the LAN9118 is software compatible
+     * except that it doesn't support the checksum-offload feature.
+     */
+    qemu_check_nic_model(nd, "lan9118");
+    mms->lan9118 = qdev_create(NULL, "lan9118");
+    qdev_set_nic_properties(mms->lan9118, nd);
+    qdev_init_nofail(mms->lan9118);
+
+    s = SYS_BUS_DEVICE(mms->lan9118);
+    sysbus_connect_irq(s, 0, qdev_get_gpio_in_named(iotkitdev, "EXP_IRQ", 16));
+    return sysbus_mmio_get_region(s, 0);
+}
+
 static void mps2tz_common_init(MachineState *machine)
 {
     MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine);
@@ -363,7 +384,7 @@ static void mps2tz_common_init(MachineState *machine)
                 { "gpio1", make_unimp_dev, &mms->gpio[1], 0x40101000, 0x1000 },
                 { "gpio2", make_unimp_dev, &mms->gpio[2], 0x40102000, 0x1000 },
                 { "gpio3", make_unimp_dev, &mms->gpio[3], 0x40103000, 0x1000 },
-                { "gpio4", make_unimp_dev, &mms->gpio[4], 0x40104000, 0x1000 },
+                { "eth", make_eth_dev, NULL, 0x42000000, 0x100000 },
             },
         }, {
             .name = "ahb_ppcexp1",
@@ -447,13 +468,6 @@ static void mps2tz_common_init(MachineState *machine)
                                                "cfg_sec_resp", 0));
     }
 
-    /* In hardware this is a LAN9220; the LAN9118 is software compatible
-     * except that it doesn't support the checksum-offload feature.
-     * The ethernet controller is not behind a PPC.
-     */
-    lan9118_init(&nd_table[0], 0x42000000,
-                 qdev_get_gpio_in_named(iotkitdev, "EXP_IRQ", 16));
-
     create_unimplemented_device("FPGA NS PC", 0x48007000, 0x1000);
 
     armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0x400000);
hw/arm/smmuv3.c
@@ -538,7 +538,7 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
 }
 
 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
-                                      IOMMUAccessFlags flag)
+                                      IOMMUAccessFlags flag, int iommu_idx)
 {
     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
     SMMUv3State *s = sdev->smmu;
hw/arm/stellaris.c
@@ -20,6 +20,7 @@
 #include "qemu/log.h"
 #include "exec/address-spaces.h"
 #include "sysemu/sysemu.h"
+#include "hw/arm/armv7m.h"
 #include "hw/char/pl011.h"
 #include "hw/misc/unimp.h"
 #include "cpu.h"
@@ -1298,8 +1299,13 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
                            &error_fatal);
     memory_region_add_subregion(system_memory, 0x20000000, sram);
 
-    nvic = armv7m_init(system_memory, flash_size, NUM_IRQ_LINES,
-                      ms->kernel_filename, ms->cpu_type);
+    nvic = qdev_create(NULL, TYPE_ARMV7M);
+    qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES);
+    qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type);
+    object_property_set_link(OBJECT(nvic), OBJECT(get_system_memory()),
+                             "memory", &error_abort);
+    /* This will exit with an error if the user passed us a bad cpu_type */
+    qdev_init_nofail(nvic);
 
     qdev_connect_gpio_out_named(nvic, "SYSRESETREQ", 0,
                                 qemu_allocate_irq(&do_sys_reset, NULL, 0));
@@ -1431,6 +1437,8 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
     create_unimplemented_device("analogue-comparator", 0x4003c000, 0x1000);
     create_unimplemented_device("hibernation", 0x400fc000, 0x1000);
     create_unimplemented_device("flash-control", 0x400fd000, 0x1000);
+
+    armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, flash_size);
 }
 
 /* FIXME: Figure out how to generate these from stellaris_boards.  */
hw/block/m25p80.c
@@ -698,6 +698,7 @@ static void complete_collecting_data(Flash *s)
     case MAN_MACRONIX:
         s->quad_enable = extract32(s->data[0], 6, 1);
         if (s->len > 1) {
+            s->volatile_cfg = s->data[1];
             s->four_bytes_address_mode = extract32(s->data[1], 5, 1);
         }
         break;
hw/block/pflash_cfi02.c
@@ -493,102 +493,41 @@ static void pflash_write (pflash_t *pfl, hwaddr offset,
     pfl->cmd = 0;
 }
 
-static uint32_t pflash_readb_be(void *opaque, hwaddr addr)
+static uint64_t pflash_be_readfn(void *opaque, hwaddr addr, unsigned size)
 {
-    return pflash_read(opaque, addr, 1, 1);
+    return pflash_read(opaque, addr, size, 1);
 }
 
-static uint32_t pflash_readb_le(void *opaque, hwaddr addr)
+static void pflash_be_writefn(void *opaque, hwaddr addr,
+                              uint64_t value, unsigned size)
 {
-    return pflash_read(opaque, addr, 1, 0);
+    pflash_write(opaque, addr, value, size, 1);
 }
 
-static uint32_t pflash_readw_be(void *opaque, hwaddr addr)
+static uint64_t pflash_le_readfn(void *opaque, hwaddr addr, unsigned size)
 {
-    pflash_t *pfl = opaque;
-
-    return pflash_read(pfl, addr, 2, 1);
+    return pflash_read(opaque, addr, size, 0);
 }
 
-static uint32_t pflash_readw_le(void *opaque, hwaddr addr)
+static void pflash_le_writefn(void *opaque, hwaddr addr,
+                              uint64_t value, unsigned size)
 {
-    pflash_t *pfl = opaque;
-
-    return pflash_read(pfl, addr, 2, 0);
-}
-
-static uint32_t pflash_readl_be(void *opaque, hwaddr addr)
-{
-    pflash_t *pfl = opaque;
-
-    return pflash_read(pfl, addr, 4, 1);
-}
-
-static uint32_t pflash_readl_le(void *opaque, hwaddr addr)
-{
-    pflash_t *pfl = opaque;
-
-    return pflash_read(pfl, addr, 4, 0);
-}
-
-static void pflash_writeb_be(void *opaque, hwaddr addr,
-                             uint32_t value)
-{
-    pflash_write(opaque, addr, value, 1, 1);
-}
-
-static void pflash_writeb_le(void *opaque, hwaddr addr,
-                             uint32_t value)
-{
-    pflash_write(opaque, addr, value, 1, 0);
-}
-
-static void pflash_writew_be(void *opaque, hwaddr addr,
-                             uint32_t value)
-{
-    pflash_t *pfl = opaque;
-
-    pflash_write(pfl, addr, value, 2, 1);
-}
-
-static void pflash_writew_le(void *opaque, hwaddr addr,
-                             uint32_t value)
-{
-    pflash_t *pfl = opaque;
-
-    pflash_write(pfl, addr, value, 2, 0);
-}
-
-static void pflash_writel_be(void *opaque, hwaddr addr,
-                             uint32_t value)
-{
-    pflash_t *pfl = opaque;
-
-    pflash_write(pfl, addr, value, 4, 1);
-}
-
-static void pflash_writel_le(void *opaque, hwaddr addr,
-                             uint32_t value)
-{
-    pflash_t *pfl = opaque;
-
-    pflash_write(pfl, addr, value, 4, 0);
+    pflash_write(opaque, addr, value, size, 0);
 }
 
 static const MemoryRegionOps pflash_cfi02_ops_be = {
-    .old_mmio = {
-        .read = { pflash_readb_be, pflash_readw_be, pflash_readl_be, },
-        .write = { pflash_writeb_be, pflash_writew_be, pflash_writel_be, },
-    },
+    .read = pflash_be_readfn,
+    .write = pflash_be_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
 static const MemoryRegionOps pflash_cfi02_ops_le = {
-    .old_mmio = {
-        .read = { pflash_readb_le, pflash_readw_le, pflash_readl_le, },
-        .write = { pflash_writeb_le, pflash_writew_le, pflash_writel_le, },
-    },
+    .read = pflash_le_readfn,
+    .write = pflash_le_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
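Several devices in this queue (pflash_cfi02 above, and parallel, pckbd and mcf5206 below) follow the same conversion pattern: the per-width .old_mmio callbacks are collapsed into a single read and a single write handler that receive the access size. The following is a minimal, generic sketch of the resulting MemoryRegionOps; the device state, register layout and function names are invented purely to illustrate the pattern, they are not from any of these patches.

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/memory.h"

/* Hypothetical device with four 32-bit registers. */
typedef struct {
    uint32_t regs[4];
} MyDevState;

static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
{
    MyDevState *s = opaque;

    /* One handler covers 1-, 2- and 4-byte accesses: 'size' replaces the
     * separate readb/readw/readl entry points that .old_mmio required.
     */
    return s->regs[(addr >> 2) & 3] & MAKE_64BIT_MASK(0, size * 8);
}

static void mydev_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    MyDevState *s = opaque;

    s->regs[(addr >> 2) & 3] = val & MAKE_64BIT_MASK(0, size * 8);
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};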
hw/char/parallel.c
@@ -554,56 +554,28 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp)
 }
 
 /* Memory mapped interface */
-static uint32_t parallel_mm_readb (void *opaque, hwaddr addr)
+static uint64_t parallel_mm_readfn(void *opaque, hwaddr addr, unsigned size)
 {
     ParallelState *s = opaque;
 
-    return parallel_ioport_read_sw(s, addr >> s->it_shift) & 0xFF;
+    return parallel_ioport_read_sw(s, addr >> s->it_shift) &
+        MAKE_64BIT_MASK(0, size * 8);
 }
 
-static void parallel_mm_writeb (void *opaque,
-                                hwaddr addr, uint32_t value)
+static void parallel_mm_writefn(void *opaque, hwaddr addr,
+                                uint64_t value, unsigned size)
 {
     ParallelState *s = opaque;
 
-    parallel_ioport_write_sw(s, addr >> s->it_shift, value & 0xFF);
-}
-
-static uint32_t parallel_mm_readw (void *opaque, hwaddr addr)
-{
-    ParallelState *s = opaque;
-
-    return parallel_ioport_read_sw(s, addr >> s->it_shift) & 0xFFFF;
-}
-
-static void parallel_mm_writew (void *opaque,
-                                hwaddr addr, uint32_t value)
-{
-    ParallelState *s = opaque;
-
-    parallel_ioport_write_sw(s, addr >> s->it_shift, value & 0xFFFF);
-}
-
-static uint32_t parallel_mm_readl (void *opaque, hwaddr addr)
-{
-    ParallelState *s = opaque;
-
-    return parallel_ioport_read_sw(s, addr >> s->it_shift);
-}
-
-static void parallel_mm_writel (void *opaque,
-                                hwaddr addr, uint32_t value)
-{
-    ParallelState *s = opaque;
-
-    parallel_ioport_write_sw(s, addr >> s->it_shift, value);
+    parallel_ioport_write_sw(s, addr >> s->it_shift,
+                             value & MAKE_64BIT_MASK(0, size * 8));
 }
 
 static const MemoryRegionOps parallel_mm_ops = {
-    .old_mmio = {
-        .read = { parallel_mm_readb, parallel_mm_readw, parallel_mm_readl },
-        .write = { parallel_mm_writeb, parallel_mm_writew, parallel_mm_writel },
-    },
+    .read = parallel_mm_readfn,
+    .write = parallel_mm_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
hw/core/or-irq.c
@@ -66,14 +66,49 @@ static void or_irq_init(Object *obj)
     qdev_init_gpio_out(DEVICE(obj), &s->out_irq, 1);
 }
 
+/* The original version of this device had a fixed 16 entries in its
+ * VMState array; devices with more inputs than this need to
+ * migrate the extra lines via a subsection.
+ * The subsection migrates as much of the levels[] array as is needed
+ * (including repeating the first 16 elements), to avoid the awkwardness
+ * of splitting it in two to meet the requirements of VMSTATE_VARRAY_UINT16.
+ */
+#define OLD_MAX_OR_LINES 16
+#if MAX_OR_LINES < OLD_MAX_OR_LINES
+#error MAX_OR_LINES must be at least 16 for migration compatibility
+#endif
+
+static bool vmstate_extras_needed(void *opaque)
+{
+    qemu_or_irq *s = OR_IRQ(opaque);
+
+    return s->num_lines >= OLD_MAX_OR_LINES;
+}
+
+static const VMStateDescription vmstate_or_irq_extras = {
+    .name = "or-irq-extras",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = vmstate_extras_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_VARRAY_UINT16_UNSAFE(levels, qemu_or_irq, num_lines, 0,
+                                     vmstate_info_bool, bool),
+        VMSTATE_END_OF_LIST(),
+    },
+};
+
 static const VMStateDescription vmstate_or_irq = {
     .name = TYPE_OR_IRQ,
     .version_id = 1,
     .minimum_version_id = 1,
     .fields = (VMStateField[]) {
-        VMSTATE_BOOL_ARRAY(levels, qemu_or_irq, MAX_OR_LINES),
+        VMSTATE_BOOL_SUB_ARRAY(levels, qemu_or_irq, 0, OLD_MAX_OR_LINES),
        VMSTATE_END_OF_LIST(),
-    }
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &vmstate_or_irq_extras,
+        NULL
+    },
 };
 
 static Property or_irq_properties[] = {
hw/dma/rc4030.c
@@ -491,7 +491,7 @@ static const MemoryRegionOps jazzio_ops = {
 };
 
 static IOMMUTLBEntry rc4030_dma_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
-                                          IOMMUAccessFlags flag)
+                                          IOMMUAccessFlags flag, int iommu_idx)
 {
     rc4030State *s = container_of(iommu, rc4030State, dma_mr);
     IOMMUTLBEntry ret = {
hw/i386/amd_iommu.c
@@ -991,7 +991,7 @@ static inline bool amdvi_is_interrupt_addr(hwaddr addr)
 }
 
 static IOMMUTLBEntry amdvi_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
-                                     IOMMUAccessFlags flag)
+                                     IOMMUAccessFlags flag, int iommu_idx)
 {
     AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
     AMDVIState *s = as->iommu_state;
hw/i386/intel_iommu.c
@@ -1023,7 +1023,7 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
 static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry,
                                      void *private)
 {
-    memory_region_notify_iommu((IOMMUMemoryRegion *)private, *entry);
+    memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry);
     return 0;
 }
 
@@ -1581,7 +1581,7 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                 .addr_mask = size - 1,
                 .perm = IOMMU_NONE,
             };
-            memory_region_notify_iommu(&vtd_as->iommu, entry);
+            memory_region_notify_iommu(&vtd_as->iommu, 0, entry);
         }
     }
 }
@@ -2015,7 +2015,7 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
     entry.iova = addr;
     entry.perm = IOMMU_NONE;
     entry.translated_addr = 0;
-    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);
+    memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry);
 
 done:
     return true;
@@ -2471,7 +2471,7 @@ static void vtd_mem_write(void *opaque, hwaddr addr,
 }
 
 static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
-                                         IOMMUAccessFlags flag)
+                                         IOMMUAccessFlags flag, int iommu_idx)
 {
     VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
     IntelIOMMUState *s = vtd_as->iommu_state;
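The notifier API change visible above means every caller now states which IOMMU index an invalidation applies to; an IOMMU with a single translation context simply passes 0. A hedged sketch of such a caller (the device state type, helper name and 4K page granule are invented for illustration):

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

typedef struct {
    IOMMUMemoryRegion iommu;
} ExampleIOMMUState;

static void example_invalidate_page(ExampleIOMMUState *s, hwaddr iova)
{
    /* An unmap event: perm is IOMMU_NONE and the translated address is
     * irrelevant; only the iova range matters to the notifiers.
     */
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = iova & ~0xfffULL,
        .translated_addr = 0,
        .addr_mask = 0xfff,
        .perm = IOMMU_NONE,
    };

    memory_region_notify_iommu(&s->iommu, 0 /* iommu_idx */, entry);
}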
hw/input/pckbd.c
@@ -434,7 +434,7 @@ static const VMStateDescription vmstate_kbd = {
 };
 
 /* Memory mapped interface */
-static uint32_t kbd_mm_readb (void *opaque, hwaddr addr)
+static uint64_t kbd_mm_readfn(void *opaque, hwaddr addr, unsigned size)
 {
     KBDState *s = opaque;
 
@@ -444,7 +444,8 @@ static uint32_t kbd_mm_readb (void *opaque, hwaddr addr)
         return kbd_read_data(s, 0, 1) & 0xff;
 }
 
-static void kbd_mm_writeb (void *opaque, hwaddr addr, uint32_t value)
+static void kbd_mm_writefn(void *opaque, hwaddr addr,
+                           uint64_t value, unsigned size)
 {
     KBDState *s = opaque;
 
@@ -454,12 +455,13 @@ static void kbd_mm_writeb (void *opaque, hwaddr addr, uint32_t value)
         kbd_write_data(s, 0, value & 0xff, 1);
 }
 
+
 static const MemoryRegionOps i8042_mmio_ops = {
+    .read = kbd_mm_readfn,
+    .write = kbd_mm_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_NATIVE_ENDIAN,
-    .old_mmio = {
-        .read = { kbd_mm_readb, kbd_mm_readb, kbd_mm_readb },
-        .write = { kbd_mm_writeb, kbd_mm_writeb, kbd_mm_writeb },
-    },
 };
 
 void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq,
hw/intc/arm_gicv3_kvm.c
@@ -135,7 +135,14 @@ static void kvm_dist_get_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
     uint32_t reg, *field;
     int irq;
 
-    field = (uint32_t *)bmp;
+    /* For the KVM GICv3, affinity routing is always enabled, and the first 8
+     * GICD_IPRIORITYR<n> registers are always RAZ/WI. The corresponding
+     * functionality is replaced by GICR_IPRIORITYR<n>. It doesn't need to
+     * sync them. So it needs to skip the field of GIC_INTERNAL irqs in bmp and
+     * offset.
+     */
+    field = (uint32_t *)(bmp + GIC_INTERNAL);
+    offset += (GIC_INTERNAL * 8) / 8;
     for_each_dist_irq_reg(irq, s->num_irq, 8) {
         kvm_gicd_access(s, offset, &reg, false);
         *field = reg;
@@ -149,7 +156,14 @@ static void kvm_dist_put_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
     uint32_t reg, *field;
     int irq;
 
-    field = (uint32_t *)bmp;
+    /* For the KVM GICv3, affinity routing is always enabled, and the first 8
+     * GICD_IPRIORITYR<n> registers are always RAZ/WI. The corresponding
+     * functionality is replaced by GICR_IPRIORITYR<n>. It doesn't need to
+     * sync them. So it needs to skip the field of GIC_INTERNAL irqs in bmp and
+     * offset.
+     */
+    field = (uint32_t *)(bmp + GIC_INTERNAL);
+    offset += (GIC_INTERNAL * 8) / 8;
     for_each_dist_irq_reg(irq, s->num_irq, 8) {
         reg = *field;
         kvm_gicd_access(s, offset, &reg, true);
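To see what the offset adjustment above amounts to (a worked example, assuming the usual GIC_INTERNAL value of 32 for the SGI/PPI interrupts):

#include <stdio.h>

#define GIC_INTERNAL 32

int main(void)
{
    /* Each GICD_IPRIORITYR<n> register holds 8-bit priorities for 4 IRQs,
     * so the GIC_INTERNAL internal interrupts occupy the first
     * GIC_INTERNAL / 4 = 8 registers, i.e. 32 bytes of register offset and
     * 32 entries of the priority bitmap -- exactly what
     * kvm_dist_get_priority()/kvm_dist_put_priority() now skip, because
     * those registers are RAZ/WI when affinity routing is enabled and are
     * handled through GICR_IPRIORITYR<n> instead.
     */
    printf("registers skipped: %d\n", GIC_INTERNAL / 4);            /* 8  */
    printf("offset skipped:    %d bytes\n", (GIC_INTERNAL * 8) / 8); /* 32 */
    return 0;
}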
hw/intc/armv7m_nvic.c
@@ -2183,7 +2183,11 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
     int regionlen;
 
     s->cpu = ARM_CPU(qemu_get_cpu(0));
-    assert(s->cpu);
+
+    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
+        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
+        return;
+    }
 
     if (s->num_irq > NVIC_MAX_IRQ) {
         error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
hw/m68k/mcf5206.c
@@ -512,19 +512,43 @@ static void m5206_mbar_writel(void *opaque, hwaddr offset,
     m5206_mbar_write(s, offset, value, 4);
 }
 
+static uint64_t m5206_mbar_readfn(void *opaque, hwaddr addr, unsigned size)
+{
+    switch (size) {
+    case 1:
+        return m5206_mbar_readb(opaque, addr);
+    case 2:
+        return m5206_mbar_readw(opaque, addr);
+    case 4:
+        return m5206_mbar_readl(opaque, addr);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void m5206_mbar_writefn(void *opaque, hwaddr addr,
+                               uint64_t value, unsigned size)
+{
+    switch (size) {
+    case 1:
+        m5206_mbar_writeb(opaque, addr, value);
+        break;
+    case 2:
+        m5206_mbar_writew(opaque, addr, value);
+        break;
+    case 4:
+        m5206_mbar_writel(opaque, addr, value);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static const MemoryRegionOps m5206_mbar_ops = {
-    .old_mmio = {
-        .read = {
-            m5206_mbar_readb,
-            m5206_mbar_readw,
-            m5206_mbar_readl,
-        },
-        .write = {
-            m5206_mbar_writeb,
-            m5206_mbar_writew,
-            m5206_mbar_writel,
-        },
-    },
+    .read = m5206_mbar_readfn,
+    .write = m5206_mbar_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
@@ -16,6 +16,7 @@
 #include "qapi/visitor.h"
 #include "qemu/bitops.h"
 #include "qemu/log.h"
+#include "crypto/random.h"
 #include "trace.h"
 
 #define TO_REG(offset) ((offset) >> 2)
@@ -154,6 +155,19 @@ static const uint32_t ast2500_a1_resets[ASPEED_SCU_NR_REGS] = {
     [BMC_DEV_ID] = 0x00002402U
 };
 
+static uint32_t aspeed_scu_get_random(void)
+{
+    Error *err = NULL;
+    uint32_t num;
+
+    if (qcrypto_random_bytes((uint8_t *)&num, sizeof(num), &err)) {
+        error_report_err(err);
+        exit(1);
+    }
+
+    return num;
+}
+
 static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
 {
     AspeedSCUState *s = ASPEED_SCU(opaque);
@@ -167,6 +181,12 @@ static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
     }
 
     switch (reg) {
+    case RNG_DATA:
+        /* On hardware, RNG_DATA works regardless of
+         * the state of the enable bit in RNG_CTRL
+         */
+        s->regs[RNG_DATA] = aspeed_scu_get_random();
+        break;
     case WAKEUP_EN:
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s: Read of write-only offset 0x%" HWADDR_PRIx "\n",
@@ -112,7 +112,8 @@ static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
 /* Called from RCU critical section */
 static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu,
                                                hwaddr addr,
-                                               IOMMUAccessFlags flag)
+                                               IOMMUAccessFlags flag,
+                                               int iommu_idx)
 {
     sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
     uint64_t tce;
@@ -428,7 +429,7 @@ static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
     entry.translated_addr = tce & page_mask;
     entry.addr_mask = ~page_mask;
     entry.perm = spapr_tce_iommu_access_flags(tce);
-    memory_region_notify_iommu(&tcet->iommu, entry);
+    memory_region_notify_iommu(&tcet->iommu, 0, entry);
 
     return H_SUCCESS;
 }
@@ -484,7 +484,7 @@ uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
 }
 
 static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
-                                          IOMMUAccessFlags flag)
+                                          IOMMUAccessFlags flag, int iommu_idx)
 {
     S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
     S390IOTLBEntry *entry;
@@ -589,7 +589,7 @@ static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
     }
 
     notify.perm = IOMMU_NONE;
-    memory_region_notify_iommu(&iommu->iommu_mr, notify);
+    memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
     notify.perm = entry->perm;
 }
 
@@ -601,7 +601,7 @@ static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
         g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
     }
 
-    memory_region_notify_iommu(&iommu->iommu_mr, notify);
+    memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
 }
 
 int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
@@ -450,15 +450,43 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr,
     }
 }
 
+static uint64_t sh7750_mem_readfn(void *opaque, hwaddr addr, unsigned size)
+{
+    switch (size) {
+    case 1:
+        return sh7750_mem_readb(opaque, addr);
+    case 2:
+        return sh7750_mem_readw(opaque, addr);
+    case 4:
+        return sh7750_mem_readl(opaque, addr);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void sh7750_mem_writefn(void *opaque, hwaddr addr,
+                               uint64_t value, unsigned size)
+{
+    switch (size) {
+    case 1:
+        sh7750_mem_writeb(opaque, addr, value);
+        break;
+    case 2:
+        sh7750_mem_writew(opaque, addr, value);
+        break;
+    case 4:
+        sh7750_mem_writel(opaque, addr, value);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static const MemoryRegionOps sh7750_mem_ops = {
-    .old_mmio = {
-        .read = {sh7750_mem_readb,
-                 sh7750_mem_readw,
-                 sh7750_mem_readl },
-        .write = {sh7750_mem_writeb,
-                  sh7750_mem_writew,
-                  sh7750_mem_writel },
-    },
+    .read = sh7750_mem_readfn,
+    .write = sh7750_mem_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
@@ -282,7 +282,8 @@ static void iommu_bad_addr(IOMMUState *s, hwaddr addr,
 /* Called from RCU critical section */
 static IOMMUTLBEntry sun4m_translate_iommu(IOMMUMemoryRegion *iommu,
                                            hwaddr addr,
-                                           IOMMUAccessFlags flags)
+                                           IOMMUAccessFlags flags,
+                                           int iommu_idx)
 {
     IOMMUState *is = container_of(iommu, IOMMUState, iommu);
     hwaddr page, pa;
@@ -73,7 +73,7 @@
 /* Called from RCU critical section */
 static IOMMUTLBEntry sun4u_translate_iommu(IOMMUMemoryRegion *iommu,
                                            hwaddr addr,
-                                           IOMMUAccessFlags flag)
+                                           IOMMUAccessFlags flag, int iommu_idx)
 {
     IOMMUState *is = container_of(iommu, IOMMUState, iommu);
     hwaddr baseaddr, offset;
@@ -507,6 +507,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
     if (memory_region_is_iommu(section->mr)) {
         VFIOGuestIOMMU *giommu;
         IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+        int iommu_idx;
 
         trace_vfio_listener_region_add_iommu(iova, end);
         /*
@@ -523,10 +524,13 @@ static void vfio_listener_region_add(MemoryListener *listener,
         llend = int128_add(int128_make64(section->offset_within_region),
                            section->size);
         llend = int128_sub(llend, int128_one());
+        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+                                                       MEMTXATTRS_UNSPECIFIED);
         iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                             IOMMU_NOTIFIER_ALL,
                             section->offset_within_region,
-                            int128_get64(llend));
+                            int128_get64(llend),
+                            iommu_idx);
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
 
         memory_region_register_iommu_notifier(section->mr, &giommu->n);
@@ -662,6 +662,8 @@ static void vhost_iommu_region_add(MemoryListener *listener,
                                             iommu_listener);
     struct vhost_iommu *iommu;
     Int128 end;
+    int iommu_idx;
+    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
 
     if (!memory_region_is_iommu(section->mr)) {
         return;
@@ -671,10 +673,13 @@ static void vhost_iommu_region_add(MemoryListener *listener,
     end = int128_add(int128_make64(section->offset_within_region),
                      section->size);
     end = int128_sub(end, int128_one());
+    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+                                                   MEMTXATTRS_UNSPECIFIED);
     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                         IOMMU_NOTIFIER_UNMAP,
                         section->offset_within_region,
-                        int128_get64(end));
+                        int128_get64(end),
+                        iommu_idx);
     iommu->mr = section->mr;
     iommu->iommu_offset = section->offset_within_address_space -
                           section->offset_within_region;
@@ -361,19 +361,43 @@ static void i6300esb_mem_writel(void *vp, hwaddr addr, uint32_t val)
     }
 }
 
+static uint64_t i6300esb_mem_readfn(void *opaque, hwaddr addr, unsigned size)
+{
+    switch (size) {
+    case 1:
+        return i6300esb_mem_readb(opaque, addr);
+    case 2:
+        return i6300esb_mem_readw(opaque, addr);
+    case 4:
+        return i6300esb_mem_readl(opaque, addr);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void i6300esb_mem_writefn(void *opaque, hwaddr addr,
+                                 uint64_t value, unsigned size)
+{
+    switch (size) {
+    case 1:
+        i6300esb_mem_writeb(opaque, addr, value);
+        break;
+    case 2:
+        i6300esb_mem_writew(opaque, addr, value);
+        break;
+    case 4:
+        i6300esb_mem_writel(opaque, addr, value);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static const MemoryRegionOps i6300esb_ops = {
-    .old_mmio = {
-        .read = {
-            i6300esb_mem_readb,
-            i6300esb_mem_readw,
-            i6300esb_mem_readl,
-        },
-        .write = {
-            i6300esb_mem_writeb,
-            i6300esb_mem_writew,
-            i6300esb_mem_writel,
-        },
-    },
+    .read = i6300esb_mem_readfn,
+    .write = i6300esb_mem_writefn,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
@@ -133,6 +133,8 @@ static inline void tswap64s(uint64_t *s)
 #define stq_p(p, v) stq_be_p(p, v)
 #define stfl_p(p, v) stfl_be_p(p, v)
 #define stfq_p(p, v) stfq_be_p(p, v)
+#define ldn_p(p, sz) ldn_be_p(p, sz)
+#define stn_p(p, sz, v) stn_be_p(p, sz, v)
 #else
 #define lduw_p(p) lduw_le_p(p)
 #define ldsw_p(p) ldsw_le_p(p)
@@ -145,6 +147,8 @@ static inline void tswap64s(uint64_t *s)
 #define stq_p(p, v) stq_le_p(p, v)
 #define stfl_p(p, v) stfl_le_p(p, v)
 #define stfq_p(p, v) stfq_le_p(p, v)
+#define ldn_p(p, sz) ldn_le_p(p, sz)
+#define stn_p(p, sz, v) stn_le_p(p, sz, v)
 #endif
 
 /* MMU memory access macros */
@@ -127,6 +127,15 @@ QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
  * structs into one.)
  */
 typedef struct CPUIOTLBEntry {
+    /*
+     * @addr contains:
+     *  - in the lower TARGET_PAGE_BITS, a physical section number
+     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
+     *    must be added to the virtual address to obtain:
+     *     + the ram_addr_t of the target RAM (if the physical section
+     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+     *     + the offset within the target MemoryRegion (otherwise)
+     */
     hwaddr addr;
     MemTxAttrs attrs;
 } CPUIOTLBEntry;
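As a hedged illustration of the layout documented above (variable names invented for the example, not taken from the patch), decomposing an entry's addr field would look roughly like:

    /* sketch: decompose CPUIOTLBEntry.addr as described in the comment */
    unsigned section_index  = entry->addr & ~TARGET_PAGE_MASK;
    hwaddr mr_or_ram_offset = (entry->addr & TARGET_PAGE_MASK) + vaddr;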
@@ -437,7 +437,16 @@ void tb_lock_reset(void);
 
 #if !defined(CONFIG_USER_ONLY)
 
-struct MemoryRegion *iotlb_to_region(CPUState *cpu,
+/**
+ * iotlb_to_section:
+ * @cpu: CPU performing the access
+ * @index: TCG CPU IOTLB entry
+ *
+ * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
+ * it refers to. @index will have been initially created and returned
+ * by memory_region_section_get_iotlb().
+ */
+struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                              hwaddr index, MemTxAttrs attrs);
 
 void tlb_fill(CPUState *cpu, target_ulong addr, int size,
@@ -469,7 +478,8 @@ void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
 
 MemoryRegionSection *
 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
-                                  hwaddr *xlat, hwaddr *plen);
+                                  hwaddr *xlat, hwaddr *plen,
+                                  MemTxAttrs attrs, int *prot);
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                        MemoryRegionSection *section,
                                        target_ulong vaddr,
@@ -98,18 +98,21 @@ struct IOMMUNotifier {
     /* Notify for address space range start <= addr <= end */
     hwaddr start;
     hwaddr end;
+    int iommu_idx;
     QLIST_ENTRY(IOMMUNotifier) node;
 };
 typedef struct IOMMUNotifier IOMMUNotifier;
 
 static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                        IOMMUNotifierFlag flags,
-                                       hwaddr start, hwaddr end)
+                                       hwaddr start, hwaddr end,
+                                       int iommu_idx)
 {
     n->notify = fn;
     n->notifier_flags = flags;
     n->start = start;
     n->end = end;
+    n->iommu_idx = iommu_idx;
 }
 
 /*
@@ -206,6 +209,20 @@ enum IOMMUMemoryRegionAttr {
  * to report whenever mappings are changed, by calling
  * memory_region_notify_iommu() (or, if necessary, by calling
  * memory_region_notify_one() for each registered notifier).
+ *
+ * Conceptually an IOMMU provides a mapping from input address
+ * to an output TLB entry. If the IOMMU is aware of memory transaction
+ * attributes and the output TLB entry depends on the transaction
+ * attributes, we represent this using IOMMU indexes. Each index
+ * selects a particular translation table that the IOMMU has:
+ *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ *   @translate takes an input address and an IOMMU index
+ * and the mapping returned can only depend on the input address and the
+ * IOMMU index.
+ *
+ * Most IOMMUs don't care about the transaction attributes and support
+ * only a single IOMMU index. A more complex IOMMU might have one index
+ * for secure transactions and one for non-secure transactions.
  */
 typedef struct IOMMUMemoryRegionClass {
     /* private */
@@ -234,9 +251,10 @@ typedef struct IOMMUMemoryRegionClass {
      * @iommu: the IOMMUMemoryRegion
      * @hwaddr: address to be translated within the memory region
      * @flag: requested access permissions
+     * @iommu_idx: IOMMU index for the translation
      */
     IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
-                               IOMMUAccessFlags flag);
+                               IOMMUAccessFlags flag, int iommu_idx);
     /* Returns minimum supported page size in bytes.
      * If this method is not provided then the minimum is assumed to
      * be TARGET_PAGE_SIZE.
@@ -290,6 +308,29 @@ typedef struct IOMMUMemoryRegionClass {
      */
     int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                     void *data);
+
+    /* Return the IOMMU index to use for a given set of transaction attributes.
+     *
+     * Optional method: if an IOMMU only supports a single IOMMU index then
+     * the default implementation of memory_region_iommu_attrs_to_index()
+     * will return 0.
+     *
+     * The indexes supported by an IOMMU must be contiguous, starting at 0.
+     *
+     * @iommu: the IOMMUMemoryRegion
+     * @attrs: memory transaction attributes
+     */
+    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
+
+    /* Return the number of IOMMU indexes this IOMMU supports.
+     *
+     * Optional method: if this method is not provided, then
+     * memory_region_iommu_num_indexes() will return 1, indicating that
+     * only a single IOMMU index is supported.
+     *
+     * @iommu: the IOMMUMemoryRegion
+     */
+    int (*num_indexes)(IOMMUMemoryRegion *iommu);
 } IOMMUMemoryRegionClass;
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
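A minimal sketch of the secure/non-secure case mentioned in the comment above (the callback shapes come from the new IOMMUMemoryRegionClass fields; the device and its policy are invented for illustration):

    /* hypothetical IOMMU with one translation table per security state */
    static int example_iommu_attrs_to_index(IOMMUMemoryRegion *iommu,
                                            MemTxAttrs attrs)
    {
        return attrs.secure ? 1 : 0;   /* index 1: secure, index 0: non-secure */
    }

    static int example_iommu_num_indexes(IOMMUMemoryRegion *iommu)
    {
        return 2;                      /* indexes must be contiguous from 0 */
    }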
@@ -971,11 +1012,13 @@ uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
  * should be notified with an UNMAP followed by a MAP.
  *
  * @iommu_mr: the memory region that was changed
+ * @iommu_idx: the IOMMU index for the translation table which has changed
  * @entry: the new entry in the IOMMU translation table. The entry
  *         replaces all old entries for the same virtual I/O address range.
  *         Deleted entries have .@perm == 0.
  */
 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+                                int iommu_idx,
                                 IOMMUTLBEntry entry);
 
 /**
@@ -1054,6 +1097,24 @@ int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                  enum IOMMUMemoryRegionAttr attr,
                                  void *data);
 
+/**
+ * memory_region_iommu_attrs_to_index: return the IOMMU index to
+ * use for translations with the given memory transaction attributes.
+ *
+ * @iommu_mr: the memory region
+ * @attrs: the memory transaction attributes
+ */
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+                                       MemTxAttrs attrs);
+
+/**
+ * memory_region_iommu_num_indexes: return the total number of IOMMU
+ * indexes that this IOMMU supports.
+ *
+ * @iommu_mr: the memory region
+ */
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
+
 /**
  * memory_region_name: get a memory region's name
  *
@@ -23,9 +23,6 @@ typedef enum {
     ARM_ENDIANNESS_BE32,
 } arm_endianness;
 
-/* armv7m.c */
-DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
-                         const char *kernel_filename, const char *cpu_type);
 /**
  * armv7m_load_kernel:
  * @cpu: CPU
@@ -33,9 +30,8 @@ DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
  * @mem_size: mem_size: maximum image size to load
  *
  * Load the guest image for an ARMv7M system. This must be called by
- * any ARMv7M board, either directly or via armv7m_init(). (This is
- * necessary to ensure that the CPU resets correctly on system reset,
- * as well as for kernel loading.)
+ * any ARMv7M board. (This is necessary to ensure that the CPU resets
+ * correctly on system reset, as well as for kernel loading.)
  */
 void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);
@@ -31,7 +31,10 @@
 
 #define TYPE_OR_IRQ "or-irq"
 
-#define MAX_OR_LINES 16
+/* This can safely be increased if necessary without breaking
+ * migration compatibility (as long as it remains greater than 15).
+ */
+#define MAX_OR_LINES 32
 
 typedef struct OrIRQState qemu_or_irq;
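For context on how a board consumes this device, a rough wiring sketch follows (the qdev/QOM call names are recalled from the contemporary API and are an assumption rather than part of the patch; nvic_dev and irq_no are placeholders):

    DeviceState *orgate = DEVICE(object_new(TYPE_OR_IRQ));

    /* up to MAX_OR_LINES (now 32) inputs may be requested */
    object_property_set_int(OBJECT(orgate), 24, "num-lines", &error_fatal);
    qdev_init_nofail(orgate);
    qdev_connect_gpio_out(orgate, 0, qdev_get_gpio_in(nvic_dev, irq_no));
    /* interrupt sources then drive qdev_get_gpio_in(orgate, i), i < 24 */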
@@ -290,6 +290,15 @@ typedef union {
  * For accessors that take a guest address rather than a
  * host address, see the cpu_{ld,st}_* accessors defined in
  * cpu_ldst.h.
+ *
+ * For cases where the size to be used is not fixed at compile time,
+ * there are
+ *  stn{endian}_p(ptr, sz, val)
+ * which stores @val to @ptr as an @endian-order number @sz bytes in size
+ * and
+ *  ldn{endian}_p(ptr, sz)
+ * which loads @sz bytes from @ptr as an unsigned @endian-order number
+ * and returns it in a uint64_t.
  */
 
 static inline int ldub_p(const void *ptr)
@@ -495,6 +504,49 @@ static inline unsigned long leul_to_cpu(unsigned long v)
 #endif
 }
 
+/* Store v to p as a sz byte value in host order */
+#define DO_STN_LDN_P(END) \
+static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v)  \
+{                                                                   \
+    switch (sz) {                                                   \
+    case 1:                                                         \
+        stb_p(ptr, v);                                              \
+        break;                                                      \
+    case 2:                                                         \
+        stw_ ## END ## _p(ptr, v);                                  \
+        break;                                                      \
+    case 4:                                                         \
+        stl_ ## END ## _p(ptr, v);                                  \
+        break;                                                      \
+    case 8:                                                         \
+        stq_ ## END ## _p(ptr, v);                                  \
+        break;                                                      \
+    default:                                                        \
+        g_assert_not_reached();                                     \
+    }                                                               \
+}                                                                   \
+static inline uint64_t ldn_## END ## _p(const void *ptr, int sz)    \
+{                                                                   \
+    switch (sz) {                                                   \
+    case 1:                                                         \
+        return ldub_p(ptr);                                         \
+    case 2:                                                         \
+        return lduw_ ## END ## _p(ptr);                             \
+    case 4:                                                         \
+        return (uint32_t)ldl_ ## END ## _p(ptr);                    \
+    case 8:                                                         \
+        return ldq_ ## END ## _p(ptr);                              \
+    default:                                                        \
+        g_assert_not_reached();                                     \
+    }                                                               \
+}
+
+DO_STN_LDN_P(he)
+DO_STN_LDN_P(le)
+DO_STN_LDN_P(be)
+
+#undef DO_STN_LDN_P
+
 #undef le_bswap
 #undef be_bswap
 #undef le_bswaps
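A minimal usage sketch of the new variable-size accessors (the values are illustrative only):

    uint8_t buf[8];

    stn_le_p(buf, 4, 0x12345678);     /* store 4 bytes, little-endian */
    uint64_t v = ldn_le_p(buf, 4);    /* v == 0x12345678 */
    stn_be_p(buf, 2, 0xabcd);         /* store 2 bytes, big-endian */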
@@ -429,6 +429,9 @@ struct CPUState {
     uint16_t pending_tlb_flush;
 
     int hvf_fd;
+
+    /* track IOMMUs whose translations we've cached in the TCG TLB */
+    GArray *iommu_notifiers;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);
memory.c: 31 changed lines
@@ -1799,6 +1799,9 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr,
     iommu_mr = IOMMU_MEMORY_REGION(mr);
     assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
     assert(n->start <= n->end);
+    assert(n->iommu_idx >= 0 &&
+           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
+
     QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
     memory_region_update_iommu_notify_flags(iommu_mr);
 }
@@ -1829,7 +1832,7 @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
     granularity = memory_region_iommu_get_min_page_size(iommu_mr);
 
     for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
-        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
+        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
         if (iotlb.perm != IOMMU_NONE) {
             n->notify(n, &iotlb);
         }
@@ -1891,6 +1894,7 @@ void memory_region_notify_one(IOMMUNotifier *notifier,
 }
 
 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+                                int iommu_idx,
                                 IOMMUTLBEntry entry)
 {
     IOMMUNotifier *iommu_notifier;
@@ -1898,8 +1902,10 @@ void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
     assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
 
     IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
+        if (iommu_notifier->iommu_idx == iommu_idx) {
             memory_region_notify_one(iommu_notifier, &entry);
+        }
     }
 }
 
 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
@@ -1915,6 +1921,29 @@ int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
     return imrc->get_attr(iommu_mr, attr, data);
 }
 
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+                                       MemTxAttrs attrs)
+{
+    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
+    if (!imrc->attrs_to_index) {
+        return 0;
+    }
+
+    return imrc->attrs_to_index(iommu_mr, attrs);
+}
+
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
+{
+    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
+    if (!imrc->num_indexes) {
+        return 1;
+    }
+
+    return imrc->num_indexes(iommu_mr);
+}
+
 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
 {
     uint8_t mask = 1 << client;
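Putting the new hooks together, a device model that keeps one mapping per IOMMU index would first map the transaction attributes to an index and then notify only that index. This is a sketch under the assumption that an IOMMUTLBEntry named 'entry' has already been filled in; it is not code from this series:

    int idx = memory_region_iommu_attrs_to_index(iommu_mr, attrs);

    /* ... build the IOMMUTLBEntry 'entry' for the changed mapping ... */
    memory_region_notify_iommu(iommu_mr, idx, entry);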
@@ -767,6 +767,24 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         return;
     }
 
+#ifndef CONFIG_USER_ONLY
+    /* The NVIC and M-profile CPU are two halves of a single piece of
+     * hardware; trying to use one without the other is a command line
+     * error and will result in segfaults if not caught here.
+     */
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        if (!env->nvic) {
+            error_setg(errp, "This board cannot be used with Cortex-M CPUs");
+            return;
+        }
+    } else {
+        if (env->nvic) {
+            error_setg(errp, "This board can only be used with Cortex-M CPUs");
+            return;
+        }
+    }
+#endif
+
     cpu_exec_realizefn(cs, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);
@@ -195,6 +195,15 @@ DEF_HELPER_FLAGS_5(sve_lsl_zpzz_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve_lsl_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
@@ -416,6 +425,230 @@ DEF_HELPER_FLAGS_4(sve_cpy_z_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 
 DEF_HELPER_FLAGS_4(sve_ext, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve_insr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_insr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_insr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_insr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_3(sve_rev_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_uunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_zip_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_punpk_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_compact_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_compact_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_2(sve_last_active_element, TCG_CALL_NO_RWG, s32, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_revb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_revb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_revb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_revh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmple_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmple_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmple_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_and_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_bic_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_eor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
@@ -425,3 +658,64 @@ DEF_HELPER_FLAGS_5(sve_orn_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_nor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_nand_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_brkpa, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_brkpb, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_brkpas, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_brkpbs, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_brka_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkb_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brka_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkb_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_brkas_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkbs_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkas_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkbs_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_brkn, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_while, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_smaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_smini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_umaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_umini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_5(gvec_recps_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_rsqrts_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
@@ -601,6 +601,25 @@ DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
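For readers unfamiliar with the helper macros: each DEF_HELPER_FLAGS_N declaration expands to a C prototype with ptr mapped to void * and i32 to uint32_t, so the first new entry corresponds roughly to the following (the parameter names are the usual convention in the helper implementations, not dictated by the macro):

    void helper_gvec_fadd_h(void *vd, void *vn, void *vm,
                            void *fpst, uint32_t desc);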
@@ -24,6 +24,7 @@
 
 %imm4_16_p1     16:4 !function=plus1
 %imm6_22_5      22:1 5:5
+%imm7_22_16     22:2 16:5
 %imm8_16_10     16:5 10:3
 %imm9_16_10     16:s6 10:3
 
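The new field follows the decodetree convention that the listed sub-fields are concatenated most-significant-first, so in C terms the extraction is roughly as follows (illustrative, not generated code):

    /* %imm7_22_16 22:2 16:5 -> insn bits [23:22] form the high two bits */
    imm7 = (extract32(insn, 22, 2) << 5) | extract32(insn, 16, 5);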
@ -41,6 +42,8 @@
|
||||||
|
|
||||||
# Signed 8-bit immediate, optionally shifted left by 8.
|
# Signed 8-bit immediate, optionally shifted left by 8.
|
||||||
%sh8_i8s 5:9 !function=expand_imm_sh8s
|
%sh8_i8s 5:9 !function=expand_imm_sh8s
|
||||||
|
# Unsigned 8-bit immediate, optionally shifted left by 8.
|
||||||
|
%sh8_i8u 5:9 !function=expand_imm_sh8u
|
||||||
|
|
||||||
# Either a copy of rd (at bit 0), or a different source
|
# Either a copy of rd (at bit 0), or a different source
|
||||||
# as propagated via the MOVPRFX instruction.
|
# as propagated via the MOVPRFX instruction.
|
||||||
|
@@ -58,6 +61,7 @@
 &rri_esz rd rn imm esz
 &rrr_esz rd rn rm esz
 &rpr_esz rd pg rn esz
+&rpr_s rd pg rn s
 &rprr_s rd pg rn rm s
 &rprr_esz rd pg rn rm esz
 &rprrr_esz rd pg rn rm ra esz
@@ -65,6 +69,8 @@
 &ptrue rd esz pat s
 &incdec_cnt rd pat esz imm d u
 &incdec2_cnt rd rn pat esz imm d u
+&incdec_pred rd pg esz d u
+&incdec2_pred rd rn pg esz d u

 ###########################################################################
 # Named instruction formats. These are generally used to
@@ -77,6 +83,9 @@
 @pd_pn ........ esz:2 .. .... ....... rn:4 . rd:4 &rr_esz
 @rd_rn ........ esz:2 ...... ...... rn:5 rd:5 &rr_esz

+# Two operand with governing predicate, flags setting
+@pd_pg_pn_s ........ . s:1 ...... .. pg:4 . rn:4 . rd:4 &rpr_s
+
 # Three operand with unused vector element size
 @rd_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 &rrr_esz esz=0
@@ -85,6 +94,15 @@

 # Three operand, vector element size
 @rd_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 &rrr_esz
+@pd_pn_pm ........ esz:2 .. rm:4 ....... rn:4 . rd:4 &rrr_esz
+@rdn_rm ........ esz:2 ...... ...... rm:5 rd:5 \
+        &rrr_esz rn=%reg_movprfx
+@rdn_sh_i8u ........ esz:2 ...... ...... ..... rd:5 \
+        &rri_esz rn=%reg_movprfx imm=%sh8_i8u
+@rdn_i8u ........ esz:2 ...... ... imm:8 rd:5 \
+        &rri_esz rn=%reg_movprfx
+@rdn_i8s ........ esz:2 ...... ... imm:s8 rd:5 \
+        &rri_esz rn=%reg_movprfx

 # Three operand with "memory" size, aka immediate left shift
 @rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri
@@ -94,6 +112,8 @@
         &rprr_esz rn=%reg_movprfx
 @rdm_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 \
         &rprr_esz rm=%reg_movprfx
+@rd_pg4_rn_rm ........ esz:2 . rm:5 .. pg:4 rn:5 rd:5 &rprr_esz
+@pd_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 . rd:4 &rprr_esz

 # Three register operand, with governing predicate, vector element size
 @rda_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 \
@@ -103,6 +123,7 @@
 # One register operand, with governing predicate, vector element size
 @rd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz
+@rd_pg4_pn ........ esz:2 ... ... .. pg:4 . rn:4 rd:5 &rpr_esz

 # Two register operands with a 6-bit signed immediate.
 @rd_rn_i6 ........ ... rn:5 ..... imm:s6 rd:5 &rri
@@ -125,6 +146,11 @@
 @rdn_dbm ........ .. .... dbm:13 rd:5 \
         &rr_dbm rn=%reg_movprfx

+# Predicate output, vector and immediate input,
+# controlling predicate, element size.
+@pd_pg_rn_i7 ........ esz:2 . imm:7 . pg:3 rn:5 . rd:4 &rpri_esz
+@pd_pg_rn_i5 ........ esz:2 . imm:s5 ... pg:3 rn:5 . rd:4 &rpri_esz
+
 # Basic Load/Store with 9-bit immediate offset
 @pd_rn_i9 ........ ........ ...... rn:5 . rd:4 \
         &rri imm=%imm9_16_10
@@ -138,6 +164,12 @@
 @incdec2_cnt ........ esz:2 .. .... ...... pat:5 rd:5 \
         &incdec2_cnt imm=%imm4_16_p1 rn=%reg_movprfx

+# One register, predicate.
+# User must fill in U and D.
+@incdec_pred ........ esz:2 .... .. ..... .. pg:4 rd:5 &incdec_pred
+@incdec2_pred ........ esz:2 .... .. ..... .. pg:4 rd:5 \
+        &incdec2_pred rn=%reg_movprfx
+
 ###########################################################################
 # Instruction patterns. Grouped according to the SVE encodingindex.xhtml.
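Each &arg-set and @format line above is decodetree input: the generator emits one argument struct per arg set and a decode function that extracts the named bitfields and hands them to a per-pattern trans_* hook. A standalone, much-simplified sketch of that flow for the three-operand @rd_rn_rm format; everything here is illustrative, and the real generated code also performs the opcode match and passes a DisasContext.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical simplification of decodetree output for
 * "&rrr_esz rd rn rm esz" / "@rd_rn_rm ... esz:2 . rm:5 ... ... rn:5 rd:5". */
typedef struct {
    int rd, rn, rm, esz;
} arg_rrr_esz;

static bool trans_FADD_zzz(arg_rrr_esz *a)
{
    printf("FADD_zzz z%d, z%d, z%d (esz=%d)\n", a->rd, a->rn, a->rm, a->esz);
    return true;
}

static bool decode_sketch(uint32_t insn)
{
    arg_rrr_esz a = {
        .rd  = insn & 0x1f,          /* rd:5  at [4:0]   */
        .rn  = (insn >> 5) & 0x1f,   /* rn:5  at [9:5]   */
        .rm  = (insn >> 16) & 0x1f,  /* rm:5  at [20:16] */
        .esz = (insn >> 22) & 3,     /* esz:2 at [23:22] */
    };
    return trans_FADD_zzz(&a);
}

int main(void)
{
    return decode_sketch(0x65000000u) ? 0 : 1;   /* FADD_zzz with all-zero fields */
}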
@@ -369,6 +401,145 @@ CPY_z_i 00000101 .. 01 .... 00 . ........ ..... @rdn_pg4 imm=%sh8_i8s
 EXT 00000101 001 ..... 000 ... rm:5 rd:5 \
         &rrri rn=%reg_movprfx imm=%imm8_16_10

+### SVE Permute - Unpredicated Group
+
+# SVE broadcast general register
+DUP_s 00000101 .. 1 00000 001110 ..... ..... @rd_rn
+
+# SVE broadcast indexed element
+DUP_x 00000101 .. 1 ..... 001000 rn:5 rd:5 \
+        &rri imm=%imm7_22_16
+
+# SVE insert SIMD&FP scalar register
+INSR_f 00000101 .. 1 10100 001110 ..... ..... @rdn_rm
+
+# SVE insert general register
+INSR_r 00000101 .. 1 00100 001110 ..... ..... @rdn_rm
+
+# SVE reverse vector elements
+REV_v 00000101 .. 1 11000 001110 ..... ..... @rd_rn
+
+# SVE vector table lookup
+TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm
+
+# SVE unpack vector elements
+UNPK 00000101 esz:2 1100 u:1 h:1 001110 rn:5 rd:5
+
+### SVE Permute - Predicates Group
+
+# SVE permute predicate elements
+ZIP1_p 00000101 .. 10 .... 010 000 0 .... 0 .... @pd_pn_pm
+ZIP2_p 00000101 .. 10 .... 010 001 0 .... 0 .... @pd_pn_pm
+UZP1_p 00000101 .. 10 .... 010 010 0 .... 0 .... @pd_pn_pm
+UZP2_p 00000101 .. 10 .... 010 011 0 .... 0 .... @pd_pn_pm
+TRN1_p 00000101 .. 10 .... 010 100 0 .... 0 .... @pd_pn_pm
+TRN2_p 00000101 .. 10 .... 010 101 0 .... 0 .... @pd_pn_pm
+
+# SVE reverse predicate elements
+REV_p 00000101 .. 11 0100 010 000 0 .... 0 .... @pd_pn
+
+# SVE unpack predicate elements
+PUNPKLO 00000101 00 11 0000 010 000 0 .... 0 .... @pd_pn_e0
+PUNPKHI 00000101 00 11 0001 010 000 0 .... 0 .... @pd_pn_e0
+
+### SVE Permute - Interleaving Group
+
+# SVE permute vector elements
+ZIP1_z 00000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm
+ZIP2_z 00000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm
+UZP1_z 00000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm
+UZP2_z 00000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
+TRN1_z 00000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
+TRN2_z 00000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
+
+### SVE Permute - Predicated Group
+
+# SVE compress active elements
+# Note esz >= 2
+COMPACT 00000101 .. 100001 100 ... ..... ..... @rd_pg_rn
+
+# SVE conditionally broadcast element to vector
+CLASTA_z 00000101 .. 10100 0 100 ... ..... ..... @rdn_pg_rm
+CLASTB_z 00000101 .. 10100 1 100 ... ..... ..... @rdn_pg_rm
+
+# SVE conditionally copy element to SIMD&FP scalar
+CLASTA_v 00000101 .. 10101 0 100 ... ..... ..... @rd_pg_rn
+CLASTB_v 00000101 .. 10101 1 100 ... ..... ..... @rd_pg_rn
+
+# SVE conditionally copy element to general register
+CLASTA_r 00000101 .. 11000 0 101 ... ..... ..... @rd_pg_rn
+CLASTB_r 00000101 .. 11000 1 101 ... ..... ..... @rd_pg_rn
+
+# SVE copy element to SIMD&FP scalar register
+LASTA_v 00000101 .. 10001 0 100 ... ..... ..... @rd_pg_rn
+LASTB_v 00000101 .. 10001 1 100 ... ..... ..... @rd_pg_rn
+
+# SVE copy element to general register
+LASTA_r 00000101 .. 10000 0 101 ... ..... ..... @rd_pg_rn
+LASTB_r 00000101 .. 10000 1 101 ... ..... ..... @rd_pg_rn
+
+# SVE copy element from SIMD&FP scalar register
+CPY_m_v 00000101 .. 100000 100 ... ..... ..... @rd_pg_rn
+
+# SVE copy element from general register to vector (predicated)
+CPY_m_r 00000101 .. 101000 101 ... ..... ..... @rd_pg_rn
+
+# SVE reverse within elements
+# Note esz >= operation size
+REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
+REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
+REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
+RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
+
+# SVE vector splice (predicated)
+SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
+
+### SVE Select Vectors Group
+
+# SVE select vector elements (predicated)
+SEL_zpzz 00000101 .. 1 ..... 11 .... ..... ..... @rd_pg4_rn_rm
+
+### SVE Integer Compare - Vectors Group
+
+# SVE integer compare_vectors
+CMPHS_ppzz 00100100 .. 0 ..... 000 ... ..... 0 .... @pd_pg_rn_rm
+CMPHI_ppzz 00100100 .. 0 ..... 000 ... ..... 1 .... @pd_pg_rn_rm
+CMPGE_ppzz 00100100 .. 0 ..... 100 ... ..... 0 .... @pd_pg_rn_rm
+CMPGT_ppzz 00100100 .. 0 ..... 100 ... ..... 1 .... @pd_pg_rn_rm
+CMPEQ_ppzz 00100100 .. 0 ..... 101 ... ..... 0 .... @pd_pg_rn_rm
+CMPNE_ppzz 00100100 .. 0 ..... 101 ... ..... 1 .... @pd_pg_rn_rm
+
+# SVE integer compare with wide elements
+# Note these require esz != 3.
+CMPEQ_ppzw 00100100 .. 0 ..... 001 ... ..... 0 .... @pd_pg_rn_rm
+CMPNE_ppzw 00100100 .. 0 ..... 001 ... ..... 1 .... @pd_pg_rn_rm
+CMPGE_ppzw 00100100 .. 0 ..... 010 ... ..... 0 .... @pd_pg_rn_rm
+CMPGT_ppzw 00100100 .. 0 ..... 010 ... ..... 1 .... @pd_pg_rn_rm
+CMPLT_ppzw 00100100 .. 0 ..... 011 ... ..... 0 .... @pd_pg_rn_rm
+CMPLE_ppzw 00100100 .. 0 ..... 011 ... ..... 1 .... @pd_pg_rn_rm
+CMPHS_ppzw 00100100 .. 0 ..... 110 ... ..... 0 .... @pd_pg_rn_rm
+CMPHI_ppzw 00100100 .. 0 ..... 110 ... ..... 1 .... @pd_pg_rn_rm
+CMPLO_ppzw 00100100 .. 0 ..... 111 ... ..... 0 .... @pd_pg_rn_rm
+CMPLS_ppzw 00100100 .. 0 ..... 111 ... ..... 1 .... @pd_pg_rn_rm
+
+### SVE Integer Compare - Unsigned Immediate Group
+
+# SVE integer compare with unsigned immediate
+CMPHS_ppzi 00100100 .. 1 ....... 0 ... ..... 0 .... @pd_pg_rn_i7
+CMPHI_ppzi 00100100 .. 1 ....... 0 ... ..... 1 .... @pd_pg_rn_i7
+CMPLO_ppzi 00100100 .. 1 ....... 1 ... ..... 0 .... @pd_pg_rn_i7
+CMPLS_ppzi 00100100 .. 1 ....... 1 ... ..... 1 .... @pd_pg_rn_i7
+
+### SVE Integer Compare - Signed Immediate Group
+
+# SVE integer compare with signed immediate
+CMPGE_ppzi 00100101 .. 0 ..... 000 ... ..... 0 .... @pd_pg_rn_i5
+CMPGT_ppzi 00100101 .. 0 ..... 000 ... ..... 1 .... @pd_pg_rn_i5
+CMPLT_ppzi 00100101 .. 0 ..... 001 ... ..... 0 .... @pd_pg_rn_i5
+CMPLE_ppzi 00100101 .. 0 ..... 001 ... ..... 1 .... @pd_pg_rn_i5
+CMPEQ_ppzi 00100101 .. 0 ..... 100 ... ..... 0 .... @pd_pg_rn_i5
+CMPNE_ppzi 00100101 .. 0 ..... 100 ... ..... 1 .... @pd_pg_rn_i5
+
 ### SVE Predicate Logical Operations Group

 # SVE predicate logical operations
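As a reminder of what the permute patterns above encode (independent of how QEMU implements them): ZIP1 interleaves the low halves of two vectors, UZP1 concatenates their even-numbered elements, and TRN1 interleaves the even elements of both inputs. A tiny standalone illustration of the first two on 4-element integer "vectors":

#include <stdio.h>

/* Architectural idea only; not QEMU code. */
static void zip1(int *d, const int *n, const int *m, int elts)
{
    for (int i = 0; i < elts / 2; i++) {
        d[2 * i]     = n[i];      /* low half of n ...            */
        d[2 * i + 1] = m[i];      /* ... interleaved with low half of m */
    }
}

static void uzp1(int *d, const int *n, const int *m, int elts)
{
    for (int i = 0; i < elts / 2; i++) {
        d[i]            = n[2 * i];   /* even elements of n        */
        d[i + elts / 2] = m[2 * i];   /* then even elements of m   */
    }
}

int main(void)
{
    int n[4] = {0, 1, 2, 3}, m[4] = {4, 5, 6, 7}, d[4];

    zip1(d, n, m, 4);
    printf("zip1: %d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 0 4 1 5 */
    uzp1(d, n, m, 4);
    printf("uzp1: %d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 0 2 4 6 */
    return 0;
}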
@@ -410,6 +581,83 @@ PFIRST 00100101 01 011 000 11000 00 .... 0 .... @pd_pn_e0
 # SVE predicate next active
 PNEXT 00100101 .. 011 001 11000 10 .... 0 .... @pd_pn

+### SVE Partition Break Group
+
+# SVE propagate break from previous partition
+BRKPA 00100101 0. 00 .... 11 .... 0 .... 0 .... @pd_pg_pn_pm_s
+BRKPB 00100101 0. 00 .... 11 .... 0 .... 1 .... @pd_pg_pn_pm_s
+
+# SVE partition break condition
+BRKA_z 00100101 0. 01000001 .... 0 .... 0 .... @pd_pg_pn_s
+BRKB_z 00100101 1. 01000001 .... 0 .... 0 .... @pd_pg_pn_s
+BRKA_m 00100101 0. 01000001 .... 0 .... 1 .... @pd_pg_pn_s
+BRKB_m 00100101 1. 01000001 .... 0 .... 1 .... @pd_pg_pn_s
+
+# SVE propagate break to next partition
+BRKN 00100101 0. 01100001 .... 0 .... 0 .... @pd_pg_pn_s
+
+### SVE Predicate Count Group
+
+# SVE predicate count
+CNTP 00100101 .. 100 000 10 .... 0 .... ..... @rd_pg4_pn
+
+# SVE inc/dec register by predicate count
+INCDECP_r 00100101 .. 10110 d:1 10001 00 .... ..... @incdec_pred u=1
+
+# SVE inc/dec vector by predicate count
+INCDECP_z 00100101 .. 10110 d:1 10000 00 .... ..... @incdec2_pred u=1
+
+# SVE saturating inc/dec register by predicate count
+SINCDECP_r_32 00100101 .. 1010 d:1 u:1 10001 00 .... ..... @incdec_pred
+SINCDECP_r_64 00100101 .. 1010 d:1 u:1 10001 10 .... ..... @incdec_pred
+
+# SVE saturating inc/dec vector by predicate count
+SINCDECP_z 00100101 .. 1010 d:1 u:1 10000 00 .... ..... @incdec2_pred
+
+### SVE Integer Compare - Scalars Group
+
+# SVE conditionally terminate scalars
+CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000
+
+# SVE integer compare scalar count and limit
+WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 1 rn:5 eq:1 rd:4
+
+### SVE Integer Wide Immediate - Unpredicated Group
+
+# SVE broadcast floating-point immediate (unpredicated)
+FDUP 00100101 esz:2 111 00 1110 imm:8 rd:5
+
+# SVE broadcast integer immediate (unpredicated)
+DUP_i 00100101 esz:2 111 00 011 . ........ rd:5 imm=%sh8_i8s
+
+# SVE integer add/subtract immediate (unpredicated)
+ADD_zzi 00100101 .. 100 000 11 . ........ ..... @rdn_sh_i8u
+SUB_zzi 00100101 .. 100 001 11 . ........ ..... @rdn_sh_i8u
+SUBR_zzi 00100101 .. 100 011 11 . ........ ..... @rdn_sh_i8u
+SQADD_zzi 00100101 .. 100 100 11 . ........ ..... @rdn_sh_i8u
+UQADD_zzi 00100101 .. 100 101 11 . ........ ..... @rdn_sh_i8u
+SQSUB_zzi 00100101 .. 100 110 11 . ........ ..... @rdn_sh_i8u
+UQSUB_zzi 00100101 .. 100 111 11 . ........ ..... @rdn_sh_i8u
+
+# SVE integer min/max immediate (unpredicated)
+SMAX_zzi 00100101 .. 101 000 110 ........ ..... @rdn_i8s
+UMAX_zzi 00100101 .. 101 001 110 ........ ..... @rdn_i8u
+SMIN_zzi 00100101 .. 101 010 110 ........ ..... @rdn_i8s
+UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u
+
+# SVE integer multiply immediate (unpredicated)
+MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s
+
+### SVE Floating Point Arithmetic - Unpredicated Group
+
+# SVE floating-point arithmetic (unpredicated)
+FADD_zzz 01100101 .. 0 ..... 000 000 ..... ..... @rd_rn_rm
+FSUB_zzz 01100101 .. 0 ..... 000 001 ..... ..... @rd_rn_rm
+FMUL_zzz 01100101 .. 0 ..... 000 010 ..... ..... @rd_rn_rm
+FTSMUL 01100101 .. 0 ..... 000 011 ..... ..... @rd_rn_rm
+FRECPS 01100101 .. 0 ..... 000 110 ..... ..... @rd_rn_rm
+FRSQRTS 01100101 .. 0 ..... 000 111 ..... ..... @rd_rn_rm
+
 ### SVE Memory - 32-bit Gather and Unsized Contiguous Group

 # SVE load predicate register
File diff suppressed because it is too large
@@ -67,18 +67,26 @@ static inline void assert_fp_access_checked(DisasContext *s)
 static inline int vec_reg_offset(DisasContext *s, int regno,
                                  int element, TCGMemOp size)
 {
-    int offs = 0;
+    int element_size = 1 << size;
+    int offs = element * element_size;
 #ifdef HOST_WORDS_BIGENDIAN
     /* This is complicated slightly because vfp.zregs[n].d[0] is
-     * still the low half and vfp.zregs[n].d[1] the high half
-     * of the 128 bit vector, even on big endian systems.
-     * Calculate the offset assuming a fully bigendian 128 bits,
-     * then XOR to account for the order of the two 64 bit halves.
+     * still the lowest and vfp.zregs[n].d[15] the highest of the
+     * 256 byte vector, even on big endian systems.
+     *
+     * Calculate the offset assuming fully little-endian,
+     * then XOR to account for the order of the 8-byte units.
+     *
+     * For 16 byte elements, the two 8 byte halves will not form a
+     * host int128 if the host is bigendian, since they're in the
+     * wrong order. However the only 16 byte operation we have is
+     * a move, so we can ignore this for the moment. More complicated
+     * operations will have to special case loading and storing from
+     * the zregs array.
      */
-    offs += (16 - ((element + 1) * (1 << size)));
-    offs ^= 8;
-#else
-    offs += element * (1 << size);
+    if (element_size < 8) {
+        offs ^= 8 - element_size;
+    }
 #endif
     offs += offsetof(CPUARMState, vfp.zregs[regno]);
     assert_fp_access_checked(s);
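The XOR in the new vec_reg_offset is worth a concrete check. A standalone sketch of just the arithmetic (the CPUARMState/zregs bookkeeping and assert_fp_access_checked are omitted) that prints the byte offsets it produces:

#include <stdint.h>
#include <stdio.h>

/* Offset of element 'element' of size (1 << size) bytes within a vector
 * stored as an array of host-endian uint64_t units. */
static int vec_elt_offset_sketch(int element, int size, int host_big_endian)
{
    int element_size = 1 << size;
    int offs = element * element_size;

    if (host_big_endian && element_size < 8) {
        /* Flip the position of the element within its 8-byte unit. */
        offs ^= 8 - element_size;
    }
    return offs;
}

int main(void)
{
    /* 16-bit elements (size = 1): little-endian hosts get 0, 2, 4, ...;
     * big-endian hosts get 6, 4, 2, 0, then 14, 12, ... per uint64_t. */
    for (int e = 0; e < 8; e++) {
        printf("H%d: LE offs %2d, BE offs %2d\n", e,
               vec_elt_offset_sketch(e, 1, 0),
               vec_elt_offset_sketch(e, 1, 1));
    }
    return 0;
}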
File diff suppressed because it is too large
@@ -9965,7 +9965,8 @@ static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
      * end up actually treating this as two 16-bit insns, though,
      * if it's half of a bl/blx pair that might span a page boundary.
      */
-    if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
+    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
+        arm_dc_feature(s, ARM_FEATURE_M)) {
         /* Thumb2 cores (including all M profile ones) always treat
          * 32-bit insns as 32-bit.
          */
@@ -10085,10 +10086,38 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
     int conds;
     int logic_cc;

-    /* The only 32 bit insn that's allowed for Thumb1 is the combined
-     * BL/BLX prefix and suffix.
+    /*
+     * ARMv6-M supports a limited subset of Thumb2 instructions.
+     * Other Thumb1 architectures allow only 32-bit
+     * combined BL/BLX prefix and suffix.
      */
-    if ((insn & 0xf800e800) != 0xf000e800) {
+    if (arm_dc_feature(s, ARM_FEATURE_M) &&
+        !arm_dc_feature(s, ARM_FEATURE_V7)) {
+        int i;
+        bool found = false;
+        const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
+                                        0xf3b08040 /* dsb */,
+                                        0xf3b08050 /* dmb */,
+                                        0xf3b08060 /* isb */,
+                                        0xf3e08000 /* mrs */,
+                                        0xf000d000 /* bl */};
+        const uint32_t armv6m_mask[] = {0xffe0d000,
+                                        0xfff0d0f0,
+                                        0xfff0d0f0,
+                                        0xfff0d0f0,
+                                        0xffe0d000,
+                                        0xf800d000};
+
+        for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
+            if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
+                found = true;
+                break;
+            }
+        }
+        if (!found) {
+            goto illegal_op;
+        }
+    } else if ((insn & 0xf800e800) != 0xf000e800) {
         ARCH(6T2);
     }
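The ARMv6-M whitelist above is a plain pattern/mask scan over the 32-bit encoding. A standalone sketch using the same table; the two test encodings in main() are examples I chose, not values from the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                       0xf3b08040 /* dsb */,
                                       0xf3b08050 /* dmb */,
                                       0xf3b08060 /* isb */,
                                       0xf3e08000 /* mrs */,
                                       0xf000d000 /* bl */};
static const uint32_t armv6m_mask[] = {0xffe0d000,
                                       0xfff0d0f0,
                                       0xfff0d0f0,
                                       0xfff0d0f0,
                                       0xffe0d000,
                                       0xf800d000};

/* True if the 32-bit Thumb2 encoding is on the ARMv6-M whitelist. */
static bool armv6m_allows(uint32_t insn)
{
    for (size_t i = 0; i < sizeof(armv6m_insn) / sizeof(armv6m_insn[0]); i++) {
        if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    printf("%d\n", armv6m_allows(0xf3bf8f4fu));  /* dsb sy          -> 1 */
    printf("%d\n", armv6m_allows(0xfb00f000u));  /* 32-bit mul      -> 0 */
    return 0;
}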
@@ -11009,7 +11038,11 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             }
             break;
         case 3: /* Special control operations. */
-            ARCH(7);
+            if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
+                !(arm_dc_feature(s, ARM_FEATURE_V6) &&
+                  arm_dc_feature(s, ARM_FEATURE_M))) {
+                goto illegal_op;
+            }
             op = (insn >> 4) & 0xf;
             switch (op) {
             case 2: /* clrex */
@@ -426,3 +426,72 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
+
+/* Floating-point trigonometric starting value.
+ * See the ARM ARM pseudocode function FPTrigSMul.
+ */
+static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
+{
+    float16 result = float16_mul(op1, op1, stat);
+    if (!float16_is_any_nan(result)) {
+        result = float16_set_sign(result, op2 & 1);
+    }
+    return result;
+}
+
+static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
+{
+    float32 result = float32_mul(op1, op1, stat);
+    if (!float32_is_any_nan(result)) {
+        result = float32_set_sign(result, op2 & 1);
+    }
+    return result;
+}
+
+static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
+{
+    float64 result = float64_mul(op1, op1, stat);
+    if (!float64_is_any_nan(result)) {
+        result = float64_set_sign(result, op2 & 1);
+    }
+    return result;
+}
+
+#define DO_3OP(NAME, FUNC, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+{                                                                          \
+    intptr_t i, oprsz = simd_oprsz(desc);                                  \
+    TYPE *d = vd, *n = vn, *m = vm;                                        \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
+        d[i] = FUNC(n[i], m[i], stat);                                     \
+    }                                                                      \
+}
+
+DO_3OP(gvec_fadd_h, float16_add, float16)
+DO_3OP(gvec_fadd_s, float32_add, float32)
+DO_3OP(gvec_fadd_d, float64_add, float64)
+
+DO_3OP(gvec_fsub_h, float16_sub, float16)
+DO_3OP(gvec_fsub_s, float32_sub, float32)
+DO_3OP(gvec_fsub_d, float64_sub, float64)
+
+DO_3OP(gvec_fmul_h, float16_mul, float16)
+DO_3OP(gvec_fmul_s, float32_mul, float32)
+DO_3OP(gvec_fmul_d, float64_mul, float64)
+
+DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
+DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
+DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)
+
+#ifdef TARGET_AARCH64
+
+DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
+DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
+DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)
+
+DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
+DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
+DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
+
+#endif
+#undef DO_3OP
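For reference, a hand expansion of one instance of the macro above, DO_3OP(gvec_fadd_s, float32_add, float32); HELPER(x) is QEMU's helper_##x name glue, so this is the body the compiler actually sees:

void HELPER(gvec_fadd_s)(void *vd, void *vn, void *vm, void *stat, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    float32 *d = vd, *n = vn, *m = vm;

    for (i = 0; i < oprsz / sizeof(float32); i++) {
        d[i] = float32_add(n[i], m[i], stat);
    }
}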