mirror of https://github.com/xqemu/xqemu.git
target-arm queue:
 * implement M profile exception return properly
 * cadence GEM: fix multiqueue handling bugs
 * pxa2xx.c: QOMify a device
 * arm/kvm: Remove trailing newlines from error_report()
 * stellaris: Don't hw_error() on bad register accesses
 * Add assertion about FSC format for syndrome registers
 * Move excnames[] array into arm_log_exceptions()
 * exynos: minor code cleanups
 * hw/arm/boot: take Linux/arm64 TEXT_OFFSET header field into account
 * Fix APSR writes via M profile MSR

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJY+ORHAAoJEDwlJe0UNgzeCAsP/isQdHG5XkvKXNQmg8LC6GYG
0tvHjgOiQMyuJBEpOKEpmFTWXMqfd04CDahEl/3MqUPnG3p/ILwjjNvgbcGACH11
VyvOCd3XwwbdQkYAJNyTHZu+8/Ila+FzjlJcW95MAvb/wxnzVyyBws9mvBletfVn
th0qVPgIgIxPYjae0Y714/k6UrTWPaUvSGEbjXvvsSqjwwHUYs7TJKuyDEQzrShG
+RsDuo4Qjx2YOATg1coKdY2nFDzpHn/my+RYSGqhYhNpEd12dlkLJ2cZKESU4RcI
2GqsW4sfqDBoGIOejuDVU3ZI4wP2wvzOlXVXsm117b9dndMGoMtEcxIjWD9rdtsk
ZXOTHjHWB+6SlUbzmJpt+aCuW2QkG3jMS3RKGChwNZCHNjx28AthZLw+6rqjPR3e
DkAAsk9mYpsbAWYD9/B0kyjkFhJlrNbvr+NbxrT19LjpztGvHYTJqDgbK1cneVF2
DB7IubVlLx/ILVkH8TEYqXG6COWTspLa8kW14DmZi1ts7ns3UGcTghdBbVIXNTaE
C4KrpFSgt2hAGeeXwJs704pCug9Rkpha2MM2//5Udk3YDLtFwzrTWPv6nq5+68HO
T3hhXZ7OfPRVrc88M8u46KjlzdDJTFKQbrmvL2ecyPaodEAbA3+cdbqmMbmWh0Qd
Fl8q4dFYPLii4xfOUkoH
=Pipo
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170420' into staging

target-arm queue:
 * implement M profile exception return properly
 * cadence GEM: fix multiqueue handling bugs
 * pxa2xx.c: QOMify a device
 * arm/kvm: Remove trailing newlines from error_report()
 * stellaris: Don't hw_error() on bad register accesses
 * Add assertion about FSC format for syndrome registers
 * Move excnames[] array into arm_log_exceptions()
 * exynos: minor code cleanups
 * hw/arm/boot: take Linux/arm64 TEXT_OFFSET header field into account
 * Fix APSR writes via M profile MSR

# gpg: Signature made Thu 20 Apr 2017 17:39:35 BST
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20170420: (24 commits)
  arm: Remove workarounds for old M-profile exception return implementation
  arm: Implement M profile exception return properly
  arm: Track M profile handler mode state in TB flags
  arm: Abstract out "are we singlestepping" test to utility function
  arm: Move condition-failed codepath generation out of if()
  arm: Move gen_set_condexec() and gen_set_pc_im() up in the file
  arm: Factor out "generate right kind of step exception"
  arm: Thumb shift operations should not permit interworking branches
  arm: Don't implement BXJ on M-profile CPUs
  xlnx-zynqmp: Set the Cadence GEM revision
  cadence_gem: Make the revision a property
  cadence_gem: Correct the interupt logic
  cadence_gem: Correct the multi-queue can rx logic
  cadence_gem: Read the correct queue descriptor
  hw/arm: Qomify pxa2xx.c
  arm/kvm: Remove trailing newlines from error_report()
  stellaris: Don't hw_error() on bad register accesses
  target/arm: Add assertion about FSC format for syndrome registers
  arm: Move excnames[] array into arm_log_exceptions()
  target/arm: Add missing entries to excnames[] for log strings
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit da92ada855
@@ -31,6 +31,9 @@
 #define KERNEL_LOAD_ADDR 0x00010000
 #define KERNEL64_LOAD_ADDR 0x00080000
 
+#define ARM64_TEXT_OFFSET_OFFSET 8
+#define ARM64_MAGIC_OFFSET 56
+
 typedef enum {
     FIXUP_NONE = 0, /* do nothing */
     FIXUP_TERMINATOR, /* end of insns */
@@ -768,6 +771,49 @@ static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
     return ret;
 }
 
+static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
+                                   hwaddr *entry)
+{
+    hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR;
+    uint8_t *buffer;
+    int size;
+
+    /* On aarch64, it's the bootloader's job to uncompress the kernel. */
+    size = load_image_gzipped_buffer(filename, LOAD_IMAGE_MAX_GUNZIP_BYTES,
+                                     &buffer);
+
+    if (size < 0) {
+        gsize len;
+
+        /* Load as raw file otherwise */
+        if (!g_file_get_contents(filename, (char **)&buffer, &len, NULL)) {
+            return -1;
+        }
+        size = len;
+    }
+
+    /* check the arm64 magic header value -- very old kernels may not have it */
+    if (memcmp(buffer + ARM64_MAGIC_OFFSET, "ARM\x64", 4) == 0) {
+        uint64_t hdrvals[2];
+
+        /* The arm64 Image header has text_offset and image_size fields at 8 and
+         * 16 bytes into the Image header, respectively. The text_offset field
+         * is only valid if the image_size is non-zero.
+         */
+        memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
+        if (hdrvals[1] != 0) {
+            kernel_load_offset = le64_to_cpu(hdrvals[0]);
+        }
+    }
+
+    *entry = mem_base + kernel_load_offset;
+    rom_add_blob_fixed(filename, buffer, size, *entry);
+
+    g_free(buffer);
+
+    return size;
+}
+
 static void arm_load_kernel_notify(Notifier *notifier, void *data)
 {
     CPUState *cs;
@@ -776,7 +822,7 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     int is_linux = 0;
     uint64_t elf_entry, elf_low_addr, elf_high_addr;
     int elf_machine;
-    hwaddr entry, kernel_load_offset;
+    hwaddr entry;
     static const ARMInsnFixup *primary_loader;
     ArmLoadKernelNotifier *n = DO_UPCAST(ArmLoadKernelNotifier,
                                          notifier, notifier);
@@ -841,14 +887,12 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
 
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         primary_loader = bootloader_aarch64;
-        kernel_load_offset = KERNEL64_LOAD_ADDR;
         elf_machine = EM_AARCH64;
     } else {
         primary_loader = bootloader;
         if (!info->write_board_setup) {
             primary_loader += BOOTLOADER_NO_BOARD_SETUP_OFFSET;
         }
-        kernel_load_offset = KERNEL_LOAD_ADDR;
         elf_machine = EM_ARM;
     }
 
@@ -900,17 +944,15 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
         kernel_size = load_uimage(info->kernel_filename, &entry, NULL,
                                   &is_linux, NULL, NULL);
     }
-    /* On aarch64, it's the bootloader's job to uncompress the kernel. */
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && kernel_size < 0) {
-        entry = info->loader_start + kernel_load_offset;
-        kernel_size = load_image_gzipped(info->kernel_filename, entry,
-                                         info->ram_size - kernel_load_offset);
+        kernel_size = load_aarch64_image(info->kernel_filename,
+                                         info->loader_start, &entry);
         is_linux = 1;
-    }
-    if (kernel_size < 0) {
-        entry = info->loader_start + kernel_load_offset;
+    } else if (kernel_size < 0) {
+        /* 32-bit ARM */
+        entry = info->loader_start + KERNEL_LOAD_ADDR;
         kernel_size = load_image_targphys(info->kernel_filename, entry,
-                                          info->ram_size - kernel_load_offset);
+                                          info->ram_size - KERNEL_LOAD_ADDR);
         is_linux = 1;
     }
     if (kernel_size < 0) {
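The loader change above keys off the Linux arm64 Image header. As background (an assumption based on the kernel's arm64 booting documentation, not code from this commit), the header fields the new constants point at look roughly like this:

#include <stdint.h>

/* Sketch of the Linux arm64 Image header (little-endian fields).
 * text_offset is at byte offset 8 and image_size at 16, which is what
 * ARM64_TEXT_OFFSET_OFFSET and the hdrvals[2] memcpy above rely on;
 * the "ARM\x64" magic sits at offset 56 (ARM64_MAGIC_OFFSET).
 */
struct arm64_image_header {
    uint32_t code0;       /* executable code / branch to stext */
    uint32_t code1;
    uint64_t text_offset; /* image load offset from a 2MB-aligned RAM base */
    uint64_t image_size;  /* effective image size; 0 on very old kernels */
    uint64_t flags;
    uint64_t res2;
    uint64_t res3;
    uint64_t res4;
    uint32_t magic;       /* 0x644d5241, i.e. "ARM\x64" */
    uint32_t res5;
};

When image_size is zero the text_offset value is not trustworthy, which is why the hunk above falls back to the fixed KERNEL64_LOAD_ADDR (0x00080000) in that case.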
@@ -22,6 +22,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/error-report.h"
 #include "qemu-common.h"
 #include "cpu.h"
 #include "sysemu/sysemu.h"
@@ -101,9 +102,9 @@ static Exynos4210State *exynos4_boards_init_common(MachineState *machine,
     MachineClass *mc = MACHINE_GET_CLASS(machine);
 
     if (smp_cpus != EXYNOS4210_NCPUS && !qtest_enabled()) {
-        fprintf(stderr, "%s board supports only %d CPU cores. Ignoring smp_cpus"
-                " value.\n",
-                mc->name, EXYNOS4210_NCPUS);
+        error_report("%s board supports only %d CPU cores, ignoring smp_cpus"
+                     " value",
+                     mc->name, EXYNOS4210_NCPUS);
     }
 
     exynos4_board_binfo.ram_size = exynos4_board_ram_size[board_type];
@@ -755,19 +755,18 @@ static void pxa2xx_ssp_reset(DeviceState *d)
     s->rx_start = s->rx_level = 0;
 }
 
-static int pxa2xx_ssp_init(SysBusDevice *sbd)
+static void pxa2xx_ssp_init(Object *obj)
 {
-    DeviceState *dev = DEVICE(sbd);
-    PXA2xxSSPState *s = PXA2XX_SSP(dev);
-
+    DeviceState *dev = DEVICE(obj);
+    PXA2xxSSPState *s = PXA2XX_SSP(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
     sysbus_init_irq(sbd, &s->irq);
 
-    memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_ssp_ops, s,
+    memory_region_init_io(&s->iomem, obj, &pxa2xx_ssp_ops, s,
                           "pxa2xx-ssp", 0x1000);
     sysbus_init_mmio(sbd, &s->iomem);
 
     s->bus = ssi_create_bus(dev, "ssi");
-    return 0;
 }
 
 /* Real-Time Clock */
@@ -2321,10 +2320,8 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
 
 static void pxa2xx_ssp_class_init(ObjectClass *klass, void *data)
 {
-    SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
     DeviceClass *dc = DEVICE_CLASS(klass);
 
-    sdc->init = pxa2xx_ssp_init;
     dc->reset = pxa2xx_ssp_reset;
     dc->vmsd = &vmstate_pxa2xx_ssp;
 }
@@ -2333,6 +2330,7 @@ static const TypeInfo pxa2xx_ssp_info = {
     .name = TYPE_PXA2XX_SSP,
     .parent = TYPE_SYS_BUS_DEVICE,
     .instance_size = sizeof(PXA2xxSSPState),
+    .instance_init = pxa2xx_ssp_init,
     .class_init = pxa2xx_ssp_class_init,
 };
 
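The pxa2xx hunks above are a routine QOM conversion: the SysBusDeviceClass::init hook (which had to return an int) is replaced by an instance_init function wired up through TypeInfo. A minimal sketch of the pattern with hypothetical names (MyDevState, TYPE_MYDEV, mydev_ops and mydev_class_init are placeholders, not code from this commit):

static void mydev_init(Object *obj)
{
    MyDevState *s = MYDEV(obj);               /* placeholder cast macro */
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    /* instance_init sets up IRQs, MMIO and child buses; it cannot fail,
     * so anything that can report an error belongs in realize() instead.
     */
    sysbus_init_irq(sbd, &s->irq);
    memory_region_init_io(&s->iomem, obj, &mydev_ops, s, "mydev", 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
}

static const TypeInfo mydev_info = {
    .name          = TYPE_MYDEV,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MyDevState),
    .instance_init = mydev_init,   /* replaces SysBusDeviceClass::init */
    .class_init    = mydev_class_init,
};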
@@ -108,7 +108,10 @@ static void gptm_reload(gptm_state *s, int n, int reset)
     } else if (s->mode[n] == 0xa) {
         /* PWM mode. Not implemented. */
     } else {
-        hw_error("TODO: 16-bit timer mode 0x%x\n", s->mode[n]);
+        qemu_log_mask(LOG_UNIMP,
+                      "GPTM: 16-bit timer mode unimplemented: 0x%x\n",
+                      s->mode[n]);
+        return;
     }
     s->tick[n] = tick;
     timer_mod(s->timer[n], tick);
@@ -149,7 +152,9 @@ static void gptm_tick(void *opaque)
     } else if (s->mode[n] == 0xa) {
         /* PWM mode. Not implemented. */
     } else {
-        hw_error("TODO: 16-bit timer mode 0x%x\n", s->mode[n]);
+        qemu_log_mask(LOG_UNIMP,
+                      "GPTM: 16-bit timer mode unimplemented: 0x%x\n",
+                      s->mode[n]);
     }
     gptm_update_irq(s);
 }
@@ -286,7 +291,8 @@ static void gptm_write(void *opaque, hwaddr offset,
         s->match_prescale[0] = value;
         break;
     default:
-        hw_error("gptm_write: Bad offset 0x%x\n", (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "GPTM: read at bad offset 0x%x\n", (int)offset);
     }
     gptm_update_irq(s);
 }
@@ -425,7 +431,10 @@ static int ssys_board_class(const ssys_state *s)
         }
         /* for unknown classes, fall through */
     default:
-        hw_error("ssys_board_class: Unknown class 0x%08x\n", did0);
+        /* This can only happen if the hardwired constant did0 value
+         * in this board's stellaris_board_info struct is wrong.
+         */
+        g_assert_not_reached();
     }
 }
 
@@ -479,8 +488,7 @@ static uint64_t ssys_read(void *opaque, hwaddr offset,
         case DID0_CLASS_SANDSTORM:
             return pllcfg_sandstorm[xtal];
         default:
-            hw_error("ssys_read: Unhandled class for PLLCFG read.\n");
-            return 0;
+            g_assert_not_reached();
         }
     }
     case 0x070: /* RCC2 */
@@ -512,7 +520,8 @@ static uint64_t ssys_read(void *opaque, hwaddr offset,
     case 0x1e4: /* USER1 */
         return s->user1;
     default:
-        hw_error("ssys_read: Bad offset 0x%x\n", (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SSYS: read at bad offset 0x%x\n", (int)offset);
         return 0;
     }
 }
@@ -614,7 +623,8 @@ static void ssys_write(void *opaque, hwaddr offset,
         s->ldoarst = value;
         break;
     default:
-        hw_error("ssys_write: Bad offset 0x%x\n", (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SSYS: write at bad offset 0x%x\n", (int)offset);
     }
     ssys_update(s);
 }
@@ -748,7 +758,8 @@ static uint64_t stellaris_i2c_read(void *opaque, hwaddr offset,
     case 0x20: /* MCR */
         return s->mcr;
    default:
-        hw_error("strllaris_i2c_read: Bad offset 0x%x\n", (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "stellaris_i2c: read at bad offset 0x%x\n", (int)offset);
        return 0;
    }
 }
@@ -823,17 +834,18 @@ static void stellaris_i2c_write(void *opaque, hwaddr offset,
         s->mris &= ~value;
         break;
     case 0x20: /* MCR */
-        if (value & 1)
-            hw_error(
-                     "stellaris_i2c_write: Loopback not implemented\n");
-        if (value & 0x20)
-            hw_error(
-                     "stellaris_i2c_write: Slave mode not implemented\n");
+        if (value & 1) {
+            qemu_log_mask(LOG_UNIMP, "stellaris_i2c: Loopback not implemented");
+        }
+        if (value & 0x20) {
+            qemu_log_mask(LOG_UNIMP,
+                          "stellaris_i2c: Slave mode not implemented");
+        }
         s->mcr = value & 0x31;
         break;
     default:
-        hw_error("stellaris_i2c_write: Bad offset 0x%x\n",
-                 (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "stellaris_i2c: write at bad offset 0x%x\n", (int)offset);
     }
     stellaris_i2c_update(s);
 }
@@ -1057,8 +1069,8 @@ static uint64_t stellaris_adc_read(void *opaque, hwaddr offset,
     case 0x30: /* SAC */
         return s->sac;
     default:
-        hw_error("strllaris_adc_read: Bad offset 0x%x\n",
-                 (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "stellaris_adc: read at bad offset 0x%x\n", (int)offset);
         return 0;
     }
 }
@@ -1078,8 +1090,9 @@ static void stellaris_adc_write(void *opaque, hwaddr offset,
         return;
     case 0x04: /* SSCTL */
         if (value != 6) {
-            hw_error("ADC: Unimplemented sequence %" PRIx64 "\n",
-                     value);
+            qemu_log_mask(LOG_UNIMP,
+                          "ADC: Unimplemented sequence %" PRIx64 "\n",
+                          value);
         }
         s->ssctl[n] = value;
         return;
@@ -1110,13 +1123,14 @@ static void stellaris_adc_write(void *opaque, hwaddr offset,
         s->sspri = value;
         break;
     case 0x28: /* PSSI */
-        hw_error("Not implemented: ADC sample initiate\n");
+        qemu_log_mask(LOG_UNIMP, "ADC: sample initiate unimplemented");
         break;
     case 0x30: /* SAC */
         s->sac = value;
         break;
     default:
-        hw_error("stellaris_adc_write: Bad offset 0x%x\n", (int)offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "stellaris_adc: write at bad offset 0x%x\n", (int)offset);
     }
     stellaris_adc_update(s);
 }
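A note on the stellaris conversion above: hw_error() aborts QEMU, which is the wrong response to something the guest can trigger from software. The hunks switch to qemu_log_mask(), using LOG_GUEST_ERROR for accesses the real hardware would not decode and LOG_UNIMP for features the model simply lacks. A minimal sketch of the idiom, using a made-up device name (not code from this commit):

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/hwaddr.h"

/* Hypothetical register read, shown only to illustrate the logging idiom. */
static uint64_t mydev_read(void *opaque, hwaddr offset, unsigned size)
{
    switch (offset) {
    case 0x00:
        return 0; /* real registers handled here */
    case 0x04:
        /* Register we know about but have not modelled. */
        qemu_log_mask(LOG_UNIMP,
                      "mydev: register 0x%" HWADDR_PRIx " not implemented\n",
                      offset);
        return 0;
    default:
        /* Guest touched an address the hardware does not decode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "mydev: read at bad offset 0x%" HWADDR_PRIx "\n",
                      offset);
        return 0;
    }
}

The messages only appear when the corresponding -d guest_errors or -d unimp logging category is enabled, so the guest keeps running instead of taking the whole VM down.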
@@ -30,6 +30,8 @@
 #define ARM_PHYS_TIMER_PPI 30
 #define ARM_VIRT_TIMER_PPI 27
 
+#define GEM_REVISION 0x40070106
+
 #define GIC_BASE_ADDR 0xf9000000
 #define GIC_DIST_ADDR 0xf9010000
 #define GIC_CPU_ADDR 0xf9020000
@@ -334,8 +336,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
             qemu_check_nic_model(nd, TYPE_CADENCE_GEM);
             qdev_set_nic_properties(DEVICE(&s->gem[i]), nd);
         }
+        object_property_set_int(OBJECT(&s->gem[i]), GEM_REVISION, "revision",
+                                &error_abort);
         object_property_set_int(OBJECT(&s->gem[i]), 2, "num-priority-queues",
-                                 &error_abort);
+                                &error_abort);
         object_property_set_bool(OBJECT(&s->gem[i]), true, "realized", &err);
         if (err) {
             error_propagate(errp, err);
@@ -102,7 +102,7 @@ typedef struct Exynos4210UartReg {
     uint32_t reset_value;
 } Exynos4210UartReg;
 
-static Exynos4210UartReg exynos4210_uart_regs[] = {
+static const Exynos4210UartReg exynos4210_uart_regs[] = {
     {"ULCON", ULCON, 0x00000000},
     {"UCON", UCON, 0x00003000},
     {"UFCON", UFCON, 0x00000000},
@@ -220,7 +220,7 @@ static uint8_t fifo_retrieve(Exynos4210UartFIFO *q)
     return ret;
 }
 
-static int fifo_elements_number(Exynos4210UartFIFO *q)
+static int fifo_elements_number(const Exynos4210UartFIFO *q)
 {
     if (q->sp < q->rp) {
         return q->size - q->rp + q->sp;
@@ -229,7 +229,7 @@ static int fifo_elements_number(Exynos4210UartFIFO *q)
     return q->sp - q->rp;
 }
 
-static int fifo_empty_elements_number(Exynos4210UartFIFO *q)
+static int fifo_empty_elements_number(const Exynos4210UartFIFO *q)
 {
     return q->size - fifo_elements_number(q);
 }
@@ -245,7 +245,7 @@ static void fifo_reset(Exynos4210UartFIFO *q)
     q->rp = 0;
 }
 
-static uint32_t exynos4210_uart_Tx_FIFO_trigger_level(Exynos4210UartState *s)
+static uint32_t exynos4210_uart_Tx_FIFO_trigger_level(const Exynos4210UartState *s)
 {
     uint32_t level = 0;
     uint32_t reg;
@@ -401,8 +401,8 @@ static uint64_t exynos4210_pmu_read(void *opaque, hwaddr offset,
                                     unsigned size)
 {
     Exynos4210PmuState *s = (Exynos4210PmuState *)opaque;
-    unsigned i;
     const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs;
+    unsigned int i;
 
     for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) {
         if (reg_p->offset == offset) {
@@ -420,8 +420,8 @@ static void exynos4210_pmu_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
 {
     Exynos4210PmuState *s = (Exynos4210PmuState *)opaque;
-    unsigned i;
     const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs;
+    unsigned int i;
 
     for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) {
         if (reg_p->offset == offset) {
@@ -300,6 +300,8 @@
 #define DESC_1_RX_SOF 0x00004000
 #define DESC_1_RX_EOF 0x00008000
 
+#define GEM_MODID_VALUE 0x00020118
+
 static inline unsigned tx_desc_get_buffer(unsigned *desc)
 {
     return desc[0];
@@ -481,14 +483,17 @@ static int gem_can_receive(NetClientState *nc)
     }
 
     for (i = 0; i < s->num_priority_queues; i++) {
-        if (rx_desc_get_ownership(s->rx_desc[i]) == 1) {
-            if (s->can_rx_state != 2) {
-                s->can_rx_state = 2;
-                DB_PRINT("can't receive - busy buffer descriptor (q%d) 0x%x\n",
-                         i, s->rx_desc_addr[i]);
-            }
-            return 0;
+        if (rx_desc_get_ownership(s->rx_desc[i]) != 1) {
+            break;
         }
     };
 
+    if (i == s->num_priority_queues) {
+        if (s->can_rx_state != 2) {
+            s->can_rx_state = 2;
+            DB_PRINT("can't receive - all the buffer descriptors are busy\n");
+        }
+        return 0;
+    }
+
     if (s->can_rx_state != 0) {
@@ -506,7 +511,18 @@ static void gem_update_int_status(CadenceGEMState *s)
 {
     int i;
 
-    if ((s->num_priority_queues == 1) && s->regs[GEM_ISR]) {
+    if (!s->regs[GEM_ISR]) {
+        /* ISR isn't set, clear all the interrupts */
+        for (i = 0; i < s->num_priority_queues; ++i) {
+            qemu_set_irq(s->irq[i], 0);
+        }
+        return;
+    }
+
+    /* If we get here we know s->regs[GEM_ISR] is set, so we don't need to
+     * check it again.
+     */
+    if (s->num_priority_queues == 1) {
         /* No priority queues, just trigger the interrupt */
         DB_PRINT("asserting int.\n");
         qemu_set_irq(s->irq[0], 1);
@@ -790,8 +806,8 @@ static void gem_get_rx_desc(CadenceGEMState *s, int q)
 {
     DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr[q]);
     /* read current descriptor */
-    cpu_physical_memory_read(s->rx_desc_addr[0],
-                             (uint8_t *)s->rx_desc[0], sizeof(s->rx_desc[0]));
+    cpu_physical_memory_read(s->rx_desc_addr[q],
+                             (uint8_t *)s->rx_desc[q], sizeof(s->rx_desc[q]));
 
     /* Descriptor owned by software ? */
     if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
@@ -1209,7 +1225,7 @@ static void gem_reset(DeviceState *d)
     s->regs[GEM_TXPAUSE] = 0x0000ffff;
     s->regs[GEM_TXPARTIALSF] = 0x000003ff;
     s->regs[GEM_RXPARTIALSF] = 0x000003ff;
-    s->regs[GEM_MODID] = 0x00020118;
+    s->regs[GEM_MODID] = s->revision;
     s->regs[GEM_DESCONF] = 0x02500111;
     s->regs[GEM_DESCONF2] = 0x2ab13fff;
     s->regs[GEM_DESCONF5] = 0x002f2145;
@@ -1271,7 +1287,6 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
 {
     CadenceGEMState *s;
     uint32_t retval;
-    int i;
     s = (CadenceGEMState *)opaque;
 
     offset >>= 2;
@@ -1282,9 +1297,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
     switch (offset) {
     case GEM_ISR:
         DB_PRINT("lowering irqs on ISR read\n");
-        for (i = 0; i < s->num_priority_queues; ++i) {
-            qemu_set_irq(s->irq[i], 0);
-        }
+        /* The interrupts get updated at the end of the function. */
         break;
     case GEM_PHYMNTNC:
         if (retval & GEM_PHYMNTNC_OP_R) {
@@ -1508,6 +1521,8 @@ static const VMStateDescription vmstate_cadence_gem = {
 
 static Property gem_properties[] = {
     DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
+    DEFINE_PROP_UINT32("revision", CadenceGEMState, revision,
+                       GEM_MODID_VALUE),
     DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
                       num_priority_queues, 1),
     DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
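The last hunk above turns the previously hard-coded module ID into a "revision" property whose default is the old GEM_MODID_VALUE, so existing boards are unchanged while an SoC model can pick its own revision. A brief usage sketch (my_soc_wire_gem is hypothetical; the xlnx-zynqmp hunk earlier in this commit does essentially the same thing with GEM_REVISION):

/* Sketch: override the GEM "revision" property before realize. */
static void my_soc_wire_gem(CadenceGEMState *gem, Error **errp)
{
    object_property_set_int(OBJECT(gem), 0x40070106, "revision", &error_abort);
    object_property_set_int(OBJECT(gem), 2, "num-priority-queues", &error_abort);
    object_property_set_bool(OBJECT(gem), true, "realized", errp);
}

After reset, gem_reset() then reports that value through the MODID register instead of the fixed constant.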
@@ -53,6 +53,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "hw/sysbus.h"
 #include "qemu/timer.h"
 #include "qemu/main-loop.h"
@@ -1372,8 +1373,9 @@ break;
     case L0_TCNTO: case L1_TCNTO:
     case L0_ICNTO: case L1_ICNTO:
     case L0_FRCNTO: case L1_FRCNTO:
-        fprintf(stderr, "\n[exynos4210.mct: write to RO register "
-                TARGET_FMT_plx "]\n\n", offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "exynos4210.mct: write to RO register " TARGET_FMT_plx,
+                      offset);
         break;
 
     case L0_INT_CSTAT: case L1_INT_CSTAT:
@@ -21,6 +21,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "hw/sysbus.h"
 #include "qemu/timer.h"
 #include "qemu-common.h"
@@ -252,9 +253,9 @@ static uint64_t exynos4210_pwm_read(void *opaque, hwaddr offset,
         break;
 
     default:
-        fprintf(stderr,
-                "[exynos4210.pwm: bad read offset " TARGET_FMT_plx "]\n",
-                offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "exynos4210.pwm: bad read offset " TARGET_FMT_plx,
+                      offset);
         break;
     }
     return value;
@@ -343,9 +344,9 @@ static void exynos4210_pwm_write(void *opaque, hwaddr offset,
         break;
 
     default:
-        fprintf(stderr,
-                "[exynos4210.pwm: bad write offset " TARGET_FMT_plx "]\n",
-                offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "exynos4210.pwm: bad write offset " TARGET_FMT_plx,
+                      offset);
         break;
 
     }
@@ -26,6 +26,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "hw/sysbus.h"
 #include "qemu/timer.h"
 #include "qemu-common.h"
@@ -370,9 +371,9 @@ static uint64_t exynos4210_rtc_read(void *opaque, hwaddr offset,
         break;
 
     default:
-        fprintf(stderr,
-                "[exynos4210.rtc: bad read offset " TARGET_FMT_plx "]\n",
-                offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "exynos4210.rtc: bad read offset " TARGET_FMT_plx,
+                      offset);
         break;
     }
     return value;
@@ -433,9 +434,9 @@ static void exynos4210_rtc_write(void *opaque, hwaddr offset,
         if (value > TICNT_THRESHOLD) {
             s->reg_ticcnt = value;
         } else {
-            fprintf(stderr,
-                    "[exynos4210.rtc: bad TICNT value %u ]\n",
-                    (uint32_t)value);
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "exynos4210.rtc: bad TICNT value %u",
+                          (uint32_t)value);
         }
         break;
 
@@ -500,9 +501,9 @@ static void exynos4210_rtc_write(void *opaque, hwaddr offset,
         break;
 
     default:
-        fprintf(stderr,
-                "[exynos4210.rtc: bad write offset " TARGET_FMT_plx "]\n",
-                offset);
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "exynos4210.rtc: bad write offset " TARGET_FMT_plx,
+                      offset);
         break;
 
     }
@@ -50,6 +50,7 @@ typedef struct CadenceGEMState {
     uint8_t num_priority_queues;
     uint8_t num_type1_screeners;
     uint8_t num_type2_screeners;
+    uint32_t revision;
 
     /* GEM registers backing store */
     uint32_t regs[CADENCE_GEM_MAXREG];
@@ -304,33 +304,6 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 }
 
 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
-static void arm_v7m_unassigned_access(CPUState *cpu, hwaddr addr,
-                                      bool is_write, bool is_exec, int opaque,
-                                      unsigned size)
-{
-    ARMCPU *arm = ARM_CPU(cpu);
-    CPUARMState *env = &arm->env;
-
-    /* ARMv7-M interrupt return works by loading a magic value into the PC.
-     * On real hardware the load causes the return to occur. The qemu
-     * implementation performs the jump normally, then does the exception
-     * return by throwing a special exception when when the CPU tries to
-     * execute code at the magic address.
-     */
-    if (env->v7m.exception != 0 && addr >= 0xfffffff0 && is_exec) {
-        cpu->exception_index = EXCP_EXCEPTION_EXIT;
-        cpu_loop_exit(cpu);
-    }
-
-    /* In real hardware an attempt to access parts of the address space
-     * with nothing there will usually cause an external abort.
-     * However our QEMU board models are often missing device models where
-     * the guest can boot anyway with the default read-as-zero/writes-ignored
-     * behaviour that you get without a QEMU unassigned_access hook.
-     * So just return here to retain that default behaviour.
-     */
-}
-
 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
@@ -338,17 +311,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUARMState *env = &cpu->env;
     bool ret = false;
 
-    /* ARMv7-M interrupt return works by loading a magic value
-     * into the PC. On real hardware the load causes the
-     * return to occur. The qemu implementation performs the
-     * jump normally, then does the exception return when the
-     * CPU tries to execute code at the magic address.
-     * This will cause the magic PC value to be pushed to
-     * the stack if an interrupt occurred at the wrong time.
-     * We avoid this by disabling interrupts when
-     * pc contains a magic address.
-     *
-     * ARMv7-M interrupt masking works differently than -A or -R.
+    /* ARMv7-M interrupt masking works differently than -A or -R.
      * There is no FIQ/IRQ distinction. Instead of I and F bits
     * masking FIQ and IRQ interrupts, an exception is taken only
     * if it is higher priority than the current execution priority
@@ -356,8 +319,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
      * currently active exception).
      */
     if (interrupt_request & CPU_INTERRUPT_HARD
-        && (armv7m_nvic_can_take_pending_exception(env->nvic))
-        && (env->regs[15] < 0xfffffff0)) {
+        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
         cs->exception_index = EXCP_IRQ;
         cc->do_interrupt(cs);
         ret = true;
@@ -1091,7 +1053,6 @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
     cc->do_interrupt = arm_v7m_cpu_do_interrupt;
 #endif
 
-    cc->do_unassigned_access = arm_v7m_unassigned_access;
     cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
 }
 
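The cpu.c changes above (and the translate.c changes later in this commit) revolve around the M-profile EXC_RETURN magic values: rather than trapping execution at the magic addresses through the unassigned-access hook, the new code recognises a magic value when an exception-return-capable instruction writes it to the PC. For reference, a hedged summary of the v7-M (no-FPU) encodings — the names below are mine, not QEMU identifiers:

/* ARMv7-M EXC_RETURN values; anything in the 0xff000000..0xffffffff range
 * is treated as a possible exception return by the new end-of-TB check.
 */
enum {
    EXC_RETURN_HANDLER_MSP = 0xfffffff1, /* return to Handler mode, main stack */
    EXC_RETURN_THREAD_MSP  = 0xfffffff9, /* return to Thread mode, main stack */
    EXC_RETURN_THREAD_PSP  = 0xfffffffd, /* return to Thread mode, process stack */
};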
@@ -58,6 +58,7 @@
 #define EXCP_SEMIHOST 16 /* semihosting call */
 #define EXCP_NOCP 17 /* v7M NOCP UsageFault */
 #define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
+/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET 1
 #define ARMV7M_EXCP_NMI 2
@@ -2290,6 +2291,9 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
 #define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
 #define ARM_TBFLAG_BE_DATA_SHIFT 20
 #define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
+/* For M profile only, Handler (ie not Thread) mode */
+#define ARM_TBFLAG_HANDLER_SHIFT 21
+#define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT)
 
 /* Bit usage when in AArch64 state */
 #define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */
@@ -2326,6 +2330,8 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
     (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
 #define ARM_TBFLAG_BE_DATA(F) \
     (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
+#define ARM_TBFLAG_HANDLER(F) \
+    (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
 #define ARM_TBFLAG_TBI0(F) \
     (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
 #define ARM_TBFLAG_TBI1(F) \
@@ -2516,6 +2522,10 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     }
     *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
 
+    if (env->v7m.exception != 0) {
+        *flags |= ARM_TBFLAG_HANDLER_MASK;
+    }
+
     *cs_base = 0;
 }
 
@@ -6271,6 +6271,25 @@ static void arm_log_exception(int idx)
 {
     if (qemu_loglevel_mask(CPU_LOG_INT)) {
         const char *exc = NULL;
+        static const char * const excnames[] = {
+            [EXCP_UDEF] = "Undefined Instruction",
+            [EXCP_SWI] = "SVC",
+            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+            [EXCP_DATA_ABORT] = "Data Abort",
+            [EXCP_IRQ] = "IRQ",
+            [EXCP_FIQ] = "FIQ",
+            [EXCP_BKPT] = "Breakpoint",
+            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+            [EXCP_HVC] = "Hypervisor Call",
+            [EXCP_HYP_TRAP] = "Hypervisor Trap",
+            [EXCP_SMC] = "Secure Monitor Call",
+            [EXCP_VIRQ] = "Virtual IRQ",
+            [EXCP_VFIQ] = "Virtual FIQ",
+            [EXCP_SEMIHOST] = "Semihosting call",
+            [EXCP_NOCP] = "v7M NOCP UsageFault",
+            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
+        };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
             exc = excnames[idx];
@@ -51,27 +51,6 @@ static inline bool excp_is_internal(int excp)
         || excp == EXCP_SEMIHOST;
 }
 
-/* Exception names for debug logging; note that not all of these
- * precisely correspond to architectural exceptions.
- */
-static const char * const excnames[] = {
-    [EXCP_UDEF] = "Undefined Instruction",
-    [EXCP_SWI] = "SVC",
-    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
-    [EXCP_DATA_ABORT] = "Data Abort",
-    [EXCP_IRQ] = "IRQ",
-    [EXCP_FIQ] = "FIQ",
-    [EXCP_BKPT] = "Breakpoint",
-    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
-    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
-    [EXCP_HVC] = "Hypervisor Call",
-    [EXCP_HYP_TRAP] = "Hypervisor Trap",
-    [EXCP_SMC] = "Secure Monitor Call",
-    [EXCP_VIRQ] = "Virtual IRQ",
-    [EXCP_VFIQ] = "Virtual FIQ",
-    [EXCP_SEMIHOST] = "Semihosting call",
-};
-
 /* Scale factor for generic timers, ie number of ns per tick.
  * This gives a 62.5MHz timer.
  */
@@ -940,7 +940,7 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
          * single step at this point so something has gone wrong.
          */
         error_report("%s: guest single-step while debugging unsupported"
-                     " (%"PRIx64", %"PRIx32")\n",
+                     " (%"PRIx64", %"PRIx32")",
                      __func__, env->pc, debug_exit->hsr);
         return false;
     }
@@ -965,7 +965,7 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
         break;
     }
     default:
-        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")\n",
+        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                      __func__, debug_exit->hsr, env->pc);
     }
 
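The hunks above only drop the trailing "\n" from error_report() format strings: error_report() terminates the line itself, so an embedded newline produces stray blank lines in the output. A trivial sketch of the convention, with a made-up message:

#include "qemu/osdep.h"
#include "qemu/error-report.h"

static void report_example(int err)
{
    /* No trailing newline: error_report() adds its own. */
    error_report("example-device: setup failed with error %d", err);
}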
@@ -130,7 +130,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
     if (unlikely(ret)) {
         ARMCPU *cpu = ARM_CPU(cs);
         CPUARMState *env = &cpu->env;
-        uint32_t syn, exc;
+        uint32_t syn, exc, fsc;
         unsigned int target_el;
         bool same_el;
 
@@ -145,19 +145,32 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
             env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
         }
         same_el = arm_current_el(env) == target_el;
-        /* AArch64 syndrome does not have an LPAE bit */
-        syn = fsr & ~(1 << 9);
 
+        if (fsr & (1 << 9)) {
+            /* LPAE format fault status register : bottom 6 bits are
+             * status code in the same form as needed for syndrome
+             */
+            fsc = extract32(fsr, 0, 6);
+        } else {
+            /* Short format FSR : this fault will never actually be reported
+             * to an EL that uses a syndrome register. Check that here,
+             * and use a (currently) reserved FSR code in case the constructed
+             * syndrome does leak into the guest somehow.
+             */
+            assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
+            fsc = 0x3f;
+        }
+
         /* For insn and data aborts we assume there is no instruction syndrome
          * information; this is always true for exceptions reported to EL1.
          */
         if (access_type == MMU_INST_FETCH) {
-            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
+            syn = syn_insn_abort(same_el, 0, fi.s1ptw, fsc);
             exc = EXCP_PREFETCH_ABORT;
         } else {
             syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                        same_el, fi.s1ptw,
-                                       access_type == MMU_DATA_STORE, syn);
+                                       access_type == MMU_DATA_STORE, fsc);
             if (access_type == MMU_DATA_STORE
                 && arm_feature(env, ARM_FEATURE_V6)) {
                 fsr |= (1 << 11);
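The hunk above distinguishes the two FSR formats before building a syndrome: long-descriptor (LPAE) FSRs carry a 6-bit status code that can be used directly, while short-format FSRs should never reach an exception level that uses syndrome registers. The same logic, factored into a standalone helper purely for illustration (this helper does not exist in the commit; syndrome_capable stands in for the "target EL uses a syndrome register" condition):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Map an ARM FSR value to the 6-bit FSC used in a syndrome, mirroring the
 * tlb_fill() hunk above.
 */
static uint32_t fsr_to_fsc(uint32_t fsr, bool syndrome_capable)
{
    if (fsr & (1 << 9)) {
        /* LPAE format: bottom 6 bits are already in syndrome form. */
        return fsr & 0x3f;
    }
    /* Short format: must not be reported via a syndrome register; return a
     * (currently) reserved code so any leaked value is recognisable.
     */
    assert(!syndrome_capable);
    return 0x3f;
}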
@@ -296,6 +296,30 @@ static void gen_step_complete_exception(DisasContext *s)
     s->is_jmp = DISAS_EXC;
 }
 
+static void gen_singlestep_exception(DisasContext *s)
+{
+    /* Generate the right kind of exception for singlestep, which is
+     * either the architectural singlestep or EXCP_DEBUG for QEMU's
+     * gdb singlestepping.
+     */
+    if (s->ss_active) {
+        gen_step_complete_exception(s);
+    } else {
+        gen_exception_internal(EXCP_DEBUG);
+    }
+}
+
+static inline bool is_singlestepping(DisasContext *s)
+{
+    /* Return true if we are singlestepping either because of
+     * architectural singlestep or QEMU gdbstub singlestep. This does
+     * not include the command line '-singlestep' mode which is rather
+     * misnamed as it only means "one instruction per TB" and doesn't
+     * affect the code we generate.
+     */
+    return s->singlestep_enabled || s->ss_active;
+}
+
 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_i32 tmp1 = tcg_temp_new_i32();
@@ -880,6 +904,21 @@ static const uint8_t table_logic_cc[16] = {
     1, /* mvn */
 };
 
+static inline void gen_set_condexec(DisasContext *s)
+{
+    if (s->condexec_mask) {
+        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        tcg_gen_movi_i32(tmp, val);
+        store_cpu_field(tmp, condexec_bits);
+    }
+}
+
+static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
+{
+    tcg_gen_movi_i32(cpu_R[15], val);
+}
+
 /* Set PC and Thumb state from an immediate address. */
 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 {
@@ -904,6 +943,51 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
     store_cpu_field(var, thumb);
 }
 
+/* Set PC and Thumb state from var. var is marked as dead.
+ * For M-profile CPUs, include logic to detect exception-return
+ * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
+ * and BX reg, and no others, and happens only for code in Handler mode.
+ */
+static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
+{
+    /* Generate the same code here as for a simple bx, but flag via
+     * s->is_jmp that we need to do the rest of the work later.
+     */
+    gen_bx(s, var);
+    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
+        s->is_jmp = DISAS_BX_EXCRET;
+    }
+}
+
+static inline void gen_bx_excret_final_code(DisasContext *s)
+{
+    /* Generate the code to finish possible exception return and end the TB */
+    TCGLabel *excret_label = gen_new_label();
+
+    /* Is the new PC value in the magic range indicating exception return? */
+    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
+    /* No: end the TB as we would for a DISAS_JMP */
+    if (is_singlestepping(s)) {
+        gen_singlestep_exception(s);
+    } else {
+        tcg_gen_exit_tb(0);
+    }
+    gen_set_label(excret_label);
+    /* Yes: this is an exception return.
+     * At this point in runtime env->regs[15] and env->thumb will hold
+     * the exception-return magic number, which do_v7m_exception_exit()
+     * will read. Nothing else will be able to see those values because
+     * the cpu-exec main loop guarantees that we will always go straight
+     * from raising the exception to the exception-handling code.
+     *
+     * gen_ss_advance(s) does nothing on M profile currently but
+     * calling it is conceptually the right thing as we have executed
+     * this instruction (compare SWI, HVC, SMC handling).
+     */
+    gen_ss_advance(s);
+    gen_exception_internal(EXCP_EXCEPTION_EXIT);
+}
+
 /* Variant of store_reg which uses branch&exchange logic when storing
    to r15 in ARM architecture v7 and above. The source must be a temporary
    and will be marked as dead. */
@@ -923,7 +1007,7 @@ static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 {
     if (reg == 15 && ENABLE_ARCH_5) {
-        gen_bx(s, var);
+        gen_bx_excret(s, var);
     } else {
         store_reg(s, reg, var);
     }
@@ -1056,11 +1140,6 @@ DO_GEN_ST(8, MO_UB)
 DO_GEN_ST(16, MO_UW)
 DO_GEN_ST(32, MO_UL)
 
-static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
-{
-    tcg_gen_movi_i32(cpu_R[15], val);
-}
-
 static inline void gen_hvc(DisasContext *s, int imm16)
 {
     /* The pre HVC helper handles cases when HVC gets trapped
@@ -1094,17 +1173,6 @@ static inline void gen_smc(DisasContext *s)
     s->is_jmp = DISAS_SMC;
 }
 
-static inline void
-gen_set_condexec (DisasContext *s)
-{
-    if (s->condexec_mask) {
-        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
-        TCGv_i32 tmp = tcg_temp_new_i32();
-        tcg_gen_movi_i32(tmp, val);
-        store_cpu_field(tmp, condexec_bits);
-    }
-}
-
 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 {
     gen_set_condexec(s);
@@ -4092,7 +4160,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
 
 static inline void gen_jmp (DisasContext *s, uint32_t dest)
 {
-    if (unlikely(s->singlestep_enabled || s->ss_active)) {
+    if (unlikely(is_singlestepping(s))) {
         /* An indirect jump so that we still trigger the debug exception. */
         if (s->thumb)
             dest |= 1;
@@ -9858,7 +9926,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 tmp = tcg_temp_new_i32();
                 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                 if (i == 15) {
-                    gen_bx(s, tmp);
+                    gen_bx_excret(s, tmp);
                 } else if (i == rn) {
                     loaded_var = tmp;
                     loaded_base = 1;
@@ -9959,7 +10027,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
             gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
             if (logic_cc)
                 gen_logic_CC(tmp);
-            store_reg_bx(s, rd, tmp);
+            store_reg(s, rd, tmp);
             break;
         case 1: /* Sign/zero extend. */
             op = (insn >> 20) & 7;
@@ -10485,7 +10553,12 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 }
                 break;
             case 4: /* bxj */
-                /* Trivial implementation equivalent to bx. */
+                /* Trivial implementation equivalent to bx.
+                 * This instruction doesn't exist at all for M-profile.
+                 */
+                if (arm_dc_feature(s, ARM_FEATURE_M)) {
+                    goto illegal_op;
+                }
                 tmp = load_reg(s, rn);
                 gen_bx(s, tmp);
                 break;
@@ -10885,7 +10958,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 goto illegal_op;
             }
             if (rs == 15) {
-                gen_bx(s, tmp);
+                gen_bx_excret(s, tmp);
             } else {
                 store_reg(s, rs, tmp);
             }
@@ -11075,9 +11148,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
                 tmp2 = tcg_temp_new_i32();
                 tcg_gen_movi_i32(tmp2, val);
                 store_reg(s, 14, tmp2);
+                gen_bx(s, tmp);
+            } else {
+                /* Only BX works as exception-return, not BLX */
+                gen_bx_excret(s, tmp);
             }
-            /* already thumb, no need to check */
-            gen_bx(s, tmp);
             break;
         }
         break;
@@ -11752,6 +11827,7 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
     dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
     dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
+    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags);
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
 
@@ -11851,14 +11927,6 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
             dc->is_jmp = DISAS_EXC;
             break;
         }
-#else
-        if (arm_dc_feature(dc, ARM_FEATURE_M)) {
-            /* Branches to the magic exception-return addresses should
-             * already have been caught via the arm_v7m_unassigned_access hook,
-             * and never get here.
-             */
-            assert(dc->pc < 0xfffffff0);
-        }
 #endif
 
         if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
@@ -11953,9 +12021,8 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
              ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
 
     } while (!dc->is_jmp && !tcg_op_buf_full() &&
-             !cs->singlestep_enabled &&
+             !is_singlestepping(dc) &&
              !singlestep &&
-             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);
 
@@ -11971,9 +12038,16 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
-    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
+    gen_set_condexec(dc);
+    if (dc->is_jmp == DISAS_BX_EXCRET) {
+        /* Exception return branches need some special case code at the
+         * end of the TB, which is complex enough that it has to
+         * handle the single-step vs not and the condition-failed
+         * insn codepath itself.
+         */
+        gen_bx_excret_final_code(dc);
+    } else if (unlikely(is_singlestepping(dc))) {
         /* Unconditional and "condition passed" instruction codepath. */
-        gen_set_condexec(dc);
         switch (dc->is_jmp) {
         case DISAS_SWI:
            gen_ss_advance(dc);
@@ -11993,24 +12067,8 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
-            if (dc->ss_active) {
-                gen_step_complete_exception(dc);
-            } else {
-                /* FIXME: Single stepping a WFI insn will not halt
-                   the CPU. */
-                gen_exception_internal(EXCP_DEBUG);
-            }
-        }
-        if (dc->condjmp) {
-            /* "Condition failed" instruction codepath. */
-            gen_set_label(dc->condlabel);
-            gen_set_condexec(dc);
-            gen_set_pc_im(dc, dc->pc);
-            if (dc->ss_active) {
-                gen_step_complete_exception(dc);
-            } else {
-                gen_exception_internal(EXCP_DEBUG);
-            }
+            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
+            gen_singlestep_exception(dc);
         }
     } else {
         /* While branches must always occur at the end of an IT block,
@@ -12021,7 +12079,6 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
-        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
@@ -12061,11 +12118,17 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
-        if (dc->condjmp) {
-            gen_set_label(dc->condlabel);
-            gen_set_condexec(dc);
-            gen_goto_tb(dc, 1, dc->pc);
-            dc->condjmp = 0;
-        }
    }
 
+    if (dc->condjmp) {
+        /* "Condition failed" instruction codepath for the branch/trap insn */
+        gen_set_label(dc->condlabel);
+        gen_set_condexec(dc);
+        if (unlikely(is_singlestepping(dc))) {
+            gen_set_pc_im(dc, dc->pc);
+            gen_singlestep_exception(dc);
+        } else {
+            gen_goto_tb(dc, 1, dc->pc);
+            dc->condjmp = 0;
+        }
+    }
+
@@ -31,6 +31,7 @@ typedef struct DisasContext {
     bool vfp_enabled; /* FP enabled via FPSCR.EN */
     int vec_len;
     int vec_stride;
+    bool v7m_handler_mode;
     /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
      * so that top level loop can generate correct syndrome information.
      */
@@ -134,6 +135,10 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
 #define DISAS_HVC 8
 #define DISAS_SMC 9
 #define DISAS_YIELD 10
+/* M profile branch which might be an exception return (and so needs
+ * custom end-of-TB code)
+ */
+#define DISAS_BX_EXCRET 11
 
 #ifdef TARGET_AARCH64
 void a64_translate_init(void);