mirror of https://github.com/xqemu/xqemu.git
target-arm queue:
* last few A64 Neon instructions
* fix some PL011 UART bugs causing occasional serial lockups
* fix the non-PCI AHCI device

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABCAAGBQJTKYb7AAoJEDwlJe0UNgzeo9IP/RaXAOFzCqtPQqzDHY/Cs8Tg
71qnsKwCR9VMSjeRHLcF2aG1CutXiVF9TSVVqeuRNpQ8WUSLcP+8AHeL05cLaWTA
tbeedolNrQEndrXgeO7uLJUzOkHtW0+7AZC9hZKdz6S9VHncmgiiII1+K4V/Po97
4o6rkekDsHMqP3TQGB4zlFU4P+6FOa2LCBOWk0b9Hid4E+9N0OJpRufsBLunA087
RZ+zqeZAKRp1GTIMC2FVuOEO9+dhg5UMEcGpj0G0TzhRXVDSXqYBLPaAtjK0dmgg
7C1M7kJCoekPw8qc/maEqbq+ziBgJ+Z0oQW0GVz1oLdmM8criIVQ/Yyx0JnmqyuI
nS1scf26jbcadE0N8Spf7nW94qPS/bMPMB92E2DCmHqWKOwl+d1J/CABYLCyWbd8
9dudxhgvyzOZf6bjr1QhDLJy/eFem3u/ugTAHdOoIlv5Cy7KWRLjH3lCIY+1HpQJ
kw4WKuqzYjEzrZwqhYx7NuKY5r+2yK/X7cdnYp+APLvopIut7hd6dhb/ODiO8p3E
rR7A6CwUUTN+B0kI9Tjx47FkI8pG84ioUbPs6ciJiZSc5JI+nfupdWmboA5WZLu5
Tj0v7OQBG0hDNDpxBqurkDZH8QPcXjmoM4emF8Wk1aSWzShy/NAharU48ZeS55EO
PcJ6UkeL8Al+3hHui9tQ
=3VGg
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140319' into staging

target-arm queue:
 * last few A64 Neon instructions
 * fix some PL011 UART bugs causing occasional serial lockups
 * fix the non-PCI AHCI device

# gpg: Signature made Wed 19 Mar 2014 12:00:59 GMT using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20140319:
  target-arm: A64: Add saturating accumulate ops (USQADD/SUQADD)
  target-arm: A64: Add saturating int ops (SQNEG/SQABS)
  pl011: fix incorrect logic to set the RXFF flag
  pl011: fix UARTRSR accesses corrupting the UARTCR value
  pl011: reset the fifo when enabled or disabled
  ahci: fix sysbus support

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit 319c66d5ab
hw/char/pl011.c
@@ -20,6 +20,7 @@ typedef struct PL011State {
     uint32_t readbuff;
     uint32_t flags;
     uint32_t lcr;
+    uint32_t rsr;
     uint32_t cr;
     uint32_t dmacr;
     uint32_t int_enabled;
@@ -81,13 +82,14 @@ static uint64_t pl011_read(void *opaque, hwaddr offset,
         }
         if (s->read_count == s->read_trigger - 1)
            s->int_level &= ~ PL011_INT_RX;
+        s->rsr = c >> 8;
         pl011_update(s);
         if (s->chr) {
             qemu_chr_accept_input(s->chr);
         }
         return c;
-    case 1: /* UARTCR */
-        return 0;
+    case 1: /* UARTRSR */
+        return s->rsr;
     case 6: /* UARTFR */
         return s->flags;
     case 8: /* UARTILPR */
@@ -146,8 +148,8 @@ static void pl011_write(void *opaque, hwaddr offset,
         s->int_level |= PL011_INT_TX;
         pl011_update(s);
         break;
-    case 1: /* UARTCR */
-        s->cr = value;
+    case 1: /* UARTRSR/UARTECR */
+        s->rsr = 0;
         break;
     case 6: /* UARTFR */
         /* Writes to Flag register are ignored.  */
@@ -162,6 +164,11 @@ static void pl011_write(void *opaque, hwaddr offset,
         s->fbrd = value;
         break;
     case 11: /* UARTLCR_H */
+        /* Reset the FIFO state on FIFO enable or disable */
+        if ((s->lcr ^ value) & 0x10) {
+            s->read_count = 0;
+            s->read_pos = 0;
+        }
         s->lcr = value;
         pl011_set_read_trigger(s);
         break;
@@ -214,7 +221,7 @@ static void pl011_put_fifo(void *opaque, uint32_t value)
     s->read_fifo[slot] = value;
     s->read_count++;
     s->flags &= ~PL011_FLAG_RXFE;
-    if (s->cr & 0x10 || s->read_count == 16) {
+    if (!(s->lcr & 0x10) || s->read_count == 16) {
         s->flags |= PL011_FLAG_RXFF;
     }
     if (s->read_count == s->read_trigger) {
@@ -242,13 +249,14 @@ static const MemoryRegionOps pl011_ops = {

 static const VMStateDescription vmstate_pl011 = {
     .name = "pl011",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
+    .version_id = 2,
+    .minimum_version_id = 2,
+    .minimum_version_id_old = 2,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(readbuff, PL011State),
         VMSTATE_UINT32(flags, PL011State),
         VMSTATE_UINT32(lcr, PL011State),
+        VMSTATE_UINT32(rsr, PL011State),
         VMSTATE_UINT32(cr, PL011State),
         VMSTATE_UINT32(dmacr, PL011State),
         VMSTATE_UINT32(int_enabled, PL011State),
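Editor's note on the RXFF change above: on the PL011, bit 4 of UARTLCR_H (FEN) enables the FIFOs; with FEN clear the receive "FIFO" is a single holding register, so the full flag must already be raised after one received byte. The old code tested bit 4 of the control register instead of the line-control register. The following is a minimal standalone sketch of just that flag rule, in plain C with illustrative names (it is not QEMU code and is only meant to make the condition concrete):

#include <stdint.h>
#include <stdio.h>

#define FLAG_RXFF (1u << 6)   /* UARTFR: receive FIFO full */
#define LCR_FEN   (1u << 4)   /* UARTLCR_H: FIFO enable    */

/* Decide whether RXFF should be set after a character arrives.
 * FIFOs disabled: depth 1, so one character already means "full".
 * FIFOs enabled:  full only once 16 entries are queued. */
static uint32_t update_rxff(uint32_t flags, uint32_t lcr, int read_count)
{
    if (!(lcr & LCR_FEN) || read_count == 16) {
        flags |= FLAG_RXFF;
    }
    return flags;
}

int main(void)
{
    printf("fifo off, 1 byte : RXFF=%d\n",
           !!(update_rxff(0, 0, 1) & FLAG_RXFF));        /* 1 */
    printf("fifo on,  1 byte : RXFF=%d\n",
           !!(update_rxff(0, LCR_FEN, 1) & FLAG_RXFF));  /* 0 */
    printf("fifo on, 16 bytes: RXFF=%d\n",
           !!(update_rxff(0, LCR_FEN, 16) & FLAG_RXFF)); /* 1 */
    return 0;
}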
hw/ide/ahci.c
@@ -118,11 +118,12 @@ static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
 static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
 {
     AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
-    PCIDevice *pci_dev = PCI_DEVICE(d);
+    PCIDevice *pci_dev =
+        (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

     DPRINTF(0, "raise irq\n");

-    if (msi_enabled(pci_dev)) {
+    if (pci_dev && msi_enabled(pci_dev)) {
         msi_notify(pci_dev, 0);
     } else {
         qemu_irq_raise(s->irq);
@@ -132,10 +133,12 @@ static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
 static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
 {
     AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
+    PCIDevice *pci_dev =
+        (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

     DPRINTF(0, "lower irq\n");

-    if (!msi_enabled(PCI_DEVICE(d))) {
+    if (!pci_dev || !msi_enabled(pci_dev)) {
         qemu_irq_lower(s->irq);
     }
 }
@@ -1311,7 +1314,7 @@ static const VMStateDescription vmstate_sysbus_ahci = {
     .name = "sysbus-ahci",
     .unmigratable = 1, /* Still buggy under I/O load */
     .fields = (VMStateField []) {
-        VMSTATE_AHCI(ahci, AHCIPCIState),
+        VMSTATE_AHCI(ahci, SysbusAHCIState),
         VMSTATE_END_OF_LIST()
     },
 };
@@ -1328,7 +1331,7 @@ static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
     SysbusAHCIState *s = SYSBUS_AHCI(dev);

-    ahci_init(&s->ahci, dev, NULL, s->num_ports);
+    ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);

     sysbus_init_mmio(sbd, &s->ahci.mem);
     sysbus_init_irq(sbd, &s->ahci.irq);
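Editor's note: the sysbus fix hinges on object_dynamic_cast() returning NULL when the AHCI state is not embedded in a PCI device, so MSI is only attempted when a PCI parent actually exists and the plain IRQ line is used otherwise. A rough standalone sketch of that fallback pattern, using stand-in types rather than the real QOM API, might look like this:

#include <stddef.h>
#include <stdio.h>

/* Stand-in object model: each device records whether it is a PCI device. */
typedef enum { DEV_PCI, DEV_SYSBUS } DevKind;

typedef struct Device    { DevKind kind; } Device;
typedef struct PCIDevice { Device parent; int msi_enabled; } PCIDevice;

/* Mimics object_dynamic_cast(): NULL when the object is not of the
 * requested type, otherwise the same pointer viewed as that type. */
static PCIDevice *dynamic_cast_pci(Device *obj)
{
    return obj->kind == DEV_PCI ? (PCIDevice *)obj : NULL;
}

static void irq_raise(Device *owner)
{
    PCIDevice *pci = dynamic_cast_pci(owner);

    if (pci && pci->msi_enabled) {
        printf("notify via MSI\n");
    } else {
        printf("raise legacy IRQ line\n");  /* the sysbus AHCI always lands here */
    }
}

int main(void)
{
    PCIDevice pci   = { { DEV_PCI }, 1 };
    Device   sysbus = { DEV_SYSBUS };

    irq_raise(&pci.parent);  /* prints: notify via MSI */
    irq_raise(&sysbus);      /* prints: raise legacy IRQ line */
    return 0;
}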
target-arm/helper.h
@@ -186,12 +186,20 @@ DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
 DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)

 /* neon_helper.c */
-DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_s32, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
 DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
 DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
@@ -375,12 +383,14 @@ DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
 DEF_HELPER_1(neon_negl_u16, i64, i64)
 DEF_HELPER_1(neon_negl_u32, i64, i64)

-DEF_HELPER_2(neon_qabs_s8, i32, env, i32)
-DEF_HELPER_2(neon_qabs_s16, i32, env, i32)
-DEF_HELPER_2(neon_qabs_s32, i32, env, i32)
-DEF_HELPER_2(neon_qneg_s8, i32, env, i32)
-DEF_HELPER_2(neon_qneg_s16, i32, env, i32)
-DEF_HELPER_2(neon_qneg_s32, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)

 DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr)
 DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
target-arm/neon_helper.c
@@ -236,6 +236,171 @@ uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
     return res;
 }

+/* Unsigned saturating accumulate of signed value
+ *
+ * Op1/Rn is treated as signed
+ * Op2/Rd is treated as unsigned
+ *
+ * Explicit casting is used to ensure the correct sign extension of
+ * inputs. The result is treated as a unsigned value and saturated as such.
+ *
+ * We use a macro for the 8/16 bit cases which expects signed integers of va,
+ * vb, and vr for interim calculation and an unsigned 32 bit result value r.
+ */
+
+#define USATACC(bits, shift) \
+    do { \
+        va = sextract32(a, shift, bits); \
+        vb = extract32(b, shift, bits); \
+        vr = va + vb; \
+        if (vr > UINT##bits##_MAX) { \
+            SET_QC(); \
+            vr = UINT##bits##_MAX; \
+        } else if (vr < 0) { \
+            SET_QC(); \
+            vr = 0; \
+        } \
+        r = deposit32(r, shift, bits, vr); \
+    } while (0)
+
+uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+    int16_t va, vb, vr;
+    uint32_t r = 0;
+
+    USATACC(8, 0);
+    USATACC(8, 8);
+    USATACC(8, 16);
+    USATACC(8, 24);
+    return r;
+}
+
+uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+    int32_t va, vb, vr;
+    uint64_t r = 0;
+
+    USATACC(16, 0);
+    USATACC(16, 16);
+    return r;
+}
+
+#undef USATACC
+
+uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+    int64_t va = (int32_t)a;
+    int64_t vb = (uint32_t)b;
+    int64_t vr = va + vb;
+    if (vr > UINT32_MAX) {
+        SET_QC();
+        vr = UINT32_MAX;
+    } else if (vr < 0) {
+        SET_QC();
+        vr = 0;
+    }
+    return vr;
+}
+
+uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+    uint64_t res;
+    res = a + b;
+    /* We only need to look at the pattern of SIGN bits to detect
+     * +ve/-ve saturation
+     */
+    if (~a & b & ~res & SIGNBIT64) {
+        SET_QC();
+        res = UINT64_MAX;
+    } else if (a & ~b & res & SIGNBIT64) {
+        SET_QC();
+        res = 0;
+    }
+    return res;
+}
+
+/* Signed saturating accumulate of unsigned value
+ *
+ * Op1/Rn is treated as unsigned
+ * Op2/Rd is treated as signed
+ *
+ * The result is treated as a signed value and saturated as such
+ *
+ * We use a macro for the 8/16 bit cases which expects signed integers of va,
+ * vb, and vr for interim calculation and an unsigned 32 bit result value r.
+ */
+
+#define SSATACC(bits, shift) \
+    do { \
+        va = extract32(a, shift, bits); \
+        vb = sextract32(b, shift, bits); \
+        vr = va + vb; \
+        if (vr > INT##bits##_MAX) { \
+            SET_QC(); \
+            vr = INT##bits##_MAX; \
+        } else if (vr < INT##bits##_MIN) { \
+            SET_QC(); \
+            vr = INT##bits##_MIN; \
+        } \
+        r = deposit32(r, shift, bits, vr); \
+    } while (0)
+
+uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+    int16_t va, vb, vr;
+    uint32_t r = 0;
+
+    SSATACC(8, 0);
+    SSATACC(8, 8);
+    SSATACC(8, 16);
+    SSATACC(8, 24);
+    return r;
+}
+
+uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+    int32_t va, vb, vr;
+    uint32_t r = 0;
+
+    SSATACC(16, 0);
+    SSATACC(16, 16);
+
+    return r;
+}
+
+#undef SSATACC
+
+uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+    int64_t res;
+    int64_t op1 = (uint32_t)a;
+    int64_t op2 = (int32_t)b;
+    res = op1 + op2;
+    if (res > INT32_MAX) {
+        SET_QC();
+        res = INT32_MAX;
+    } else if (res < INT32_MIN) {
+        SET_QC();
+        res = INT32_MIN;
+    }
+    return res;
+}
+
+uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+    uint64_t res;
+    res = a + b;
+    /* We only need to look at the pattern of SIGN bits to detect an overflow */
+    if (((a & res)
+       | (~b & res)
+       | (a & ~b)) & SIGNBIT64) {
+        SET_QC();
+        res = INT64_MAX;
+    }
+    return res;
+}
+
+
 #define NEON_USAT(dest, src1, src2, type) do { \
     uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
     if (tmp != (type)tmp) { \
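Editor's note on USQADD semantics: each lane adds a sign-extended Rn element to an unsigned Rd element and clamps the sum to the unsigned range, setting the cumulative saturation (QC) flag whenever clamping happens. A minimal per-byte sketch outside QEMU (no CPUARMState; QC modeled as a plain flag, names illustrative) is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool qc;  /* stands in for the FPSR.QC cumulative saturation bit */

/* Unsigned saturating accumulate of a signed value, one 8-bit lane:
 * acc (unsigned) += addend (signed), clamped to 0..255. */
static uint8_t usqadd8(uint8_t acc, int8_t addend)
{
    int16_t sum = (int16_t)acc + (int16_t)addend;  /* wide enough, never overflows */

    if (sum > UINT8_MAX) {
        qc = true;
        return UINT8_MAX;
    }
    if (sum < 0) {
        qc = true;
        return 0;
    }
    return (uint8_t)sum;
}

int main(void)
{
    uint8_t r;

    r = usqadd8(200, 100);                  /* clamps high */
    printf("%u qc=%d\n", (unsigned)r, qc);  /* 255 qc=1 */

    qc = false;
    r = usqadd8(10, -20);                   /* clamps low */
    printf("%u qc=%d\n", (unsigned)r, qc);  /* 0 qc=1 */

    qc = false;
    r = usqadd8(10, 20);                    /* in range, QC untouched */
    printf("%u qc=%d\n", (unsigned)r, qc);  /* 30 qc=0 */
    return 0;
}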
@@ -1776,6 +1941,28 @@ uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
     return x;
 }

+uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
+{
+    if (x == SIGNBIT64) {
+        SET_QC();
+        x = ~SIGNBIT64;
+    } else if ((int64_t)x < 0) {
+        x = -x;
+    }
+    return x;
+}
+
+uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
+{
+    if (x == SIGNBIT64) {
+        SET_QC();
+        x = ~SIGNBIT64;
+    } else {
+        x = -x;
+    }
+    return x;
+}
+
 /* NEON Float helpers. */
 uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
 {
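Editor's note: the one interesting corner in the 64-bit SQABS/SQNEG helpers above is INT64_MIN — its two's-complement negation does not fit in a signed 64-bit value, so the result saturates to INT64_MAX (i.e. ~SIGNBIT64) and QC is set. A tiny standalone check of that behaviour (plain C sketch, QC again modeled as a flag) could be:

#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static bool qc;

/* Saturating 64-bit negation, as SQNEG does per element:
 * -INT64_MIN is unrepresentable, so clamp to INT64_MAX and flag it. */
static int64_t sqneg64(int64_t x)
{
    if (x == INT64_MIN) {
        qc = true;
        return INT64_MAX;
    }
    return -x;
}

int main(void)
{
    int64_t r;

    r = sqneg64(INT64_MIN);
    printf("%" PRId64 " qc=%d\n", r, qc);  /* 9223372036854775807 qc=1 */

    qc = false;
    r = sqneg64(42);
    printf("%" PRId64 " qc=%d\n", r, qc);  /* -42 qc=0 */
    return 0;
}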
target-arm/translate-a64.c
@@ -73,6 +73,7 @@ typedef struct AArch64DecodeTable {
 } AArch64DecodeTable;

 /* Function prototype for gen_ functions for calling Neon helpers */
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
 typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
 typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
 typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
@@ -6942,6 +6943,13 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u,
          */
         tcg_gen_not_i64(tcg_rd, tcg_rn);
         break;
+    case 0x7: /* SQABS, SQNEG */
+        if (u) {
+            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
+        } else {
+            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
+        }
+        break;
     case 0xa: /* CMLT */
         /* 64 bit integer comparison against zero, result is
          * test ? (2^64 - 1) : 0. We implement via setcond(!test) and
@@ -7313,6 +7321,101 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
     }
 }

+/* Remaining saturating accumulating ops */
+static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
+                                bool is_q, int size, int rn, int rd)
+{
+    bool is_double = (size == 3);
+
+    if (is_double) {
+        TCGv_i64 tcg_rn = tcg_temp_new_i64();
+        TCGv_i64 tcg_rd = tcg_temp_new_i64();
+        int pass;
+
+        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+            read_vec_element(s, tcg_rn, rn, pass, MO_64);
+            read_vec_element(s, tcg_rd, rd, pass, MO_64);
+
+            if (is_u) { /* USQADD */
+                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+            } else { /* SUQADD */
+                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+            }
+            write_vec_element(s, tcg_rd, rd, pass, MO_64);
+        }
+        if (is_scalar) {
+            clear_vec_high(s, rd);
+        }
+
+        tcg_temp_free_i64(tcg_rd);
+        tcg_temp_free_i64(tcg_rn);
+    } else {
+        TCGv_i32 tcg_rn = tcg_temp_new_i32();
+        TCGv_i32 tcg_rd = tcg_temp_new_i32();
+        int pass, maxpasses;
+
+        if (is_scalar) {
+            maxpasses = 1;
+        } else {
+            maxpasses = is_q ? 4 : 2;
+        }
+
+        for (pass = 0; pass < maxpasses; pass++) {
+            if (is_scalar) {
+                read_vec_element_i32(s, tcg_rn, rn, pass, size);
+                read_vec_element_i32(s, tcg_rd, rd, pass, size);
+            } else {
+                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
+                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+            }
+
+            if (is_u) { /* USQADD */
+                switch (size) {
+                case 0:
+                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+                    break;
+                case 1:
+                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+                    break;
+                case 2:
+                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+                    break;
+                default:
+                    g_assert_not_reached();
+                }
+            } else { /* SUQADD */
+                switch (size) {
+                case 0:
+                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+                    break;
+                case 1:
+                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+                    break;
+                case 2:
+                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+                    break;
+                default:
+                    g_assert_not_reached();
+                }
+            }
+
+            if (is_scalar) {
+                TCGv_i64 tcg_zero = tcg_const_i64(0);
+                write_vec_element(s, tcg_zero, rd, 0, MO_64);
+                tcg_temp_free_i64(tcg_zero);
+            }
+            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+        }
+
+        if (!is_q) {
+            clear_vec_high(s, rd);
+        }
+
+        tcg_temp_free_i32(tcg_rd);
+        tcg_temp_free_i32(tcg_rn);
+    }
+}
+
 /* C3.6.12 AdvSIMD scalar two reg misc
  *  31 30 29 28     24 23  22 21       17 16    12 11 10 9    5 4    0
  * +-----+---+-----------+------+-----------+--------+-----+------+------+
@@ -7332,6 +7435,11 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
     TCGv_ptr tcg_fpstatus;

     switch (opcode) {
+    case 0x3: /* USQADD / SUQADD*/
+        handle_2misc_satacc(s, true, u, false, size, rn, rd);
+        return;
+    case 0x7: /* SQABS / SQNEG */
+        break;
     case 0xa: /* CMLT */
         if (u) {
             unallocated_encoding(s);
@@ -7417,10 +7525,7 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
         }
         break;
     default:
-        /* Other categories of encoding in this class:
-         *  + SUQADD/USQADD/SQABS/SQNEG : size 8, 16, 32 or 64
-         */
-        unsupported_encoding(s, insn);
+        unallocated_encoding(s);
         return;
     }

@@ -7441,11 +7546,25 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
         write_fp_dreg(s, rd, tcg_rd);
         tcg_temp_free_i64(tcg_rd);
         tcg_temp_free_i64(tcg_rn);
-    } else if (size == 2) {
-        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+    } else {
+        TCGv_i32 tcg_rn = tcg_temp_new_i32();
         TCGv_i32 tcg_rd = tcg_temp_new_i32();

+        read_vec_element_i32(s, tcg_rn, rn, 0, size);
+
         switch (opcode) {
+        case 0x7: /* SQABS, SQNEG */
+        {
+            NeonGenOneOpEnvFn *genfn;
+            static NeonGenOneOpEnvFn * const fns[3][2] = {
+                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
+            };
+            genfn = fns[size][u];
+            genfn(tcg_rd, cpu_env, tcg_rn);
+            break;
+        }
         case 0x1a: /* FCVTNS */
         case 0x1b: /* FCVTMS */
         case 0x1c: /* FCVTAS */
@@ -7475,8 +7594,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
         write_fp_sreg(s, rd, tcg_rd);
         tcg_temp_free_i32(tcg_rd);
         tcg_temp_free_i32(tcg_rn);
-    } else {
-        g_assert_not_reached();
     }

     if (is_fcvt) {
@@ -9172,13 +9289,18 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
         }
         break;
+    case 0x3: /* SUQADD, USQADD */
+        if (size == 3 && !is_q) {
+            unallocated_encoding(s);
+            return;
+        }
+        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
+        return;
     case 0x7: /* SQABS, SQNEG */
         if (size == 3 && !is_q) {
             unallocated_encoding(s);
             return;
         }
-        unsupported_encoding(s, insn);
-        return;
+        break;
     case 0xc ... 0xf:
     case 0x16 ... 0x1d:
     case 0x1f:
@@ -9389,6 +9511,13 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
                 gen_helper_cls32(tcg_res, tcg_op);
             }
             break;
+        case 0x7: /* SQABS, SQNEG */
+            if (u) {
+                gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
+            } else {
+                gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
+            }
+            break;
         case 0xb: /* ABS, NEG */
             if (u) {
                 tcg_gen_neg_i32(tcg_res, tcg_op);
@@ -9463,6 +9592,17 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
                     gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                 }
                 break;
+            case 0x7: /* SQABS, SQNEG */
+            {
+                NeonGenOneOpEnvFn *genfn;
+                static NeonGenOneOpEnvFn * const fns[2][2] = {
+                    { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+                    { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+                };
+                genfn = fns[size][u];
+                genfn(tcg_res, cpu_env, tcg_op);
+                break;
+            }
             case 0x8: /* CMGT, CMGE */
             case 0x9: /* CMEQ, CMLE */
             case 0xa: /* CMLT */