target/arm: Convert MOVI, FMOV, ORR, BIC (vector immediate) to decodetree

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240912024114.1097832-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson authored on 2024-09-11 19:40:58 -07:00; committed by Peter Maydell
parent c777e73cbe
commit 500928f242
2 changed files with 59 additions and 67 deletions

--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode

@@ -1184,3 +1184,12 @@ FMINV_s         0110 1110 10 11000 01111 10 ..... ..... @rr_q1e2
 # Floating-point Immediate
 
 FMOVI_s         0001 1110 .. 1 imm:8 100 00000 rd:5 esz=%esz_hsd
+
+# Advanced SIMD Modified Immediate
+
+%abcdefgh       16:3 5:5
+
+FMOVI_v_h       0 q:1 00 1111 00000 ... 1111 11 ..... rd:5 %abcdefgh
+
+# MOVI, MVNI, ORR, BIC, FMOV are all intermixed via cmode.
+Vimm            0 q:1 op:1 0 1111 00000 ... cmode:4 01 ..... rd:5 %abcdefgh
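
As an aside for readers new to decodetree: a field definition like %abcdefgh 16:3 5:5 concatenates its subfields most-significant-first, so abc (insn bits 18:16) supplies bits 7:5 of the immediate and defgh (insn bits 9:5) supplies bits 4:0, which is exactly the value the hand-written decoder assembled with extract32() (see the deleted disas_simd_mod_imm further down). A minimal standalone sketch, not QEMU code; extract32() here is a local stand-in for QEMU's bitops.h helper of the same name:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for QEMU's bitops.h extract32() helper. */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    int main(void)
    {
        uint32_t insn = 0x4f0557e3;  /* arbitrary bit pattern for the demo */

        /* decodetree "%abcdefgh 16:3 5:5": first subfield is most significant */
        uint32_t dt = (extract32(insn, 16, 3) << 5) | extract32(insn, 5, 5);

        /* the legacy decoder's equivalent expression */
        uint32_t hand = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);

        assert(dt == hand);
        printf("abcdefgh = 0x%02x\n", dt);
        return 0;
    }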

--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c

@@ -6913,6 +6913,52 @@ static bool trans_FMOVI_s(DisasContext *s, arg_FMOVI_s *a)
     return true;
 }
 
+/*
+ * Advanced SIMD Modified Immediate
+ */
+
+static bool trans_FMOVI_v_h(DisasContext *s, arg_FMOVI_v_h *a)
+{
+    if (!dc_isar_feature(aa64_fp16, s)) {
+        return false;
+    }
+    if (fp_access_check(s)) {
+        tcg_gen_gvec_dup_imm(MO_16, vec_full_reg_offset(s, a->rd),
+                             a->q ? 16 : 8, vec_full_reg_size(s),
+                             vfp_expand_imm(MO_16, a->abcdefgh));
+    }
+    return true;
+}
+
+static void gen_movi(unsigned vece, uint32_t dofs, uint32_t aofs,
+                     int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
+}
+
+static bool trans_Vimm(DisasContext *s, arg_Vimm *a)
+{
+    GVecGen2iFn *fn;
+
+    /* Handle decode of cmode/op here between ORR/BIC/MOVI */
+    if ((a->cmode & 1) && a->cmode < 12) {
+        /* For op=1, the imm will be inverted, so BIC becomes AND. */
+        fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
+    } else {
+        /* There is one unallocated cmode/op combination in this space */
+        if (a->cmode == 15 && a->op == 1 && a->q == 0) {
+            return false;
+        }
+        fn = gen_movi;
+    }
+
+    if (fp_access_check(s)) {
+        uint64_t imm = asimd_imm_const(a->abcdefgh, a->cmode, a->op);
+        gen_gvec_fn2i(s, a->q, a->rd, a->rd, imm, fn, MO_64);
+    }
+    return true;
+}
+
 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
  * Note that it is the caller's responsibility to ensure that the
  * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
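
The cmode/op dispatch in trans_Vimm above is dense. As a sketch only, not part of the patch, this standalone program enumerates how each combination is routed: odd cmode values below 12 select the read-modify-write ORR/BIC forms, cmode 15 with op=1 and q=0 is the single unallocated hole, and everything else becomes a plain dup-immediate move handled by gen_movi:

    #include <stdio.h>

    static const char *classify(int cmode, int op, int q)
    {
        if ((cmode & 1) && cmode < 12) {
            /* op=1 arrives with the immediate inverted, so BIC is an AND */
            return op ? "BIC (andi on inverted imm)" : "ORR (ori)";
        }
        if (cmode == 15 && op == 1 && q == 0) {
            return "unallocated";
        }
        return "MOVI/MVNI/FMOV family (dup immediate)";
    }

    int main(void)
    {
        for (int cmode = 0; cmode < 16; cmode++) {
            for (int op = 0; op < 2; op++) {
                printf("cmode=%2d op=%d q=0 -> %s\n",
                       cmode, op, classify(cmode, op, 0));
            }
        }
        return 0;
    }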
@@ -9092,69 +9138,6 @@ static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
     }
 }
 
-/* AdvSIMD modified immediate
- *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
- * +---+---+----+---------------------+-----+-------+----+---+-------+------+
- * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
- * +---+---+----+---------------------+-----+-------+----+---+-------+------+
- *
- * There are a number of operations that can be carried out here:
- *   MOVI - move (shifted) imm into register
- *   MVNI - move inverted (shifted) imm into register
- *   ORR  - bitwise OR of (shifted) imm with register
- *   BIC  - bitwise clear of (shifted) imm with register
- * With ARMv8.2 we also have:
- *   FMOV half-precision
- */
-static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
-{
-    int rd = extract32(insn, 0, 5);
-    int cmode = extract32(insn, 12, 4);
-    int o2 = extract32(insn, 11, 1);
-    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
-    bool is_neg = extract32(insn, 29, 1);
-    bool is_q = extract32(insn, 30, 1);
-    uint64_t imm = 0;
-
-    if (o2) {
-        if (cmode != 0xf || is_neg) {
-            unallocated_encoding(s);
-            return;
-        }
-        /* FMOV (vector, immediate) - half-precision */
-        if (!dc_isar_feature(aa64_fp16, s)) {
-            unallocated_encoding(s);
-            return;
-        }
-        imm = vfp_expand_imm(MO_16, abcdefgh);
-        /* now duplicate across the lanes */
-        imm = dup_const(MO_16, imm);
-    } else {
-        if (cmode == 0xf && is_neg && !is_q) {
-            unallocated_encoding(s);
-            return;
-        }
-        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
-    }
-
-    if (!fp_access_check(s)) {
-        return;
-    }
-
-    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
-        /* MOVI or MVNI, with MVNI negation handled above. */
-        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
-                             vec_full_reg_size(s), imm);
-    } else {
-        /* ORR or BIC, with BIC negation to AND handled above. */
-        if (is_neg) {
-            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
-        } else {
-            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
-        }
-    }
-}
-
 /*
  * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
  *
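
For reference, the half-precision path that the deleted code implemented (and that trans_FMOVI_v_h now handles) expands the 8-bit immediate per the architectural VFPExpandImm() rules for FP16 (sign = imm8<7>; a 5-bit exponent NOT(b):b:b:imm8<5:4> where b = imm8<6>; fraction = imm8<3:0> followed by six zeros), then replicates the result across 16-bit lanes, which is what QEMU's dup_const(MO_16, x) does via a multiply. A standalone sketch under those assumptions; the sample immediate is illustrative, not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of VFPExpandImm() for FP16, mirroring vfp_expand_imm(MO_16, x). */
    static uint16_t vfp_expand_imm16(uint8_t imm8)
    {
        uint16_t sign = (imm8 >> 7) & 1;
        uint16_t b = (imm8 >> 6) & 1;
        uint16_t exp = ((b ^ 1) << 4) | (b << 3) | (b << 2) | ((imm8 >> 4) & 3);
        uint16_t frac = (imm8 & 0xf) << 6;        /* imm8<3:0> : Zeros(6) */
        return (sign << 15) | (exp << 10) | frac;
    }

    int main(void)
    {
        uint8_t imm8 = 0x70;                      /* expands to FP16 1.0 */
        uint64_t lane = vfp_expand_imm16(imm8);   /* 0x3c00 */
        uint64_t vec = lane * 0x0001000100010001ull;  /* dup_const(MO_16) */
        printf("fp16 = 0x%04x, replicated 64-bit pattern = 0x%016llx\n",
               (unsigned)lane, (unsigned long long)vec);
        return 0;
    }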
@@ -10635,8 +10618,10 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
     bool is_u = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
 
-    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
-    assert(immh != 0);
+    if (immh == 0) {
+        unallocated_encoding(s);
+        return;
+    }
 
     switch (opcode) {
     case 0x08: /* SRI */
@@ -11644,8 +11629,6 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
 static const AArch64DecodeTable data_proc_simd[] = {
     /* pattern  ,  mask     ,  fn        */
     { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
-    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
-    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
     { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
     { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
     { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
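
A closing note on the deleted ordering comment: the simd_mod_imm entry matched a strict subset of the simd_shift_imm entry's encodings, which is why it had to precede it in the table, and why its removal forces disas_simd_shift_imm to reject immh == 0 itself (the hunk above). A quick standalone check of the subset claim, a sketch using the pattern/mask pairs from the table:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t modimm_pat = 0x0f000400, modimm_mask = 0x9ff80400;
        uint32_t shift_pat  = 0x0f000400, shift_mask  = 0x9f800400;

        /* shift_imm constrains only bits that mod_imm also constrains... */
        assert((shift_mask & ~modimm_mask) == 0);

        /* ...and the patterns agree on those bits, so any insn matching the
         * mod_imm entry also matches shift_imm; table order broke the tie. */
        assert((modimm_pat & shift_mask) == shift_pat);
        return 0;
    }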