mirror of https://github.com/xqemu/xqemu.git
target/arm: Use gvec for NEON VLD all lanes
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-18-richard.henderson@linaro.org
[PMM: added parens in ?: expression]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
parent ea580fa312
commit 7377c2c97e
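As context for the diff below: the old code replicated a loaded element across the destination registers with hand-rolled shift/or helpers, and this commit lets the generic gvec dup operation do the replication instead. A minimal sketch of what a NEON "load single element to all lanes" does architecturally, assuming an 8-bit element and one 64-bit D-register destination (illustrative C only, not QEMU code):

    #include <stdint.h>

    /* Illustrative only: VLD1 "single element to all lanes" loads one
     * element from memory and writes the same value into every lane of
     * the destination register(s).  Shown here for an 8-bit element and
     * a single 64-bit D register. */
    static uint64_t vld1_all_lanes_u8(const uint8_t *mem)
    {
        uint64_t d = 0;
        for (int lane = 0; lane < 8; lane++) {
            d |= (uint64_t)mem[0] << (lane * 8);  /* same byte in each lane */
        }
        return d;
    }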
@@ -2993,19 +2993,6 @@ static void gen_vfp_msr(TCGv_i32 tmp)
     tcg_temp_free_i32(tmp);
 }
 
-static void gen_neon_dup_u8(TCGv_i32 var, int shift)
-{
-    TCGv_i32 tmp = tcg_temp_new_i32();
-    if (shift)
-        tcg_gen_shri_i32(var, var, shift);
-    tcg_gen_ext8u_i32(var, var);
-    tcg_gen_shli_i32(tmp, var, 8);
-    tcg_gen_or_i32(var, var, tmp);
-    tcg_gen_shli_i32(tmp, var, 16);
-    tcg_gen_or_i32(var, var, tmp);
-    tcg_temp_free_i32(tmp);
-}
-
 static void gen_neon_dup_low16(TCGv_i32 var)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
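The removed gen_neon_dup_u8() built the replication out of TCG shift and or ops: zero-extend the byte, then double it twice. A minimal sketch of the same pattern on a host integer (illustrative C, not the TCG calls themselves):

    #include <stdint.h>

    /* Illustrative only: the shift/or doubling pattern the removed helper
     * emitted as TCG ops.  After two doubling steps the low byte fills all
     * four byte lanes of the 32-bit value. */
    static uint32_t dup_u8_to_u32(uint32_t var)
    {
        var &= 0xff;          /* tcg_gen_ext8u_i32 */
        var |= var << 8;      /* byte     -> halfword */
        var |= var << 16;     /* halfword -> word     */
        return var;
    }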
@@ -3024,28 +3011,6 @@ static void gen_neon_dup_high16(TCGv_i32 var)
     tcg_temp_free_i32(tmp);
 }
 
-static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
-{
-    /* Load a single Neon element and replicate into a 32 bit TCG reg */
-    TCGv_i32 tmp = tcg_temp_new_i32();
-    switch (size) {
-    case 0:
-        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
-        gen_neon_dup_u8(tmp, 0);
-        break;
-    case 1:
-        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
-        gen_neon_dup_low16(tmp);
-        break;
-    case 2:
-        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
-        break;
-    default: /* Avoid compiler warnings. */
-        abort();
-    }
-    return tmp;
-}
-
 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                        uint32_t dp)
 {
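The removed gen_load_and_replicate() combined a size-dependent load with replication into a 32-bit temp; the replacement below issues a single gen_aa32_ld_i32 with the element size folded into the memop and leaves the replication to tcg_gen_gvec_dup_i32. A rough sketch of the removed helper's effect in plain C (illustrative only; memcpy is used to sidestep alignment concerns):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: load one element of (1 << size) bytes and
     * replicate it through a 32-bit value, mirroring what the removed
     * helper did with per-size loads plus the dup helpers. */
    static uint32_t load_and_replicate_sketch(const void *p, int size)
    {
        uint32_t v = 0;

        switch (size) {
        case 0:                                 /* 8-bit element  */
            v = *(const uint8_t *)p;
            return v * 0x01010101u;
        case 1: {                               /* 16-bit element */
            uint16_t h;
            memcpy(&h, p, sizeof(h));
            return (uint32_t)h * 0x00010001u;
        }
        case 2:                                 /* 32-bit element */
            memcpy(&v, p, sizeof(v));
            return v;
        default:
            return 0;                           /* invalid encodings UNDEF earlier */
        }
    }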
@@ -4949,6 +4914,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     int load;
     int shift;
     int n;
+    int vec_size;
     TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -5118,28 +5084,33 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
             }
             addr = tcg_temp_new_i32();
             load_reg_var(s, addr, rn);
-            if (nregs == 1) {
-                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
-                tmp = gen_load_and_replicate(s, addr, size);
-                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
-                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
-                if (insn & (1 << 5)) {
-                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
-                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
-                }
-                tcg_temp_free_i32(tmp);
-            } else {
-                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
-                stride = (insn & (1 << 5)) ? 2 : 1;
-                for (reg = 0; reg < nregs; reg++) {
-                    tmp = gen_load_and_replicate(s, addr, size);
-                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
-                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
-                    tcg_temp_free_i32(tmp);
-                    tcg_gen_addi_i32(addr, addr, 1 << size);
-                    rd += stride;
-                }
+
+            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
+             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
+             */
+            stride = (insn & (1 << 5)) ? 2 : 1;
+            vec_size = nregs == 1 ? stride * 8 : 8;
+
+            tmp = tcg_temp_new_i32();
+            for (reg = 0; reg < nregs; reg++) {
+                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
+                                s->be_data | size);
+                if ((rd & 1) && vec_size == 16) {
+                    /* We cannot write 16 bytes at once because the
+                     * destination is unaligned.
+                     */
+                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
+                                         8, 8, tmp);
+                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
+                                     neon_reg_offset(rd, 0), 8, 8);
+                } else {
+                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
+                                         vec_size, vec_size, tmp);
+                }
+                tcg_gen_addi_i32(addr, addr, 1 << size);
+                rd += stride;
             }
+            tcg_temp_free_i32(tmp);
             tcg_temp_free_i32(addr);
             stride = (1 << size) * nregs;
         } else {
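One detail of the new code worth calling out: when nregs == 1 and bit 5 selects two D registers, vec_size is 16, but an odd starting register (rd & 1) means the 16-byte destination is only 8-byte aligned in the register file, so the dup is done into the first D register and then copied into the second. A sketch of that split, assuming a byte-addressed view of the destination (illustrative only, not QEMU code):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: fill a 16-byte destination that is only 8-byte
     * aligned by writing the replicated value into the first half and
     * copying it into the second, mirroring the dup + mov pair above. */
    static void fill_16_bytes_as_two_halves(uint8_t *dst, uint64_t replicated)
    {
        memcpy(dst, &replicated, 8);   /* dup into the first D register */
        memcpy(dst + 8, dst, 8);       /* copy into the following one   */
    }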