mirror of https://github.com/xemu-project/xemu.git
target/arm: Enforce alignment for VLDn/VSTn (single)
Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210419202257.161730-25-richard.henderson@linaro.org Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
parent
7c68c196cf
commit
88976ff0a4
|
@@ -629,6 +629,7 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
|
|||
int nregs = a->n + 1;
|
||||
int vd = a->vd;
|
||||
TCGv_i32 addr, tmp;
|
||||
MemOp mop;
|
||||
|
||||
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
|
||||
return false;
|
||||
|
@@ -678,23 +679,58 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
|
|||
return true;
|
||||
}
|
||||
|
||||
/* Pick up SCTLR settings */
|
||||
mop = finalize_memop(s, a->size);
|
||||
|
||||
if (a->align) {
|
||||
MemOp align_op;
|
||||
|
||||
switch (nregs) {
|
||||
case 1:
|
||||
/* For VLD1, use natural alignment. */
|
||||
align_op = MO_ALIGN;
|
||||
break;
|
||||
case 2:
|
||||
/* For VLD2, use double alignment. */
|
||||
align_op = pow2_align(a->size + 1);
|
||||
break;
|
||||
case 4:
|
||||
if (a->size == MO_32) {
|
||||
/*
|
||||
* For VLD4.32, align = 1 is double alignment, align = 2 is
|
||||
* quad alignment; align = 3 is rejected above.
|
||||
*/
|
||||
align_op = pow2_align(a->size + a->align);
|
||||
} else {
|
||||
/* For VLD4.8 and VLD.16, we want quad alignment. */
|
||||
align_op = pow2_align(a->size + 2);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
/* For VLD3, the alignment field is zero and rejected above. */
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
mop = (mop & ~MO_AMASK) | align_op;
|
||||
}
|
||||
|
||||
tmp = tcg_temp_new_i32();
|
||||
addr = tcg_temp_new_i32();
|
||||
load_reg_var(s, addr, a->rn);
|
||||
/*
|
||||
* TODO: if we implemented alignment exceptions, we should check
|
||||
* addr against the alignment encoded in a->align here.
|
||||
*/
|
||||
|
||||
for (reg = 0; reg < nregs; reg++) {
|
||||
if (a->l) {
|
||||
gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
|
||||
gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
|
||||
neon_store_element(vd, a->reg_idx, a->size, tmp);
|
||||
} else { /* Store */
|
||||
neon_load_element(tmp, vd, a->reg_idx, a->size);
|
||||
gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
|
||||
gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
|
||||
}
|
||||
vd += a->stride;
|
||||
tcg_gen_addi_i32(addr, addr, 1 << a->size);
|
||||
|
||||
/* Subsequent memory operations inherit alignment */
|
||||
mop &= ~MO_AMASK;
|
||||
}
|
||||
tcg_temp_free_i32(addr);
|
||||
tcg_temp_free_i32(tmp);
|
||||
|
|
Loading…
Reference in New Issue