target/arm: Add helpers for VFP register loads and stores

The current VFP code has two different idioms for
loading from and storing to the VFP register file:
 1 using gen_mov_F0_vreg() and similar functions,
   which load and store via a fixed set of TCG globals
   cpu_F0s, cpu_F0d, etc (sketched below)
 2 making direct calls to tcg_gen_ld_f64() and friends
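
As an illustration, idiom 1 funnels every access through the
fixed globals along these lines (a from-memory sketch of the
old helper, not a verbatim copy):

 static inline void gen_mov_F0_vreg(int dp, int reg)
 {
     /* loads always land in the fixed global cpu_F0d or cpu_F0s */
     if (dp) {
         tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
     } else {
         tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
     }
 }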

We want to phase out idiom 1 (because the use of the
fixed globals is a relic of a much older version of TCG),
but idiom 2 is quite long-winded:
 tcg_gen_ld_f64(tmp, cpu_env, vfp_reg_offset(true, reg))
requires us to specify the 64-bitness twice, once in
the function name and once by passing 'true' to
vfp_reg_offset(). There's no guard against accidentally
passing the wrong flag.

Instead, let's move to a convention of accessing 64-bit
registers via the existing neon_load_reg64() and
neon_store_reg64(), and provide new neon_load_reg32()
and neon_store_reg32() for the 32-bit equivalents.
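
For comparison, the same 64-bit load in both styles, reusing
the 'tmp' and 'reg' names from the snippet above:

 /* old: width stated twice, with no cross-check */
 tcg_gen_ld_f64(tmp, cpu_env, vfp_reg_offset(true, reg));
 /* new: width stated once, in the helper's name */
 neon_load_reg64(tmp, reg);

The 32-bit accessors read the same way, e.g.
neon_load_reg32(tmp32, reg), with tmp32 a TCGv_i32 (a name
invented for this example).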

Implement the new functions and use them in the code in
translate-vfp.inc.c. We will convert the rest of the VFP
code as we do the decodetree conversion in subsequent
commits.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
commit 160f3b64c5
parent f7bbb8f31f
Author: Peter Maydell
Date:   2019-06-11 16:39:44 +01:00

2 changed files with 30 additions and 20 deletions

--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c

@@ -179,8 +179,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
         tcg_gen_ext_i32_i64(nf, cpu_NF);
         tcg_gen_ext_i32_i64(vf, cpu_VF);
 
-        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg64(frn, rn);
+        neon_load_reg64(frm, rm);
         switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
@@ -207,7 +207,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
             tcg_temp_free_i64(tmp);
             break;
         }
-        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg64(dest, rd);
         tcg_temp_free_i64(frn);
         tcg_temp_free_i64(frm);
         tcg_temp_free_i64(dest);
@@ -226,8 +226,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
         frn = tcg_temp_new_i32();
         frm = tcg_temp_new_i32();
         dest = tcg_temp_new_i32();
-        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg32(frn, rn);
+        neon_load_reg32(frm, rm);
         switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
@@ -254,7 +254,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
             tcg_temp_free_i32(tmp);
             break;
         }
-        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg32(dest, rd);
         tcg_temp_free_i32(frn);
         tcg_temp_free_i32(frm);
         tcg_temp_free_i32(dest);
@@ -298,14 +298,14 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
         frm = tcg_temp_new_i64();
         dest = tcg_temp_new_i64();
 
-        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg64(frn, rn);
+        neon_load_reg64(frm, rm);
         if (vmin) {
             gen_helper_vfp_minnumd(dest, frn, frm, fpst);
         } else {
             gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
         }
-        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg64(dest, rd);
         tcg_temp_free_i64(frn);
         tcg_temp_free_i64(frm);
         tcg_temp_free_i64(dest);
@@ -316,14 +316,14 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
         frm = tcg_temp_new_i32();
         dest = tcg_temp_new_i32();
 
-        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg32(frn, rn);
+        neon_load_reg32(frm, rm);
         if (vmin) {
             gen_helper_vfp_minnums(dest, frn, frm, fpst);
         } else {
             gen_helper_vfp_maxnums(dest, frn, frm, fpst);
         }
-        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg32(dest, rd);
         tcg_temp_free_i32(frn);
         tcg_temp_free_i32(frm);
         tcg_temp_free_i32(dest);
@@ -379,9 +379,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
         TCGv_i64 tcg_res;
         tcg_op = tcg_temp_new_i64();
         tcg_res = tcg_temp_new_i64();
-        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg64(tcg_op, rm);
         gen_helper_rintd(tcg_res, tcg_op, fpst);
-        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg64(tcg_res, rd);
         tcg_temp_free_i64(tcg_op);
         tcg_temp_free_i64(tcg_res);
     } else {
@@ -389,9 +389,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
         TCGv_i32 tcg_res;
         tcg_op = tcg_temp_new_i32();
         tcg_res = tcg_temp_new_i32();
-        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg32(tcg_op, rm);
         gen_helper_rints(tcg_res, tcg_op, fpst);
-        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg32(tcg_res, rd);
         tcg_temp_free_i32(tcg_op);
         tcg_temp_free_i32(tcg_res);
     }
@@ -440,14 +440,14 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
         tcg_double = tcg_temp_new_i64();
         tcg_res = tcg_temp_new_i64();
         tcg_tmp = tcg_temp_new_i32();
-        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
+        neon_load_reg64(tcg_double, rm);
         if (is_signed) {
             gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
         } else {
             gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
         }
         tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
-        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
+        neon_store_reg32(tcg_tmp, rd);
         tcg_temp_free_i32(tcg_tmp);
         tcg_temp_free_i64(tcg_res);
         tcg_temp_free_i64(tcg_double);
@@ -455,13 +455,13 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
         TCGv_i32 tcg_single, tcg_res;
         tcg_single = tcg_temp_new_i32();
         tcg_res = tcg_temp_new_i32();
-        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
+        neon_load_reg32(tcg_single, rm);
         if (is_signed) {
             gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
         } else {
             gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
         }
-        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
+        neon_store_reg32(tcg_res, rd);
         tcg_temp_free_i32(tcg_res);
         tcg_temp_free_i32(tcg_single);
     }

--- a/target/arm/translate.c
+++ b/target/arm/translate.c

@@ -1689,6 +1689,16 @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
     tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
 }
 
+static inline void neon_load_reg32(TCGv_i32 var, int reg)
+{
+    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
+}
+
+static inline void neon_store_reg32(TCGv_i32 var, int reg)
+{
+    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
+}
+
 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();