target/riscv: rvv-1.0: Add Zve64f support for scalar fp insns
The Zve64f extension requires the scalar processor to implement the F extension, and it provides all vector floating-point instructions for floating-point operands with EEW=32 (i.e., no widening floating-point operations).

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20220118014522.13613-7-frank.chang@sifive.com
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
parent: 13dbc826fd
commit: 40d78c85f6
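In condensed form, the change below adds one decode-time predicate and chains it onto every checker used by scalar floating-point vector instructions. The sketch here is assembled from the hunks that follow (it is not extra code beyond the patch); opfvv_check is shown as one representative checker:

    /* New gate: with full RVV everything is allowed; with Zve64f only,
     * floating-point operands are limited to EEW <= 32 (no FP64). */
    static bool require_zve64f(DisasContext *s)
    {
        if (has_ext(s, RVV)) {
            return true;
        }
        return s->ext_zve64f ? s->sew <= MO_32 : true;
    }

    /* Each affected checker appends the gate to its existing condition chain. */
    static bool opfvv_check(DisasContext *s, arg_rmrr *a)
    {
        return require_rvv(s) &&
               require_rvf(s) &&
               vext_check_isa_ill(s) &&
               vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
               require_zve64f(s);
    }

When neither RVV nor Zve64f is enabled the predicate returns true, so configurations without Zve64f are unaffected.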
@@ -66,6 +66,17 @@ static bool require_scale_rvf(DisasContext *s)
     }
 }
 
+static bool require_zve64f(DisasContext *s)
+{
+    /* RVV + Zve64f = RVV. */
+    if (has_ext(s, RVV)) {
+        return true;
+    }
+
+    /* Zve64f doesn't support FP64. (Section 18.2) */
+    return s->ext_zve64f ? s->sew <= MO_32 : true;
+}
+
 /* Destination vector register group cannot overlap source mask register. */
 static bool require_vm(int vm, int vd)
 {
@@ -2206,7 +2217,8 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
+           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+           require_zve64f(s);
 }
 
 /* OPFVV without GVEC IR */
@@ -2286,7 +2298,8 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ss(s, a->rd, a->rs2, a->vm);
+           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
+           require_zve64f(s);
 }
 
 /* OPFVF without GVEC IR */
@@ -2503,7 +2516,8 @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV instructions ignore vs1 check */
-           vext_check_ss(s, a->rd, a->rs2, a->vm);
+           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
+           require_zve64f(s);
 }
 
 static bool do_opfv(DisasContext *s, arg_rmr *a,
@@ -2568,7 +2582,8 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_mss(s, a->rd, a->rs1, a->rs2);
+           vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
@@ -2581,7 +2596,8 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ms(s, a->rd, a->rs2);
+           vext_check_ms(s, a->rd, a->rs2) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
@@ -2602,7 +2618,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
     if (require_rvv(s) &&
         require_rvf(s) &&
         vext_check_isa_ill(s) &&
-        require_align(a->rd, s->lmul)) {
+        require_align(a->rd, s->lmul) &&
+        require_zve64f(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         TCGv_i64 t1;
@@ -3328,7 +3345,8 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s)) {
+        vext_check_isa_ill(s) &&
+        require_zve64f(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         unsigned int ofs = (8 << s->sew);
@@ -3354,7 +3372,8 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s)) {
+        vext_check_isa_ill(s) &&
+        require_zve64f(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         /* The instructions ignore LMUL and vector register group. */
@@ -3405,13 +3424,15 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
 {
     return slideup_check(s, a) &&
-           require_rvf(s);
+           require_rvf(s) &&
+           require_zve64f(s);
 }
 
 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
 {
     return slidedown_check(s, a) &&
-           require_rvf(s);
+           require_rvf(s) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)