commit 342dc1cfcb
parent c09360faad

target/loongarch: Implement xvmul/xvmuh/xvmulw{ev/od}

This patch includes:
- XVMUL.{B/H/W/D};
- XVMUH.{B/H/W/D}[U];
- XVMULW{EV/OD}.{H.B/W.H/D.W/Q.D}[U];
- XVMULW{EV/OD}.{H.BU.B/W.HU.H/D.WU.W/Q.DU.D}.

Signed-off-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230914022645.1151356-26-gaosong@loongson.cn>
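For reference, the "even/odd" widening multiplies take the even-numbered (EV) or odd-numbered (OD) source elements, widen them to twice their width, and multiply them pairwise. Below is a minimal C sketch of that semantics for the H.B case on one 128-bit lane; it is illustrative only and not part of the patch, and the names are invented for the example.

#include <stdint.h>

/* Illustrative reference model of vmulwev.h.b / vmulwod.h.b:
 * 8-bit source elements, 16-bit products. */
static void mulwev_h_b(int16_t d[8], const int8_t j[16], const int8_t k[16])
{
    for (int i = 0; i < 8; i++) {
        d[i] = (int16_t)((int16_t)j[2 * i] * (int16_t)k[2 * i]);         /* even elements */
    }
}

static void mulwod_h_b(int16_t d[8], const int8_t j[16], const int8_t k[16])
{
    for (int i = 0; i < 8; i++) {
        d[i] = (int16_t)((int16_t)j[2 * i + 1] * (int16_t)k[2 * i + 1]); /* odd elements */
    }
}

The [U] and BU.B-style mixed forms follow the same pattern with unsigned or unsigned-by-signed operands.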
@@ -1890,6 +1890,44 @@ INSN_LASX(xvmini_hu, vv_i)
 INSN_LASX(xvmini_wu, vv_i)
 INSN_LASX(xvmini_du, vv_i)
 
+INSN_LASX(xvmul_b, vvv)
+INSN_LASX(xvmul_h, vvv)
+INSN_LASX(xvmul_w, vvv)
+INSN_LASX(xvmul_d, vvv)
+INSN_LASX(xvmuh_b, vvv)
+INSN_LASX(xvmuh_h, vvv)
+INSN_LASX(xvmuh_w, vvv)
+INSN_LASX(xvmuh_d, vvv)
+INSN_LASX(xvmuh_bu, vvv)
+INSN_LASX(xvmuh_hu, vvv)
+INSN_LASX(xvmuh_wu, vvv)
+INSN_LASX(xvmuh_du, vvv)
+
+INSN_LASX(xvmulwev_h_b, vvv)
+INSN_LASX(xvmulwev_w_h, vvv)
+INSN_LASX(xvmulwev_d_w, vvv)
+INSN_LASX(xvmulwev_q_d, vvv)
+INSN_LASX(xvmulwod_h_b, vvv)
+INSN_LASX(xvmulwod_w_h, vvv)
+INSN_LASX(xvmulwod_d_w, vvv)
+INSN_LASX(xvmulwod_q_d, vvv)
+INSN_LASX(xvmulwev_h_bu, vvv)
+INSN_LASX(xvmulwev_w_hu, vvv)
+INSN_LASX(xvmulwev_d_wu, vvv)
+INSN_LASX(xvmulwev_q_du, vvv)
+INSN_LASX(xvmulwod_h_bu, vvv)
+INSN_LASX(xvmulwod_w_hu, vvv)
+INSN_LASX(xvmulwod_d_wu, vvv)
+INSN_LASX(xvmulwod_q_du, vvv)
+INSN_LASX(xvmulwev_h_bu_b, vvv)
+INSN_LASX(xvmulwev_w_hu_h, vvv)
+INSN_LASX(xvmulwev_d_wu_w, vvv)
+INSN_LASX(xvmulwev_q_du_d, vvv)
+INSN_LASX(xvmulwod_h_bu_b, vvv)
+INSN_LASX(xvmulwod_w_hu_h, vvv)
+INSN_LASX(xvmulwod_d_wu_w, vvv)
+INSN_LASX(xvmulwod_q_du_d, vvv)
+
 INSN_LASX(xvreplgr2vr_b, vr)
 INSN_LASX(xvreplgr2vr_h, vr)
 INSN_LASX(xvreplgr2vr_w, vr)
@@ -1913,6 +1913,10 @@ TRANS(vmul_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_mul)
 TRANS(vmul_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_mul)
 TRANS(vmul_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_mul)
 TRANS(vmul_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_mul)
+TRANS(xvmul_b, LASX, gvec_xxx, MO_8, tcg_gen_gvec_mul)
+TRANS(xvmul_h, LASX, gvec_xxx, MO_16, tcg_gen_gvec_mul)
+TRANS(xvmul_w, LASX, gvec_xxx, MO_32, tcg_gen_gvec_mul)
+TRANS(xvmul_d, LASX, gvec_xxx, MO_64, tcg_gen_gvec_mul)
 
 static void gen_vmuh_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
 {
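Each TRANS(...) line above generates a trans_xvmul_*() callback that checks that LASX is available and then hands off to gvec_xxx(), which differs from the LSX gvec_vvv() path essentially only in using a 32-byte operation size. As a plain-C reference (not part of the patch, names illustrative), xvmul.b is simply a per-element multiply that keeps the low bits of each product:

#include <stdint.h>

/* Illustrative reference model of xvmul.b over a 256-bit LASX register:
 * only the low 8 bits of each product are kept, so signedness does not matter. */
static void xvmul_b_ref(uint8_t vd[32], const uint8_t vj[32], const uint8_t vk[32])
{
    for (int i = 0; i < 32; i++) {
        vd[i] = (uint8_t)(vj[i] * vk[i]);
    }
}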
@@ -1957,6 +1961,10 @@ TRANS(vmuh_b, LSX, gvec_vvv, MO_8, do_vmuh_s)
 TRANS(vmuh_h, LSX, gvec_vvv, MO_16, do_vmuh_s)
 TRANS(vmuh_w, LSX, gvec_vvv, MO_32, do_vmuh_s)
 TRANS(vmuh_d, LSX, gvec_vvv, MO_64, do_vmuh_s)
+TRANS(xvmuh_b, LASX, gvec_xxx, MO_8, do_vmuh_s)
+TRANS(xvmuh_h, LASX, gvec_xxx, MO_16, do_vmuh_s)
+TRANS(xvmuh_w, LASX, gvec_xxx, MO_32, do_vmuh_s)
+TRANS(xvmuh_d, LASX, gvec_xxx, MO_64, do_vmuh_s)
 
 static void gen_vmuh_wu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
 {
@@ -2001,6 +2009,10 @@ TRANS(vmuh_bu, LSX, gvec_vvv, MO_8, do_vmuh_u)
 TRANS(vmuh_hu, LSX, gvec_vvv, MO_16, do_vmuh_u)
 TRANS(vmuh_wu, LSX, gvec_vvv, MO_32, do_vmuh_u)
 TRANS(vmuh_du, LSX, gvec_vvv, MO_64, do_vmuh_u)
+TRANS(xvmuh_bu, LASX, gvec_xxx, MO_8, do_vmuh_u)
+TRANS(xvmuh_hu, LASX, gvec_xxx, MO_16, do_vmuh_u)
+TRANS(xvmuh_wu, LASX, gvec_xxx, MO_32, do_vmuh_u)
+TRANS(xvmuh_du, LASX, gvec_xxx, MO_64, do_vmuh_u)
 
 static void gen_vmulwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -2073,6 +2085,9 @@ static void do_vmulwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
 TRANS(vmulwev_h_b, LSX, gvec_vvv, MO_8, do_vmulwev_s)
 TRANS(vmulwev_w_h, LSX, gvec_vvv, MO_16, do_vmulwev_s)
 TRANS(vmulwev_d_w, LSX, gvec_vvv, MO_32, do_vmulwev_s)
+TRANS(xvmulwev_h_b, LASX, gvec_xxx, MO_8, do_vmulwev_s)
+TRANS(xvmulwev_w_h, LASX, gvec_xxx, MO_16, do_vmulwev_s)
+TRANS(xvmulwev_d_w, LASX, gvec_xxx, MO_32, do_vmulwev_s)
 
 static void tcg_gen_mulus2_i64(TCGv_i64 rl, TCGv_i64 rh,
                                TCGv_i64 arg1, TCGv_i64 arg2)
@@ -2080,37 +2095,62 @@ static void tcg_gen_mulus2_i64(TCGv_i64 rl, TCGv_i64 rh,
     tcg_gen_mulsu2_i64(rl, rh, arg2, arg1);
 }
 
-#define VMUL_Q(NAME, FN, idx1, idx2) \
-static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
-{ \
-    TCGv_i64 rh, rl, arg1, arg2; \
-\
-    if (!avail_LSX(ctx)) { \
-        return false; \
-    } \
-\
-    rh = tcg_temp_new_i64(); \
-    rl = tcg_temp_new_i64(); \
-    arg1 = tcg_temp_new_i64(); \
-    arg2 = tcg_temp_new_i64(); \
-\
-    get_vreg64(arg1, a->vj, idx1); \
-    get_vreg64(arg2, a->vk, idx2); \
-\
-    tcg_gen_## FN ##_i64(rl, rh, arg1, arg2); \
-\
-    set_vreg64(rh, a->vd, 1); \
-    set_vreg64(rl, a->vd, 0); \
-\
-    return true; \
-}
-
-VMUL_Q(vmulwev_q_d, muls2, 0, 0)
-VMUL_Q(vmulwod_q_d, muls2, 1, 1)
-VMUL_Q(vmulwev_q_du, mulu2, 0, 0)
-VMUL_Q(vmulwod_q_du, mulu2, 1, 1)
-VMUL_Q(vmulwev_q_du_d, mulus2, 0, 0)
-VMUL_Q(vmulwod_q_du_d, mulus2, 1, 1)
+static bool gen_vmul_q_vl(DisasContext *ctx,
+                          arg_vvv *a, uint32_t oprsz, int idx1, int idx2,
+                          void (*func)(TCGv_i64, TCGv_i64,
+                                       TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 rh, rl, arg1, arg2;
+    int i;
+
+    if (!check_vec(ctx, oprsz)) {
+        return true;
+    }
+
+    rh = tcg_temp_new_i64();
+    rl = tcg_temp_new_i64();
+    arg1 = tcg_temp_new_i64();
+    arg2 = tcg_temp_new_i64();
+
+    for (i = 0; i < oprsz / 16; i++) {
+        get_vreg64(arg1, a->vj, 2 * i + idx1);
+        get_vreg64(arg2, a->vk, 2 * i + idx2);
+
+        func(rl, rh, arg1, arg2);
+
+        set_vreg64(rh, a->vd, 2 * i + 1);
+        set_vreg64(rl, a->vd, 2 * i);
+    }
+
+    return true;
+}
+
+static bool gen_vmul_q(DisasContext *ctx, arg_vvv *a, int idx1, int idx2,
+                       void (*func)(TCGv_i64, TCGv_i64,
+                                    TCGv_i64, TCGv_i64))
+{
+    return gen_vmul_q_vl(ctx, a, 16, idx1, idx2, func);
+}
+
+static bool gen_xvmul_q(DisasContext *ctx, arg_vvv *a, int idx1, int idx2,
+                        void (*func)(TCGv_i64, TCGv_i64,
+                                     TCGv_i64, TCGv_i64))
+{
+    return gen_vmul_q_vl(ctx, a, 32, idx1, idx2, func);
+}
+
+TRANS(vmulwev_q_d, LSX, gen_vmul_q, 0, 0, tcg_gen_muls2_i64)
+TRANS(vmulwod_q_d, LSX, gen_vmul_q, 1, 1, tcg_gen_muls2_i64)
+TRANS(vmulwev_q_du, LSX, gen_vmul_q, 0, 0, tcg_gen_mulu2_i64)
+TRANS(vmulwod_q_du, LSX, gen_vmul_q, 1, 1, tcg_gen_mulu2_i64)
+TRANS(vmulwev_q_du_d, LSX, gen_vmul_q, 0, 0, tcg_gen_mulus2_i64)
+TRANS(vmulwod_q_du_d, LSX, gen_vmul_q, 1, 1, tcg_gen_mulus2_i64)
+TRANS(xvmulwev_q_d, LASX, gen_xvmul_q, 0, 0, tcg_gen_muls2_i64)
+TRANS(xvmulwod_q_d, LASX, gen_xvmul_q, 1, 1, tcg_gen_muls2_i64)
+TRANS(xvmulwev_q_du, LASX, gen_xvmul_q, 0, 0, tcg_gen_mulu2_i64)
+TRANS(xvmulwod_q_du, LASX, gen_xvmul_q, 1, 1, tcg_gen_mulu2_i64)
+TRANS(xvmulwev_q_du_d, LASX, gen_xvmul_q, 0, 0, tcg_gen_mulus2_i64)
+TRANS(xvmulwod_q_du_d, LASX, gen_xvmul_q, 1, 1, tcg_gen_mulus2_i64)
 
 static void gen_vmulwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
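The new gen_vmul_q_vl() walks the destination in 128-bit lanes: for lane i it reads the even (idx 0) or odd (idx 1) 64-bit elements of vj and vk, forms the full 128-bit product with the supplied muls2/mulu2/mulus2 generator, and stores the low and high halves back as elements 2*i and 2*i+1 of vd. Below is a minimal C reference of one xvmulwev.q.d lane, using the GCC/Clang __int128 extension in place of tcg_gen_muls2_i64(); the names are illustrative and not part of the patch.

#include <stdint.h>

/* One 128-bit lane of xvmulwev.q.d: multiply the even 64-bit elements and
 * split the 128-bit product into low/high destination elements. */
static void mulwev_q_d_lane(int64_t vd[], const int64_t vj[],
                            const int64_t vk[], int i)
{
    __int128 prod = (__int128)vj[2 * i] * vk[2 * i];

    vd[2 * i]     = (int64_t)prod;          /* low 64 bits  (set_vreg64(rl, ...)) */
    vd[2 * i + 1] = (int64_t)(prod >> 64);  /* high 64 bits (set_vreg64(rh, ...)) */
}

gen_vmul_q() and gen_xvmul_q() only differ in the operation size they pass (16 vs. 32 bytes), so the LSX forms run the loop once and the LASX forms twice.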
@@ -2181,6 +2221,9 @@ static void do_vmulwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
 TRANS(vmulwod_h_b, LSX, gvec_vvv, MO_8, do_vmulwod_s)
 TRANS(vmulwod_w_h, LSX, gvec_vvv, MO_16, do_vmulwod_s)
 TRANS(vmulwod_d_w, LSX, gvec_vvv, MO_32, do_vmulwod_s)
+TRANS(xvmulwod_h_b, LASX, gvec_xxx, MO_8, do_vmulwod_s)
+TRANS(xvmulwod_w_h, LASX, gvec_xxx, MO_16, do_vmulwod_s)
+TRANS(xvmulwod_d_w, LASX, gvec_xxx, MO_32, do_vmulwod_s)
 
 static void gen_vmulwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -2251,6 +2294,9 @@ static void do_vmulwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
 TRANS(vmulwev_h_bu, LSX, gvec_vvv, MO_8, do_vmulwev_u)
 TRANS(vmulwev_w_hu, LSX, gvec_vvv, MO_16, do_vmulwev_u)
 TRANS(vmulwev_d_wu, LSX, gvec_vvv, MO_32, do_vmulwev_u)
+TRANS(xvmulwev_h_bu, LASX, gvec_xxx, MO_8, do_vmulwev_u)
+TRANS(xvmulwev_w_hu, LASX, gvec_xxx, MO_16, do_vmulwev_u)
+TRANS(xvmulwev_d_wu, LASX, gvec_xxx, MO_32, do_vmulwev_u)
 
 static void gen_vmulwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -2321,6 +2367,9 @@ static void do_vmulwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
 TRANS(vmulwod_h_bu, LSX, gvec_vvv, MO_8, do_vmulwod_u)
 TRANS(vmulwod_w_hu, LSX, gvec_vvv, MO_16, do_vmulwod_u)
 TRANS(vmulwod_d_wu, LSX, gvec_vvv, MO_32, do_vmulwod_u)
+TRANS(xvmulwod_h_bu, LASX, gvec_xxx, MO_8, do_vmulwod_u)
+TRANS(xvmulwod_w_hu, LASX, gvec_xxx, MO_16, do_vmulwod_u)
+TRANS(xvmulwod_d_wu, LASX, gvec_xxx, MO_32, do_vmulwod_u)
 
 static void gen_vmulwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -2393,6 +2442,9 @@ static void do_vmulwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
 TRANS(vmulwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vmulwev_u_s)
 TRANS(vmulwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vmulwev_u_s)
 TRANS(vmulwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vmulwev_u_s)
+TRANS(xvmulwev_h_bu_b, LASX, gvec_xxx, MO_8, do_vmulwev_u_s)
+TRANS(xvmulwev_w_hu_h, LASX, gvec_xxx, MO_16, do_vmulwev_u_s)
+TRANS(xvmulwev_d_wu_w, LASX, gvec_xxx, MO_32, do_vmulwev_u_s)
 
 static void gen_vmulwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -2462,6 +2514,9 @@ static void do_vmulwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
 TRANS(vmulwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vmulwod_u_s)
 TRANS(vmulwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vmulwod_u_s)
 TRANS(vmulwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vmulwod_u_s)
+TRANS(xvmulwod_h_bu_b, LASX, gvec_xxx, MO_8, do_vmulwod_u_s)
+TRANS(xvmulwod_w_hu_h, LASX, gvec_xxx, MO_16, do_vmulwod_u_s)
+TRANS(xvmulwod_d_wu_w, LASX, gvec_xxx, MO_32, do_vmulwod_u_s)
 
 static void gen_vmadd(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -1473,6 +1473,44 @@ xvmini_hu 0111 01101001 01101 ..... ..... ..... @vv_ui5
 xvmini_wu 0111 01101001 01110 ..... ..... ..... @vv_ui5
 xvmini_du 0111 01101001 01111 ..... ..... ..... @vv_ui5
 
+xvmul_b 0111 01001000 01000 ..... ..... ..... @vvv
+xvmul_h 0111 01001000 01001 ..... ..... ..... @vvv
+xvmul_w 0111 01001000 01010 ..... ..... ..... @vvv
+xvmul_d 0111 01001000 01011 ..... ..... ..... @vvv
+xvmuh_b 0111 01001000 01100 ..... ..... ..... @vvv
+xvmuh_h 0111 01001000 01101 ..... ..... ..... @vvv
+xvmuh_w 0111 01001000 01110 ..... ..... ..... @vvv
+xvmuh_d 0111 01001000 01111 ..... ..... ..... @vvv
+xvmuh_bu 0111 01001000 10000 ..... ..... ..... @vvv
+xvmuh_hu 0111 01001000 10001 ..... ..... ..... @vvv
+xvmuh_wu 0111 01001000 10010 ..... ..... ..... @vvv
+xvmuh_du 0111 01001000 10011 ..... ..... ..... @vvv
+
+xvmulwev_h_b 0111 01001001 00000 ..... ..... ..... @vvv
+xvmulwev_w_h 0111 01001001 00001 ..... ..... ..... @vvv
+xvmulwev_d_w 0111 01001001 00010 ..... ..... ..... @vvv
+xvmulwev_q_d 0111 01001001 00011 ..... ..... ..... @vvv
+xvmulwod_h_b 0111 01001001 00100 ..... ..... ..... @vvv
+xvmulwod_w_h 0111 01001001 00101 ..... ..... ..... @vvv
+xvmulwod_d_w 0111 01001001 00110 ..... ..... ..... @vvv
+xvmulwod_q_d 0111 01001001 00111 ..... ..... ..... @vvv
+xvmulwev_h_bu 0111 01001001 10000 ..... ..... ..... @vvv
+xvmulwev_w_hu 0111 01001001 10001 ..... ..... ..... @vvv
+xvmulwev_d_wu 0111 01001001 10010 ..... ..... ..... @vvv
+xvmulwev_q_du 0111 01001001 10011 ..... ..... ..... @vvv
+xvmulwod_h_bu 0111 01001001 10100 ..... ..... ..... @vvv
+xvmulwod_w_hu 0111 01001001 10101 ..... ..... ..... @vvv
+xvmulwod_d_wu 0111 01001001 10110 ..... ..... ..... @vvv
+xvmulwod_q_du 0111 01001001 10111 ..... ..... ..... @vvv
+xvmulwev_h_bu_b 0111 01001010 00000 ..... ..... ..... @vvv
+xvmulwev_w_hu_h 0111 01001010 00001 ..... ..... ..... @vvv
+xvmulwev_d_wu_w 0111 01001010 00010 ..... ..... ..... @vvv
+xvmulwev_q_du_d 0111 01001010 00011 ..... ..... ..... @vvv
+xvmulwod_h_bu_b 0111 01001010 00100 ..... ..... ..... @vvv
+xvmulwod_w_hu_h 0111 01001010 00101 ..... ..... ..... @vvv
+xvmulwod_d_wu_w 0111 01001010 00110 ..... ..... ..... @vvv
+xvmulwod_q_du_d 0111 01001010 00111 ..... ..... ..... @vvv
+
 xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
 xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
 xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
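These decodetree patterns all use the @vvv format; assuming the usual LoongArch register field layout (vd in bits [4:0], vj in bits [9:5], vk in bits [14:10]), the leading 17 bits shown above form the fixed opcode. A small stand-alone sketch of that field extraction for xvmul.b follows; it is illustrative and not the decodetree-generated decoder.

#include <stdbool.h>
#include <stdint.h>

/* Assumed field layout: vd = insn[4:0], vj = insn[9:5], vk = insn[14:10],
 * opcode = insn[31:15]. For xvmul.b the opcode bits are
 * 0111 01001000 01000 = 0x0e908. */
static bool decode_xvmul_b(uint32_t insn, int *vd, int *vj, int *vk)
{
    if ((insn >> 15) != 0x0e908) {
        return false;
    }
    *vk = (insn >> 10) & 0x1f;
    *vj = (insn >> 5) & 0x1f;
    *vd = insn & 0x1f;
    return true;
}

Under that layout, xvmul.b xr1, xr2, xr3 would encode as 0x74840c41.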
@@ -447,50 +447,53 @@ VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
 VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
 VMINMAXI(vmaxi_du, 64, UD, DO_MAX)
 
 #define DO_VMUH(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
 { \
     int i; \
     VReg *Vd = (VReg *)vd; \
     VReg *Vj = (VReg *)vj; \
     VReg *Vk = (VReg *)vk; \
     typedef __typeof(Vd->E1(0)) T; \
-\
-    for (i = 0; i < LSX_LEN/BIT; i++) { \
-        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
-    } \
+    int oprsz = simd_oprsz(desc); \
+\
+    for (i = 0; i < oprsz / (BIT / 8); i++) { \
+        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
+    } \
 }
 
-void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
-    uint64_t l, h1, h2;
+    int i;
+    uint64_t l, h;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    muls64(&l, &h1, Vj->D(0), Vk->D(0));
-    muls64(&l, &h2, Vj->D(1), Vk->D(1));
-
-    Vd->D(0) = h1;
-    Vd->D(1) = h2;
+    for (i = 0; i < oprsz / 8; i++) {
+        muls64(&l, &h, Vj->D(i), Vk->D(i));
+        Vd->D(i) = h;
+    }
 }
 
 DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
 DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
 DO_VMUH(vmuh_w, 32, D, W, DO_MUH)
 
-void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t desc)
 {
-    uint64_t l, h1, h2;
+    int i;
+    uint64_t l, h;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    mulu64(&l, &h1, Vj->D(0), Vk->D(0));
-    mulu64(&l, &h2, Vj->D(1), Vk->D(1));
-
-    Vd->D(0) = h1;
-    Vd->D(1) = h2;
+    for (i = 0; i < oprsz / 8; i++) {
+        mulu64(&l, &h, Vj->D(i), Vk->D(i));
+        Vd->D(i) = h;
+    }
 }
 
 DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
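The DO_VMUH helpers and the vmuh_d/vmuh_du specializations all compute the high half of the element-wise product: the narrower sizes widen each operand to the next element type (the typedef'd T) and shift the product right by BIT, while the 64-bit forms use muls64()/mulu64() because there is no wider element type to widen into. A plain-C reference of the 64-bit signed case, using the GCC/Clang __int128 extension instead of muls64() (illustrative, not part of the patch):

#include <stdint.h>

/* High 64 bits of the signed 128-bit product, i.e. what vmuh.d/xvmuh.d
 * store per element. */
static int64_t muh_d(int64_t a, int64_t b)
{
    return (int64_t)(((__int128)a * b) >> 64);
}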