target/i386: Dot product AVX helper prep

Make the dpps and dppd helpers AVX-ready

I can't see any obvious reason why dppd shouldn't work on 256-bit ymm
registers, but both AMD and Intel agree that it is xmm-only.

Signed-off-by: Paul Brook <paul@nowt.org>
Message-Id: <20220424220204.2493824-17-paul@nowt.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Paul Brook <paul@nowt.org>
Date:      2022-04-24 23:01:38 +01:00
Committer: Paolo Bonzini
Parent:    cbf4ad5498
Commit:    6f218d6e99
1 file changed, 45 insertions(+), 35 deletions(-)

@@ -1903,55 +1903,64 @@ SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
 SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
 SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
 
-void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                               uint32_t mask)
 {
+    Reg *v = d;
     float32 prod1, prod2, temp2, temp3, temp4;
+    int i;
 
-    /*
-     * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
-     * to correctly round the intermediate results
-     */
-    if (mask & (1 << 4)) {
-        prod1 = float32_mul(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
-    } else {
-        prod1 = float32_zero;
-    }
-    if (mask & (1 << 5)) {
-        prod2 = float32_mul(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
-    } else {
-        prod2 = float32_zero;
-    }
-    temp2 = float32_add(prod1, prod2, &env->sse_status);
-    if (mask & (1 << 6)) {
-        prod1 = float32_mul(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
-    } else {
-        prod1 = float32_zero;
-    }
-    if (mask & (1 << 7)) {
-        prod2 = float32_mul(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
-    } else {
-        prod2 = float32_zero;
-    }
-    temp3 = float32_add(prod1, prod2, &env->sse_status);
-    temp4 = float32_add(temp2, temp3, &env->sse_status);
+    for (i = 0; i < 2 << SHIFT; i += 4) {
+        /*
+         * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
+         * to correctly round the intermediate results
+         */
+        if (mask & (1 << 4)) {
+            prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status);
+        } else {
+            prod1 = float32_zero;
+        }
+        if (mask & (1 << 5)) {
+            prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status);
+        } else {
+            prod2 = float32_zero;
+        }
+        temp2 = float32_add(prod1, prod2, &env->sse_status);
+        if (mask & (1 << 6)) {
+            prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2), &env->sse_status);
+        } else {
+            prod1 = float32_zero;
+        }
+        if (mask & (1 << 7)) {
+            prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3), &env->sse_status);
+        } else {
+            prod2 = float32_zero;
+        }
+        temp3 = float32_add(prod1, prod2, &env->sse_status);
+        temp4 = float32_add(temp2, temp3, &env->sse_status);
 
-    d->ZMM_S(0) = (mask & (1 << 0)) ? temp4 : float32_zero;
-    d->ZMM_S(1) = (mask & (1 << 1)) ? temp4 : float32_zero;
-    d->ZMM_S(2) = (mask & (1 << 2)) ? temp4 : float32_zero;
-    d->ZMM_S(3) = (mask & (1 << 3)) ? tem4 : float32_zero;
+        d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero;
+        d->ZMM_S(i+1) = (mask & (1 << 1)) ? temp4 : float32_zero;
+        d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero;
+        d->ZMM_S(i+3) = (mask & (1 << 3)) ? temp4 : float32_zero;
+    }
 }
 
-void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+#if SHIFT == 1
+/* Oddly, there is no ymm version of dppd */
+void glue(helper_dppd, SUFFIX)(CPUX86State *env,
+                               Reg *d, Reg *s, uint32_t mask)
 {
+    Reg *v = d;
     float64 prod1, prod2, temp2;
 
     if (mask & (1 << 4)) {
-        prod1 = float64_mul(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+        prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
     } else {
         prod1 = float64_zero;
     }
     if (mask & (1 << 5)) {
-        prod2 = float64_mul(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+        prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
     } else {
         prod2 = float64_zero;
     }
@@ -1959,6 +1968,7 @@ void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
     d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero;
     d->ZMM_D(1) = (mask & (1 << 1)) ? temp2 : float64_zero;
 }
+#endif
 
 void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                   uint32_t offset)
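
A note on the indexing for readers outside QEMU: 2 << SHIFT gives 4 elements in the existing xmm build (SHIFT == 1), so the new loop runs once; a ymm build (presumably SHIFT == 2) would run it twice, once per 128-bit lane, with each lane computing its own dot product from imm8 bits 4..7 and broadcasting the sum into the elements selected by bits 0..3. The plain-C sketch below models that per-lane behaviour outside QEMU; dpps_model, nlanes and the test vectors are illustrative only, and native float arithmetic stands in for the softfloat calls.

#include <stdio.h>

/*
 * Plain-C model (not QEMU code) of the per-lane DPPS behaviour the
 * helper's loop implements.  Each 128-bit lane is handled independently:
 * imm8 bits 4..7 select which products contribute, bits 0..3 select
 * which destination elements receive the sum.  nlanes would be 1 for
 * xmm and 2 for ymm.
 */
static void dpps_model(float *d, const float *s, int nlanes, unsigned imm8)
{
    for (int lane = 0; lane < nlanes; lane++) {
        float prod[4];
        for (int j = 0; j < 4; j++) {
            int k = 4 * lane + j;
            prod[j] = (imm8 & (1u << (4 + j))) ? d[k] * s[k] : 0.0f;
        }
        /* (A+B)+(C+D), matching the summation order the helper preserves */
        float sum = (prod[0] + prod[1]) + (prod[2] + prod[3]);
        for (int j = 0; j < 4; j++) {
            d[4 * lane + j] = (imm8 & (1u << j)) ? sum : 0.0f;
        }
    }
}

int main(void)
{
    float d[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    float s[8] = { 1, 1, 1, 1, 2, 2, 2, 2 };

    dpps_model(d, s, 2, 0xf1);      /* use all products, store to element 0 */
    printf("lane sums: %g %g\n", d[0], d[4]);   /* 10 and 52 */
    return 0;
}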
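
The comment preserved inside the loop, about evaluating (A+B)+(C+D) rather than ((A+B)+C)+D, matters because float32 addition does not associate: the two orders can round differently, so the helper keeps the pairwise order. A minimal standalone demonstration with made-up values (not taken from the patch):

#include <stdio.h>

int main(void)
{
    /* Illustrative values: 0x1p-24f is half an ulp of 1.0f, so it is lost
       when added to 1.0f one term at a time, but two of them combined
       first (2^-23) survive the final addition. */
    float a = 1.0f, b = 0x1p-24f, c = 0x1p-24f, d = 0x1p-24f;

    float paired = (a + b) + (c + d);   /* order kept by the helper */
    float serial = ((a + b) + c) + d;   /* naive left-to-right order */

    printf("paired = %.9g, serial = %.9g\n", paired, serial);
    /* prints: paired = 1.00000012, serial = 1 */
    return 0;
}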