mirror of https://github.com/xemu-project/xemu.git
target/arm: Use clmul_64
Use the generic routine for 64-bit carry-less multiply.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
00f463b38a
commit
a50cfdf0be
|
@ -2003,28 +2003,14 @@ void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
|
|||
*/
|
||||
void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
|
||||
{
|
||||
intptr_t i, j, opr_sz = simd_oprsz(desc);
|
||||
intptr_t i, opr_sz = simd_oprsz(desc);
|
||||
intptr_t hi = simd_data(desc);
|
||||
uint64_t *d = vd, *n = vn, *m = vm;
|
||||
|
||||
for (i = 0; i < opr_sz / 8; i += 2) {
|
||||
uint64_t nn = n[i + hi];
|
||||
uint64_t mm = m[i + hi];
|
||||
uint64_t rhi = 0;
|
||||
uint64_t rlo = 0;
|
||||
|
||||
/* Bit 0 can only influence the low 64-bit result. */
|
||||
if (nn & 1) {
|
||||
rlo = mm;
|
||||
}
|
||||
|
||||
for (j = 1; j < 64; ++j) {
|
||||
uint64_t mask = -((nn >> j) & 1);
|
||||
rlo ^= (mm << j) & mask;
|
||||
rhi ^= (mm >> (64 - j)) & mask;
|
||||
}
|
||||
d[i] = rlo;
|
||||
d[i + 1] = rhi;
|
||||
Int128 r = clmul_64(n[i + hi], m[i + hi]);
|
||||
d[i] = int128_getlo(r);
|
||||
d[i + 1] = int128_gethi(r);
|
||||
}
|
||||
clear_tail(d, opr_sz, simd_maxsz(desc));
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue