crypto: Add generic 16-bit carry-less multiply routines
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent cec4090d94
commit cf1b2cab83
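For context, a carry-less (polynomial) multiply XORs shifted copies of one operand instead of adding them, i.e. it multiplies polynomials over GF(2), the arithmetic used by GHASH and CRC. A minimal scalar reference for a single 16x16->32 product, purely illustrative and not part of this commit (the name clmul_16_ref is made up here):

    #include <stdint.h>

    /* Illustrative reference: one 16x16->32 carry-less multiply. */
    static uint32_t clmul_16_ref(uint16_t a, uint16_t b)
    {
        uint32_t r = 0;
        for (int i = 0; i < 16; i++) {
            if (b & (1u << i)) {
                r ^= (uint32_t)a << i;   /* shifted copy combined by XOR */
            }
        }
        return r;
    }

For example, 0x3 clmul 0x3 = 0x3 ^ 0x6 = 0x5, whereas the ordinary integer product would be 9.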
crypto/clmul.c:

@@ -58,3 +58,24 @@ uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
 {
     return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
 }
+
+uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    n &= 0x0000ffff0000ffffull;
+    m &= 0x0000ffff0000ffffull;
+
+    for (int i = 0; i < 16; ++i) {
+        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
+uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
+{
+    return clmul_16x2_even(n >> 16, m >> 16);
+}
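Both 16x16->32 products are accumulated in one pass: the inputs are masked down to the even 16-bit words (bits 0-15 and 32-47), and each iteration the expression (n & 0x0000000100000001ull) * 0xffffffffull broadcasts the current low bit of each 32-bit lane into a full 32-bit lane mask, so the shifted m is XORed into both lanes at once; because each operand is confined to the lower half of its lane, the shifts never spill across the lane boundary. A hedged sanity-check sketch, reusing the clmul_16_ref helper from above and assuming the header is included as "crypto/clmul.h":

    #include <assert.h>
    #include <stdint.h>
    #include "crypto/clmul.h"   /* assumed include path for the new routines */

    static void check_clmul_16x2_even(void)
    {
        uint16_t a0 = 0x1234, b0 = 0x00ff;   /* arbitrary test words */
        uint16_t a1 = 0xbeef, b1 = 0x8001;

        /* Operands live in the even 16-bit words of each 32-bit lane. */
        uint64_t n = (uint64_t)a1 << 32 | a0;
        uint64_t m = (uint64_t)b1 << 32 | b0;

        uint64_t r = clmul_16x2_even(n, m);

        /* Each 32-bit lane of r holds one independent carry-less product. */
        assert((uint32_t)r == clmul_16_ref(a0, b0));
        assert((uint32_t)(r >> 32) == clmul_16_ref(a1, b1));
    }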
include/crypto/clmul.h:

@@ -38,4 +38,20 @@ uint64_t clmul_8x4_odd(uint64_t, uint64_t);
  */
 uint64_t clmul_8x4_packed(uint32_t, uint32_t);
 
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd(uint64_t, uint64_t);
+
 #endif /* CRYPTO_CLMUL_H */
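As a usage sketch (a hypothetical helper, not part of this commit): a caller emulating a packed 16-bit polynomial multiply can cover all four 16-bit lanes of a 64-bit vector by combining the even and odd variants:

    #include <stdint.h>
    #include "crypto/clmul.h"

    /*
     * Hypothetical example: carry-less multiply all four 16-bit lanes of two
     * 64-bit vectors, yielding four 32-bit products split across two results.
     */
    static void clmul_16x4(uint64_t *even, uint64_t *odd, uint64_t n, uint64_t m)
    {
        *even = clmul_16x2_even(n, m);  /* products of 16-bit words 0 and 2 */
        *odd  = clmul_16x2_odd(n, m);   /* products of 16-bit words 1 and 3 */
    }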