util/bufferiszero: Use i386 host/cpuinfo.h

Use cpuinfo_init() during init_accel(), and the variable cpuinfo
during test_buffer_is_zero_next_accel().  Adjust the logic that
cycles through the set of accelerators for testing.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson
Date:   2023-05-17 19:10:59 -07:00
Parent: dbedadbaad
Commit: 51f4d916b5
1 changed file with 46 additions and 81 deletions
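
For context on the cycling interface this commit reworks, here is a rough sketch (not part of the commit) of how a caller is expected to walk through every accelerator the host supports. buffer_is_zero() and test_buffer_is_zero_next_accel() are the real entry points declared in "qemu/cutils.h"; check_all_accels() and the zero-filled test buffer are made up for illustration.

#include "qemu/osdep.h"
#include "qemu/cutils.h"

/* Illustrative only: run the zero check once per available accelerator. */
static void check_all_accels(void)
{
    static char buf[4096];   /* zero-initialized, so buffer_is_zero() is true */

    do {
        /* Uses whichever accelerator is currently selected. */
        g_assert(buffer_is_zero(buf, sizeof(buf)));
        /*
         * Advance to the next accelerator in preference order; returns
         * false once every implementation supported by the host (and
         * the compiler flags) has been exercised.
         */
    } while (test_buffer_is_zero_next_accel());
}

This mirrors how the unit tests exercise each implementation in turn: run the check, then ask for the next-most-preferred accelerator until none remain.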


@@ -24,6 +24,7 @@
 #include "qemu/osdep.h"
 #include "qemu/cutils.h"
 #include "qemu/bswap.h"
+#include "host/cpuinfo.h"
 
 static bool
 buffer_zero_int(const void *buf, size_t len)
@@ -184,111 +185,75 @@ buffer_zero_avx512(const void *buf, size_t len)
 }
 #endif /* CONFIG_AVX512F_OPT */
 
-/* Note that for test_buffer_is_zero_next_accel, the most preferred
- * ISA must have the least significant bit.
- */
-#define CACHE_AVX512F 1
-#define CACHE_AVX2 2
-#define CACHE_SSE4 4
-#define CACHE_SSE2 8
-
-/* Make sure that these variables are appropriately initialized when
+/*
+ * Make sure that these variables are appropriately initialized when
  * SSE2 is enabled on the compiler command-line, but the compiler is
  * too old to support CONFIG_AVX2_OPT.
  */
 #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-# define INIT_CACHE 0
-# define INIT_ACCEL buffer_zero_int
+# define INIT_USED   0
+# define INIT_LENGTH 0
+# define INIT_ACCEL  buffer_zero_int
 #else
 # ifndef __SSE2__
 #  error "ISA selection confusion"
 # endif
-# define INIT_CACHE CACHE_SSE2
-# define INIT_ACCEL buffer_zero_sse2
+# define INIT_USED   CPUINFO_SSE2
+# define INIT_LENGTH 64
+# define INIT_ACCEL  buffer_zero_sse2
 #endif
 
-static unsigned cpuid_cache = INIT_CACHE;
+static unsigned used_accel = INIT_USED;
+static unsigned length_to_accel = INIT_LENGTH;
 static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;
-static int length_to_accel = 64;
 
-static void init_accel(unsigned cache)
+static unsigned __attribute__((noinline))
+select_accel_cpuinfo(unsigned info)
 {
-    bool (*fn)(const void *, size_t) = buffer_zero_int;
-    if (cache & CACHE_SSE2) {
-        fn = buffer_zero_sse2;
-        length_to_accel = 64;
-    }
-#ifdef CONFIG_AVX2_OPT
-    if (cache & CACHE_SSE4) {
-        fn = buffer_zero_sse4;
-        length_to_accel = 64;
-    }
-    if (cache & CACHE_AVX2) {
-        fn = buffer_zero_avx2;
-        length_to_accel = 128;
-    }
-#endif
+    /* Array is sorted in order of algorithm preference. */
+    static const struct {
+        unsigned bit;
+        unsigned len;
+        bool (*fn)(const void *, size_t);
+    } all[] = {
 #ifdef CONFIG_AVX512F_OPT
-    if (cache & CACHE_AVX512F) {
-        fn = buffer_zero_avx512;
-        length_to_accel = 256;
-    }
+        { CPUINFO_AVX512F, 256, buffer_zero_avx512 },
 #endif
-    buffer_accel = fn;
+#ifdef CONFIG_AVX2_OPT
+        { CPUINFO_AVX2, 128, buffer_zero_avx2 },
+        { CPUINFO_SSE4, 64, buffer_zero_sse4 },
+#endif
+        { CPUINFO_SSE2, 64, buffer_zero_sse2 },
+        { CPUINFO_ALWAYS, 0, buffer_zero_int },
+    };
+
+    for (unsigned i = 0; i < ARRAY_SIZE(all); ++i) {
+        if (info & all[i].bit) {
+            length_to_accel = all[i].len;
+            buffer_accel = all[i].fn;
+            return all[i].bit;
+        }
+    }
+    return 0;
 }
 
 #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-#include "qemu/cpuid.h"
-
-static void __attribute__((constructor)) init_cpuid_cache(void)
+static void __attribute__((constructor)) init_accel(void)
 {
-    unsigned max = __get_cpuid_max(0, NULL);
-    int a, b, c, d;
-    unsigned cache = 0;
-
-    if (max >= 1) {
-        __cpuid(1, a, b, c, d);
-        if (d & bit_SSE2) {
-            cache |= CACHE_SSE2;
-        }
-        if (c & bit_SSE4_1) {
-            cache |= CACHE_SSE4;
-        }
-
-        /* We must check that AVX is not just available, but usable. */
-        if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) {
-            unsigned bv = xgetbv_low(0);
-            __cpuid_count(7, 0, a, b, c, d);
-            if ((bv & 0x6) == 0x6 && (b & bit_AVX2)) {
-                cache |= CACHE_AVX2;
-            }
-            /* 0xe6:
-             *  XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15
-             *                    and ZMM16-ZMM31 state are enabled by OS)
-             *  XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS)
-             */
-            if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512F)) {
-                cache |= CACHE_AVX512F;
-            }
-        }
-    }
-    cpuid_cache = cache;
-    init_accel(cache);
+    used_accel = select_accel_cpuinfo(cpuinfo_init());
 }
 #endif /* CONFIG_AVX2_OPT */
 
 bool test_buffer_is_zero_next_accel(void)
 {
-    /* If no bits set, we just tested buffer_zero_int, and there
-       are no more acceleration options to test. */
-    if (cpuid_cache == 0) {
-        return false;
-    }
-    /* Disable the accelerator we used before and select a new one. */
-    cpuid_cache &= cpuid_cache - 1;
-    init_accel(cpuid_cache);
-    return true;
+    /*
+     * Accumulate the accelerators that we've already tested, and
+     * remove them from the set to test this round.  We'll get back
+     * a zero from select_accel_cpuinfo when there are no more.
+     */
+    unsigned used = select_accel_cpuinfo(cpuinfo & ~used_accel);
+    used_accel |= used;
+    return used;
 }
 
 static bool select_accel_fn(const void *buf, size_t len)