mirror of https://github.com/xemu-project/xemu.git
i386/cpu: Use CPUCacheInfo.share_level to encode CPUID[4]
CPUID[4].EAX[bits 25:14] is used to represent the cache topology for Intel CPUs.

After cache models have topology information, we can use CPUCacheInfo.share_level to decide which topology level gets encoded into CPUID[4].EAX[bits 25:14].

And since the helper max_thread_ids_for_cache() derives the field CPUID[4].EAX[bits 25:14] (the original variable "num_apic_ids") from the CPU topology levels, which are already verified when parsing -smp, there is no need to check this value with "assert(num_apic_ids > 0)" again, so remove this assert().

Additionally, wrap the encoding of CPUID[4].EAX[bits 31:26] into a helper to make the code cleaner.

Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Tested-by: Babu Moger <babu.moger@amd.com>
Message-ID: <20240424154929.1487382-21-zhao1.liu@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9fcba76ab9
commit f602eb925a
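To illustrate the encoding this patch centralizes, here is a small, self-contained sketch. It is not QEMU code: the hard-coded apicid_*_offset() values model a hypothetical package of 2 dies x 2 cores x 2 threads (one APIC ID bit per level), and main() is only a driver. It mirrors the two helpers added below: EAX[bits 25:14] holds the maximum number of addressable thread IDs sharing the cache minus 1, selected by the cache's share_level, and EAX[bits 31:26] holds the maximum number of addressable core IDs in the package minus 1.

/*
 * Illustrative sketch only, not QEMU code. The apicid_*_offset()
 * values are hard-coded for a hypothetical 2-die x 2-core x
 * 2-thread package (1 APIC ID bit per topology level).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum CPUTopoLevel {
    CPU_TOPO_LEVEL_CORE,
    CPU_TOPO_LEVEL_DIE,
    CPU_TOPO_LEVEL_PACKAGE,
};

static uint32_t apicid_core_offset(void) { return 1; } /* 2 threads/core */
static uint32_t apicid_die_offset(void)  { return 2; } /* 2 cores/die */
static uint32_t apicid_pkg_offset(void)  { return 3; } /* 2 dies/package */

/* EAX[25:14]: addressable thread IDs sharing the cache, minus 1 */
static uint32_t max_thread_ids_for_cache(enum CPUTopoLevel share_level)
{
    switch (share_level) {
    case CPU_TOPO_LEVEL_CORE:
        return (1 << apicid_core_offset()) - 1;
    case CPU_TOPO_LEVEL_DIE:
        return (1 << apicid_die_offset()) - 1;
    default: /* CPU_TOPO_LEVEL_PACKAGE */
        return (1 << apicid_pkg_offset()) - 1;
    }
}

/* EAX[31:26]: addressable core IDs in the physical package, minus 1 */
static uint32_t max_core_ids_in_package(void)
{
    return (1 << (apicid_pkg_offset() - apicid_core_offset())) - 1;
}

int main(void)
{
    /* An L2 cache shared per core: 2 sharing threads, 4 cores/package */
    uint32_t eax = (max_core_ids_in_package() << 26) |
                   (max_thread_ids_for_cache(CPU_TOPO_LEVEL_CORE) << 14);

    printf("EAX[25:14] = %" PRIu32 "\n", (eax >> 14) & 0xFFF); /* 1 */
    printf("EAX[31:26] = %" PRIu32 "\n", (eax >> 26) & 0x3F);  /* 3 */
    return 0;
}

The "minus 1" convention follows the field definitions quoted in the removed comments below ("maximum number of addressable IDs"), which is why both helpers return num - 1. With share_level stored per cache model, call sites no longer compute the width themselves; the L1 compat case is instead handled after encoding, as the later hunks show.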
target/i386/cpu.c
@@ -235,22 +235,53 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
                         ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                         0 /* Invalid value */)
 
+static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
+                                         enum CPUTopoLevel share_level)
+{
+    uint32_t num_ids = 0;
+
+    switch (share_level) {
+    case CPU_TOPO_LEVEL_CORE:
+        num_ids = 1 << apicid_core_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_DIE:
+        num_ids = 1 << apicid_die_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_PACKAGE:
+        num_ids = 1 << apicid_pkg_offset(topo_info);
+        break;
+    default:
+        /*
+         * Currently there is no use case for SMT and MODULE, so use
+         * assert directly to facilitate debugging.
+         */
+        g_assert_not_reached();
+    }
+
+    return num_ids - 1;
+}
+
+static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
+{
+    uint32_t num_cores = 1 << (apicid_pkg_offset(topo_info) -
+                               apicid_core_offset(topo_info));
+    return num_cores - 1;
+}
+
 /* Encode cache info for CPUID[4] */
 static void encode_cache_cpuid4(CPUCacheInfo *cache,
-                                int num_apic_ids, int num_cores,
+                                X86CPUTopoInfo *topo_info,
                                 uint32_t *eax, uint32_t *ebx,
                                 uint32_t *ecx, uint32_t *edx)
 {
     assert(cache->size == cache->line_size * cache->associativity *
                           cache->partitions * cache->sets);
 
-    assert(num_apic_ids > 0);
     *eax = CACHE_TYPE(cache->type) |
            CACHE_LEVEL(cache->level) |
            (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
-           ((num_cores - 1) << 26) |
-           ((num_apic_ids - 1) << 14);
+           (max_core_ids_in_package(topo_info) << 26) |
+           (max_thread_ids_for_cache(topo_info, cache->share_level) << 14);
 
     assert(cache->line_size > 0);
     assert(cache->partitions > 0);
@@ -6392,18 +6423,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
                (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
         break;
-    case 4: {
-        /*
-         * CPUID.04H:EAX[bits 25:14]: Maximum number of addressable IDs for
-         * logical processors sharing this cache.
-         */
-        int addressable_threads_width;
-        /*
-         * CPUID.04H:EAX[bits 31:26]: Maximum number of addressable IDs for
-         * processor cores in the physical package.
-         */
-        int addressable_cores_width;
-
+    case 4:
         /* cache info: needed for Core compatibility */
         if (cpu->cache_info_passthrough) {
             x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
@@ -6415,59 +6435,48 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                 int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
 
                 if (cores_per_pkg > 1) {
-                    addressable_cores_width = apicid_pkg_offset(&topo_info) -
-                                              apicid_core_offset(&topo_info);
-
                     *eax &= ~0xFC000000;
-                    *eax |= ((1 << addressable_cores_width) - 1) << 26;
+                    *eax |= max_core_ids_in_package(&topo_info) << 26;
                 }
                 if (host_vcpus_per_cache > threads_per_pkg) {
-                    /* Share the cache at package level. */
-                    addressable_threads_width = apicid_pkg_offset(&topo_info);
-
                     *eax &= ~0x3FFC000;
-                    *eax |= ((1 << addressable_threads_width) - 1) << 14;
+
+                    /* Share the cache at package level. */
+                    *eax |= max_thread_ids_for_cache(&topo_info,
+                                                     CPU_TOPO_LEVEL_PACKAGE) << 14;
                 }
             }
         } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
             *eax = *ebx = *ecx = *edx = 0;
         } else {
             *eax = 0;
-            addressable_cores_width = apicid_pkg_offset(&topo_info) -
-                                      apicid_core_offset(&topo_info);
-
             switch (count) {
             case 0: /* L1 dcache info */
-                addressable_threads_width = cpu->l1_cache_per_core
-                                            ? apicid_core_offset(&topo_info)
-                                            : 0;
                 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
-                                    (1 << addressable_threads_width),
-                                    (1 << addressable_cores_width),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
+                if (!cpu->l1_cache_per_core) {
+                    *eax &= ~MAKE_64BIT_MASK(14, 12);
+                }
                 break;
             case 1: /* L1 icache info */
-                addressable_threads_width = cpu->l1_cache_per_core
-                                            ? apicid_core_offset(&topo_info)
-                                            : 0;
                 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
-                                    (1 << addressable_threads_width),
-                                    (1 << addressable_cores_width),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
+                if (!cpu->l1_cache_per_core) {
+                    *eax &= ~MAKE_64BIT_MASK(14, 12);
+                }
                 break;
             case 2: /* L2 cache info */
-                addressable_threads_width = apicid_core_offset(&topo_info);
                 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
-                                    (1 << addressable_threads_width),
-                                    (1 << addressable_cores_width),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
                 break;
             case 3: /* L3 cache info */
                 if (cpu->enable_l3_cache) {
-                    addressable_threads_width = apicid_die_offset(&topo_info);
                     encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
-                                        (1 << addressable_threads_width),
-                                        (1 << addressable_cores_width),
+                                        &topo_info,
                                         eax, ebx, ecx, edx);
                     break;
                 }
@@ -6478,7 +6487,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
             }
         }
         break;
-    }
     case 5:
         /* MONITOR/MWAIT Leaf */
         *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
|
@ -2011,6 +2011,11 @@ struct ArchCPU {
|
||||||
*/
|
*/
|
||||||
bool enable_l3_cache;
|
bool enable_l3_cache;
|
||||||
|
|
||||||
|
/* Compatibility bits for old machine types.
|
||||||
|
* If true present L1 cache as per-thread, not per-core.
|
||||||
|
*/
|
||||||
|
bool l1_cache_per_core;
|
||||||
|
|
||||||
/* Compatibility bits for old machine types.
|
/* Compatibility bits for old machine types.
|
||||||
* If true present the old cache topology information
|
* If true present the old cache topology information
|
||||||
*/
|
*/
|
||||||
|
|
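One detail worth calling out from the cpu.c hunks: when l1_cache_per_core is false, the thread-sharing field is now cleared after encoding rather than being encoded as zero. The mask covers exactly CPUID[4].EAX[bits 25:14], matching the ~0x3FFC000 used in the passthrough path. A quick standalone check (the macro mirrors MAKE_64BIT_MASK as defined in QEMU's include/qemu/bitops.h; the printf driver is illustrative only):

#include <inttypes.h>
#include <stdio.h>

/* Same shape as QEMU's MAKE_64BIT_MASK in include/qemu/bitops.h */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    /* 12 bits starting at bit 14: exactly CPUID[4].EAX[bits 25:14] */
    printf("0x%" PRIx64 "\n", (uint64_t)MAKE_64BIT_MASK(14, 12)); /* 0x3ffc000 */
    return 0;
}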