mirror of https://github.com/xemu-project/xemu.git
hvf: complete 1G page support
Hvf on x86 only supported 2MiB large pages, but never bothered to strip out the 1GiB page size capability from -cpu host. With QEMU 8.0.0 this became a problem because OVMF started to use 1GiB pages by default.

Let's just unconditionally add 1GiB page walk support to the walker. With this fix applied, I can successfully run OVMF again.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1603
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reported-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Reported-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Phil Dennis-Jordan <phil@philjordan.eu>
Link: https://lore.kernel.org/r/20230420225258.58009-1-agraf@csgraf.de
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
0266aef8cd
commit
654076bc20
|
@ -38,6 +38,7 @@
|
|||
#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
|
||||
#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
|
||||
#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
|
||||
#define PAE_PTE_SUPER_PAGE_MASK ((-1llu << (30)) & ((1llu << 52) - 1))
|
||||
|
||||
struct gpt_translation {
|
||||
target_ulong gva;
|
||||
|
@ -96,7 +97,7 @@ static bool get_pt_entry(CPUState *cpu, struct gpt_translation *pt,
|
|||
|
||||
/* test page table entry */
|
||||
static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt,
|
||||
int level, bool *is_large, bool pae)
|
||||
int level, int *largeness, bool pae)
|
||||
{
|
||||
uint64_t pte = pt->pte[level];
|
||||
|
||||
|
@ -118,9 +119,9 @@ static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt,
|
|||
goto exit;
|
||||
}
|
||||
|
||||
if (1 == level && pte_large_page(pte)) {
|
||||
if (level && pte_large_page(pte)) {
|
||||
pt->err_code |= MMU_PAGE_PT;
|
||||
*is_large = true;
|
||||
*largeness = level;
|
||||
}
|
||||
if (!level) {
|
||||
pt->err_code |= MMU_PAGE_PT;
|
||||
|
@ -152,9 +153,18 @@ static inline uint64_t pse_pte_to_page(uint64_t pte)
|
|||
return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
|
||||
}
|
||||
|
||||
static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
|
||||
static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae,
|
||||
int largeness)
|
||||
{
|
||||
VM_PANIC_ON(!pte_large_page(pt->pte[1]))
|
||||
VM_PANIC_ON(!pte_large_page(pt->pte[largeness]))
|
||||
|
||||
/* 1Gib large page */
|
||||
if (pae && largeness == 2) {
|
||||
return (pt->pte[2] & PAE_PTE_SUPER_PAGE_MASK) | (pt->gva & 0x3fffffff);
|
||||
}
|
||||
|
||||
VM_PANIC_ON(largeness != 1)
|
||||
|
||||
/* 2Mb large page */
|
||||
if (pae) {
|
||||
return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
|
||||
|
@ -170,7 +180,7 @@ static bool walk_gpt(CPUState *cpu, target_ulong addr, int err_code,
|
|||
struct gpt_translation *pt, bool pae)
|
||||
{
|
||||
int top_level, level;
|
||||
bool is_large = false;
|
||||
int largeness = 0;
|
||||
target_ulong cr3 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR3);
|
||||
uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
|
||||
|
||||
|
@ -186,19 +196,19 @@ static bool walk_gpt(CPUState *cpu, target_ulong addr, int err_code,
|
|||
for (level = top_level; level > 0; level--) {
|
||||
get_pt_entry(cpu, pt, level, pae);
|
||||
|
||||
if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
|
||||
if (!test_pt_entry(cpu, pt, level - 1, &largeness, pae)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (is_large) {
|
||||
if (largeness) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_large) {
|
||||
if (!largeness) {
|
||||
pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
|
||||
} else {
|
||||
pt->gpa = large_page_gpa(pt, pae);
|
||||
pt->gpa = large_page_gpa(pt, pae, largeness);
|
||||
}
|
||||
|
||||
return true;
|
||||
|
|
Loading…
Reference in New Issue