mirror of https://github.com/xemu-project/xemu.git
linux-user: Do not adjust zero_bss for host page size
Rely on target_mmap to handle guest vs host page size mismatch.

Tested-by: Helge Deller <deller@gmx.de>
Reviewed-by: Helge Deller <deller@gmx.de>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 2d385be615
parent e3d97d5c5d
linux-user/elfload.c

@@ -2211,47 +2211,37 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
     }
 }
 
-/* Map and zero the bss.  We need to explicitly zero any fractional pages
-   after the data section (i.e. bss). */
-static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
+/**
+ * zero_bss:
+ *
+ * Map and zero the bss.  We need to explicitly zero any fractional pages
+ * after the data section (i.e. bss).  Return false on mapping failure.
+ */
+static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, int prot)
 {
-    uintptr_t host_start, host_map_start, host_end;
+    abi_ulong align_bss;
 
-    last_bss = TARGET_PAGE_ALIGN(last_bss);
+    align_bss = TARGET_PAGE_ALIGN(start_bss);
+    end_bss = TARGET_PAGE_ALIGN(end_bss);
 
-    /* ??? There is confusion between qemu_real_host_page_size and
-       qemu_host_page_size here and elsewhere in target_mmap, which
-       may lead to the end of the data section mapping from the file
-       not being mapped.  At least there was an explicit test and
-       comment for that here, suggesting that "the file size must
-       be known".  The comment probably pre-dates the introduction
-       of the fstat system call in target_mmap which does in fact
-       find out the size.  What isn't clear is if the workaround
-       here is still actually needed.  For now, continue with it,
-       but merge it with the "normal" mmap that would allocate the bss.  */
+    if (start_bss < align_bss) {
+        int flags = page_get_flags(start_bss);
 
-    host_start = (uintptr_t) g2h_untagged(elf_bss);
-    host_end = (uintptr_t) g2h_untagged(last_bss);
-    host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
-
-    if (host_map_start < host_end) {
-        void *p = mmap((void *)host_map_start, host_end - host_map_start,
-                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-        if (p == MAP_FAILED) {
-            perror("cannot mmap brk");
-            exit(-1);
+        if (!(flags & PAGE_VALID)) {
+            /* Map the start of the bss. */
+            align_bss -= TARGET_PAGE_SIZE;
+        } else if (flags & PAGE_WRITE) {
+            /* The page is already mapped writable. */
+            memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
+        } else {
+            /* Read-only zeros? */
+            g_assert_not_reached();
         }
     }
 
-    /* Ensure that the bss page(s) are valid */
-    if ((page_get_flags(last_bss-1) & prot) != prot) {
-        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
-                       prot | PAGE_VALID);
-    }
-
-    if (host_start < host_map_start) {
-        memset((void *)host_start, 0, host_map_start - host_start);
-    }
+    return align_bss >= end_bss ||
+           target_mmap(align_bss, end_bss - align_bss, prot,
+                       MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) != -1;
 }
 
 #if defined(TARGET_ARM)
@@ -3255,8 +3245,9 @@ static void load_elf_image(const char *image_name, int image_fd,
             /*
              * If the load segment requests extra zeros (e.g. bss), map it.
              */
-            if (eppnt->p_filesz < eppnt->p_memsz) {
-                zero_bss(vaddr_ef, vaddr_em, elf_prot);
+            if (eppnt->p_filesz < eppnt->p_memsz &&
+                !zero_bss(vaddr_ef, vaddr_em, elf_prot)) {
+                goto exit_mmap;
             }
         } else if (eppnt->p_memsz != 0) {
             vaddr_len = eppnt->p_memsz + vaddr_po;
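The split the new zero_bss makes is easier to see in isolation. Below is a minimal, self-contained sketch, not QEMU code: GUEST_PAGE_SIZE, guest_page_align, and the example addresses are hypothetical stand-ins for TARGET_PAGE_SIZE, TARGET_PAGE_ALIGN, and the vaddr_ef/vaddr_em values that load_elf_image passes in. It prints the fractional tail of the last file-backed page, which must be zeroed by hand because that page is already mapped from the file, and the whole-page remainder, which can simply be mapped anonymous and so arrives pre-zeroed. Both ranges are computed with guest page alignment only; translating them to host pages is what the patch delegates to target_mmap.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical guest page size; QEMU's TARGET_PAGE_SIZE plays this role. */
#define GUEST_PAGE_SIZE 0x1000u

/* Round up to the next guest page boundary (cf. TARGET_PAGE_ALIGN). */
static uint64_t guest_page_align(uint64_t addr)
{
    return (addr + GUEST_PAGE_SIZE - 1) & ~(uint64_t)(GUEST_PAGE_SIZE - 1);
}

int main(void)
{
    /* Example segment: file-backed data ends mid-page at start_bss,
     * and p_memsz extends the image out to end_bss. */
    uint64_t start_bss = 0x40321;  /* stands in for vaddr_ef */
    uint64_t end_bss   = 0x42000;  /* stands in for vaddr_em */

    uint64_t align_bss = guest_page_align(start_bss);
    uint64_t align_end = guest_page_align(end_bss);

    if (start_bss < align_bss) {
        /* Fractional page: zero by hand, since the page is file-backed. */
        printf("memset [%#llx, %#llx): %llu bytes\n",
               (unsigned long long)start_bss,
               (unsigned long long)align_bss,
               (unsigned long long)(align_bss - start_bss));
    }
    if (align_bss < align_end) {
        /* Whole pages: map anonymous memory, which is already zeroed. */
        printf("mmap   [%#llx, %#llx)\n",
               (unsigned long long)align_bss,
               (unsigned long long)align_end);
    }
    return 0;
}

With these example values the sketch reports a 3295-byte memset over [0x40321, 0x41000) and an anonymous mapping over [0x41000, 0x42000); the commit's error-handling change then corresponds to checking the mmap result, which the second hunk propagates via the new bool return and goto exit_mmap.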