linux-user: Fix formatting of mmap.c

Fix all checkpatch.pl errors within mmap.c.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230707204054.8792-5-richard.henderson@linaro.org>
Author: Richard Henderson
Date:   2023-07-07 21:40:32 +01:00
Parent: d28b3c90cf
Commit: 2b730f797e
1 changed file with 122 additions and 77 deletions
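
All of the changes below are mechanical style fixes flagged by checkpatch.pl:
braces added around single-statement if/else/for arms, spaces added after the
"for" keyword and around binary operators such as "|", a stray space removed in
"fstat (", and multi-line comments rewritten in QEMU's block style with a
leading "*" on each line. As a minimal before/after sketch of the most common
fix, the brace rule (mirroring the mmap_frag() hunk below):

    /* before: checkpatch error, braces are necessary for all arms */
    if (p == MAP_FAILED)
        return -1;

    /* after: QEMU coding style */
    if (p == MAP_FAILED) {
        return -1;
    }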

--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -56,10 +56,11 @@ void mmap_fork_start(void)
 
 void mmap_fork_end(int child)
 {
-    if (child)
+    if (child) {
         pthread_mutex_init(&mmap_mutex, NULL);
-    else
+    } else {
         pthread_mutex_unlock(&mmap_mutex);
+    }
 }
 
 /*
@@ -203,40 +204,47 @@ static int mmap_frag(abi_ulong real_start,
 
     /* get the protection of the target pages outside the mapping */
     prot1 = 0;
-    for(addr = real_start; addr < real_end; addr++) {
-        if (addr < start || addr >= end)
+    for (addr = real_start; addr < real_end; addr++) {
+        if (addr < start || addr >= end) {
             prot1 |= page_get_flags(addr);
+        }
     }
 
     if (prot1 == 0) {
         /* no page was there, so we allocate one */
         void *p = mmap(host_start, qemu_host_page_size, prot,
                        flags | MAP_ANONYMOUS, -1, 0);
-        if (p == MAP_FAILED)
+        if (p == MAP_FAILED) {
             return -1;
+        }
         prot1 = prot;
     }
     prot1 &= PAGE_BITS;
 
     prot_new = prot | prot1;
     if (!(flags & MAP_ANONYMOUS)) {
-        /* msync() won't work here, so we return an error if write is
-           possible while it is a shared mapping */
-        if ((flags & MAP_TYPE) == MAP_SHARED &&
-            (prot & PROT_WRITE))
+        /*
+         * msync() won't work here, so we return an error if write is
+         * possible while it is a shared mapping.
+         */
+        if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) {
             return -1;
+        }
 
         /* adjust protection to be able to read */
-        if (!(prot1 & PROT_WRITE))
+        if (!(prot1 & PROT_WRITE)) {
             mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
+        }
 
         /* read the corresponding file data */
-        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
+        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
             return -1;
+        }
 
         /* put final protection */
-        if (prot_new != (prot1 | PROT_WRITE))
+        if (prot_new != (prot1 | PROT_WRITE)) {
             mprotect(host_start, qemu_host_page_size, prot_new);
+        }
     } else {
         if (prot_new != prot1) {
             mprotect(host_start, qemu_host_page_size, prot_new);
@@ -265,8 +273,10 @@ abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
 
 unsigned long last_brk;
 
-/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
-   of guest address space. */
+/*
+ * Subroutine of mmap_find_vma, used when we have pre-allocated
+ * a chunk of guest address space.
+ */
 static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                         abi_ulong align)
 {
@@ -362,15 +372,17 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
          * - shmat() with SHM_REMAP flag
          */
         ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
-                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
 
         /* ENOMEM, if host address space has no memory */
         if (ptr == MAP_FAILED) {
             return (abi_ulong)-1;
         }
 
-        /* Count the number of sequential returns of the same address.
-           This is used to modify the search algorithm below. */
+        /*
+         * Count the number of sequential returns of the same address.
+         * This is used to modify the search algorithm below.
+         */
         repeat = (ptr == prev ? repeat + 1 : 0);
 
         if (h2g_valid(ptr + size - 1)) {
@@ -387,14 +399,18 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
             /* The address is not properly aligned for the target. */
             switch (repeat) {
             case 0:
-                /* Assume the result that the kernel gave us is the
-                   first with enough free space, so start again at the
-                   next higher target page. */
+                /*
+                 * Assume the result that the kernel gave us is the
+                 * first with enough free space, so start again at the
+                 * next higher target page.
+                 */
                 addr = ROUND_UP(addr, align);
                 break;
             case 1:
-                /* Sometimes the kernel decides to perform the allocation
-                   at the top end of memory instead. */
+                /*
+                 * Sometimes the kernel decides to perform the allocation
+                 * at the top end of memory instead.
+                 */
                 addr &= -align;
                 break;
             case 2:
@@ -407,8 +423,10 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
                 break;
             }
         } else {
-            /* Since the result the kernel gave didn't fit, start
-               again at low memory. If any repetition, fail. */
+            /*
+             * Since the result the kernel gave didn't fit, start
+             * again at low memory. If any repetition, fail.
+             */
             addr = (repeat ? -1 : 0);
         }
 
@@ -423,8 +441,10 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
                 return (abi_ulong)-1;
             }
             wrapped = 1;
-            /* Don't actually use 0 when wrapping, instead indicate
-               that we'd truly like an allocation in low memory. */
+            /*
+             * Don't actually use 0 when wrapping, instead indicate
+             * that we'd truly like an allocation in low memory.
+             */
             addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
@@ -485,8 +505,10 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
     real_start = start & qemu_host_page_mask;
     host_offset = offset & qemu_host_page_mask;
 
-    /* If the user is asking for the kernel to find a location, do that
-       before we truncate the length for mapping files below. */
+    /*
+     * If the user is asking for the kernel to find a location, do that
+     * before we truncate the length for mapping files below.
+     */
     if (!(flags & MAP_FIXED)) {
         host_len = len + offset - host_offset;
         host_len = HOST_PAGE_ALIGN(host_len);
@@ -497,32 +519,36 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         }
     }
 
-    /* When mapping files into a memory area larger than the file, accesses
-       to pages beyond the file size will cause a SIGBUS.
-
-       For example, if mmaping a file of 100 bytes on a host with 4K pages
-       emulating a target with 8K pages, the target expects to be able to
-       access the first 8K. But the host will trap us on any access beyond
-       4K.
-
-       When emulating a target with a larger page-size than the hosts, we
-       may need to truncate file maps at EOF and add extra anonymous pages
-       up to the targets page boundary. */
+    /*
+     * When mapping files into a memory area larger than the file, accesses
+     * to pages beyond the file size will cause a SIGBUS.
+     *
+     * For example, if mmaping a file of 100 bytes on a host with 4K pages
+     * emulating a target with 8K pages, the target expects to be able to
+     * access the first 8K. But the host will trap us on any access beyond
+     * 4K.
+     *
+     * When emulating a target with a larger page-size than the hosts, we
+     * may need to truncate file maps at EOF and add extra anonymous pages
+     * up to the targets page boundary.
+     */
     if ((qemu_real_host_page_size() < qemu_host_page_size) &&
         !(flags & MAP_ANONYMOUS)) {
         struct stat sb;
 
-        if (fstat (fd, &sb) == -1)
+        if (fstat(fd, &sb) == -1) {
             goto fail;
+        }
 
         /* Are we trying to create a map beyond EOF?. */
         if (offset + len > sb.st_size) {
-            /* If so, truncate the file map at eof aligned with
-               the hosts real pagesize. Additional anonymous maps
-               will be created beyond EOF. */
+            /*
+             * If so, truncate the file map at eof aligned with
+             * the hosts real pagesize. Additional anonymous maps
+             * will be created beyond EOF.
+             */
             len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
         }
     }
 
     if (!(flags & MAP_FIXED)) {
@@ -532,9 +558,11 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         host_len = len + offset - host_offset;
         host_len = HOST_PAGE_ALIGN(host_len);
 
-        /* Note: we prefer to control the mapping address. It is
-           especially important if qemu_host_page_size >
-           qemu_real_host_page_size */
+        /*
+         * Note: we prefer to control the mapping address. It is
+         * especially important if qemu_host_page_size >
+         * qemu_real_host_page_size.
+         */
         p = mmap(g2h_untagged(start), host_len, host_prot,
                  flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
         if (p == MAP_FAILED) {
@@ -572,45 +600,52 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
             goto fail;
         }
 
-        /* worst case: we cannot map the file because the offset is not
-           aligned, so we read it */
+        /*
+         * worst case: we cannot map the file because the offset is not
+         * aligned, so we read it
+         */
         if (!(flags & MAP_ANONYMOUS) &&
             (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
-            /* msync() won't work here, so we return an error if write is
-               possible while it is a shared mapping */
-            if ((flags & MAP_TYPE) == MAP_SHARED &&
-                (host_prot & PROT_WRITE)) {
+            /*
+             * msync() won't work here, so we return an error if write is
+             * possible while it is a shared mapping
+             */
+            if ((flags & MAP_TYPE) == MAP_SHARED && (host_prot & PROT_WRITE)) {
                 errno = EINVAL;
                 goto fail;
             }
             retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                   -1, 0);
-            if (retaddr == -1)
+            if (retaddr == -1) {
                 goto fail;
-            if (pread(fd, g2h_untagged(start), len, offset) == -1)
+            }
+            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                 goto fail;
+            }
             if (!(host_prot & PROT_WRITE)) {
                 ret = target_mprotect(start, len, target_prot);
                 assert(ret == 0);
             }
             goto the_end;
         }
 
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
-                if (ret == -1)
+                if (ret == -1) {
                     goto fail;
+                }
                 goto the_end1;
             }
             ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                             host_prot, flags, fd, offset);
-            if (ret == -1)
+            if (ret == -1) {
                 goto fail;
+            }
             real_start += qemu_host_page_size;
         }
 
@@ -619,8 +654,9 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                             real_end - qemu_host_page_size, end,
                             host_prot, flags, fd,
                             offset + real_end - qemu_host_page_size - start);
-            if (ret == -1)
+            if (ret == -1) {
                 goto fail;
+            }
             real_end -= qemu_host_page_size;
         }
 
@@ -628,14 +664,16 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         if (real_start < real_end) {
             void *p;
             unsigned long offset1;
-            if (flags & MAP_ANONYMOUS)
+            if (flags & MAP_ANONYMOUS) {
                 offset1 = 0;
-            else
+            } else {
                 offset1 = offset + real_start - start;
+            }
             p = mmap(g2h_untagged(real_start), real_end - real_start,
                      host_prot, flags, fd, offset1);
-            if (p == MAP_FAILED)
+            if (p == MAP_FAILED) {
                 goto fail;
+            }
             passthrough_start = real_start;
             passthrough_end = real_end;
         }
@@ -697,16 +735,18 @@ static void mmap_reserve(abi_ulong start, abi_ulong size)
             }
             end = real_end;
         }
-        if (prot != 0)
+        if (prot != 0) {
             real_start += qemu_host_page_size;
+        }
     }
     if (end < real_end) {
         prot = 0;
         for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(addr);
         }
-        if (prot != 0)
+        if (prot != 0) {
             real_end -= qemu_host_page_size;
+        }
     }
     if (real_start != real_end) {
         mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
@@ -722,8 +762,9 @@ int target_munmap(abi_ulong start, abi_ulong len)
 
     trace_target_munmap(start, len);
 
-    if (start & ~TARGET_PAGE_MASK)
+    if (start & ~TARGET_PAGE_MASK) {
         return -TARGET_EINVAL;
+    }
     len = TARGET_PAGE_ALIGN(len);
     if (len == 0 || !guest_range_valid_untagged(start, len)) {
         return -TARGET_EINVAL;
@@ -737,25 +778,27 @@ int target_munmap(abi_ulong start, abi_ulong len)
     if (start > real_start) {
         /* handle host page containing start */
         prot = 0;
-        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
+        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(addr);
         }
         if (real_end == real_start + qemu_host_page_size) {
-            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                 prot |= page_get_flags(addr);
             }
             end = real_end;
         }
-        if (prot != 0)
+        if (prot != 0) {
             real_start += qemu_host_page_size;
+        }
     }
     if (end < real_end) {
         prot = 0;
-        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(addr);
         }
-        if (prot != 0)
+        if (prot != 0) {
             real_end -= qemu_host_page_size;
+        }
     }
 
     ret = 0;
@@ -798,8 +841,10 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                            flags, g2h_untagged(new_addr));
 
         if (reserved_va && host_addr != MAP_FAILED) {
-            /* If new and old addresses overlap then the above mremap will
-               already have failed with EINVAL. */
+            /*
+             * If new and old addresses overlap then the above mremap will
+             * already have failed with EINVAL.
+             */
             mmap_reserve(old_addr, old_size);
         }
     } else if (flags & MREMAP_MAYMOVE) {