[x64] Further simplification / fix buffer overrun in code cache.

- [x64] Further simplify padding of code / unwind reservation in code cache.
- [x64] Fix accidental buffer overrun caused by the previous simplification (the arithmetic is sketched below).
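The overrun is easiest to see in the pointer arithmetic. What follows is a minimal, self-contained sketch, not Xenia code: the buffer, the example sizes, and the local round_up lambda are assumptions made purely for illustration, and only the variable names mirror the diff. It models why the old memset, which starts right after the copied code but uses the size of the whole reservation as its length, runs past the reserved region, while the new fill is bounded exactly by tail_address and end_address.

// Sketch only: stand-alone model of the reservation arithmetic from the diff.
// Buffer, sizes, and round_up are illustrative assumptions, not the real code.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  std::uint8_t buffer[4096] = {};  // stand-in for the generated-code region
  std::uint8_t* generated_code_base = buffer;
  std::size_t generated_code_offset = 0;

  const std::size_t code_size = 100;   // pretend func_info.code_size.total
  const std::size_t unwind_size = 24;  // pretend unwind_reservation.data_size
  auto round_up = [](std::size_t v, std::size_t a) { return (v + a - 1) / a * a; };

  // Reserve code and unwind info, as in the patched version.
  std::size_t code_offset = generated_code_offset;  // old bookkeeping, kept for comparison
  std::uint8_t* code_address = generated_code_base + generated_code_offset;
  generated_code_offset += round_up(code_size, 16);
  std::uint8_t* tail_address = generated_code_base + generated_code_offset;
  generated_code_offset += round_up(unwind_size, 16);
  std::uint8_t* end_address = generated_code_base + generated_code_offset;

  // Old fill: starts after the copied code, but its length is the size of the
  // entire reservation, so it ends code_size bytes past end_address.
  std::uint8_t* old_fill_end =
      code_address + code_size + (generated_code_offset - code_offset);

  // New fill: exactly the reserved bytes between the rounded-up code tail and
  // the end of the unwind reservation.
  std::size_t new_fill = static_cast<std::size_t>(end_address - tail_address);

  std::printf("reservation ends at     +%zu\n",
              static_cast<std::size_t>(end_address - buffer));
  std::printf("old memset would end at +%zu (overrun by %zu bytes)\n",
              static_cast<std::size_t>(old_fill_end - buffer),
              static_cast<std::size_t>(old_fill_end - end_address));
  std::printf("new memset ends at      +%zu (%zu bytes filled)\n",
              static_cast<std::size_t>(tail_address + new_fill - buffer), new_fill);
  return 0;
}

With these assumed sizes the old fill ends 100 bytes past the reservation, i.e. exactly func_info.code_size.total bytes of overrun, which is what bounding the fill by tail_address and end_address eliminates.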
Authored by gibbed on 2019-08-26 12:19:00 -05:00; committed by Rick Gibbed
parent c4ff8d7c58
commit e9802a9f3b
1 changed file with 7 additions and 5 deletions

@@ -138,7 +138,6 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code,
   // unwind table requires entries AND code to be sorted in order.
   size_t low_mark;
   size_t high_mark;
-  size_t code_offset;
   uint8_t* code_address;
   UnwindReservation unwind_reservation;
   {
@@ -148,10 +147,11 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code,
     // Reserve code.
     // Always move the code to land on 16b alignment.
-    code_offset = generated_code_offset_;
-    code_address = generated_code_base_ + code_offset;
+    code_address = generated_code_base_ + generated_code_offset_;
     generated_code_offset_ += xe::round_up(func_info.code_size.total, 16);
+    auto tail_address = generated_code_base_ + generated_code_offset_;

     // Reserve unwind info.
     // We go on the high size of the unwind info as we don't know how big we
     // need it, and a few extra bytes of padding isn't the worst thing.
@@ -159,6 +159,8 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code,
         RequestUnwindReservation(generated_code_base_ + generated_code_offset_);
     generated_code_offset_ += xe::round_up(unwind_reservation.data_size, 16);
+    auto end_address = generated_code_base_ + generated_code_offset_;

     high_mark = generated_code_offset_;
   }

   // Store in map. It is maintained in sorted order of host PC dependent on
@@ -191,8 +193,8 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code,
   std::memcpy(code_address, machine_code, func_info.code_size.total);

   // Fill unused slots with 0xCC
-  std::memset(code_address + func_info.code_size.total, 0xCC,
-              generated_code_offset_ - code_offset);
+  std::memset(tail_address, 0xCC,
+              static_cast<size_t>(end_address - tail_address));

   // Notify subclasses of placed code.
   PlaceCode(guest_address, machine_code, func_info, code_address,