Fixing stack allocation alignment.
commit 37804d692c
parent c69ee78c27
@@ -44,29 +44,30 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
   }
   backend_data_ = processor->backend()->AllocThreadData();
 
-  uint32_t stack_position;
   if (!stack_address) {
     stack_size = (stack_size + 0xFFF) & 0xFFFFF000;
     uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
     uint32_t stack_padding = stack_alignment * 1;
     uint32_t actual_stack_size = stack_padding + stack_size;
     memory()
-        ->LookupHeapByType(false, 0x10000)
+        ->LookupHeapByType(false, stack_alignment)
         ->Alloc(actual_stack_size, stack_alignment,
                 kMemoryAllocationReserve | kMemoryAllocationCommit,
                 kMemoryProtectRead | kMemoryProtectWrite, true,
                 &stack_address_);
     assert_true(!(stack_address_ & 0xFFF));  // just to be safe
-    stack_position = stack_address_ + actual_stack_size;
     stack_allocated_ = true;
-    memset(memory()->TranslateVirtual(stack_address_), 0xBE, actual_stack_size);
+    stack_base_ = stack_address_ + actual_stack_size;
+    stack_limit_ = stack_address_ + stack_padding;
+    memory()->Fill(stack_address_, actual_stack_size, 0xBE);
     memory()
         ->LookupHeap(stack_address_)
         ->Protect(stack_address_, stack_padding, kMemoryProtectNoAccess);
   } else {
     stack_address_ = stack_address;
-    stack_position = stack_address_ + stack_size;
     stack_allocated_ = false;
+    stack_base_ = stack_address_ + stack_size;
+    stack_limit_ = stack_address_;
   }
   assert_not_zero(stack_address_);
 
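
The sizing logic in this hunk is dense, so here is a minimal standalone sketch of the same arithmetic. The request size 0x2345 and the asserted values are hypothetical, chosen only to trace the math; this is not commit code:

    // Traces the rounding, alignment choice, and guard padding above.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t stack_size = 0x2345;  // hypothetical requested size
      // Round the request up to a whole number of 4 KiB pages.
      stack_size = (stack_size + 0xFFF) & 0xFFFFF000;
      assert(stack_size == 0x3000);
      // A size that is an exact multiple of 64 KiB gets 64 KiB alignment;
      // a size with a partial 64 KiB chunk only needs 4 KiB alignment.
      uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
      assert(stack_alignment == 0x1000);
      // One alignment unit of padding sits below the usable stack; the
      // constructor later marks it no-access so overflows fault early.
      uint32_t stack_padding = stack_alignment * 1;
      uint32_t actual_stack_size = stack_padding + stack_size;
      assert(actual_stack_size == 0x4000);
      return 0;
    }
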
@@ -86,7 +87,7 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
   context_->thread_id = thread_id_;
 
   // Set initial registers.
-  context_->r[1] = stack_position;
+  context_->r[1] = stack_base_;
   context_->r[13] = pcr_address_;
 
   // Pad out stack a bit, as some games seem to overwrite the caller by about
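
Background for the r1 change (explanatory note, not commit content): r1 is the PowerPC stack pointer and the guest stack grows downward, so it is seeded with stack_base_, the high end of the usable range, exactly where the deleted stack_position local used to point. The new fields partition the allocation like this:

    stack_address_            stack_limit_                 stack_base_
         |---- guard (no-access) ---|------ usable stack ------|
         |<----- stack_padding ---->|<------ stack_size ------>|

A thread that overruns its stack crosses stack_limit_ into the protected guard region and faults immediately, instead of silently corrupting whatever happens to live below the allocation.
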
@@ -33,6 +33,8 @@ class ThreadState {
   void* backend_data() const { return backend_data_; }
   uint32_t stack_address() const { return stack_address_; }
   uint32_t stack_size() const { return stack_size_; }
+  uint32_t stack_base() const { return stack_base_; }
+  uint32_t stack_limit() const { return stack_limit_; }
   uint32_t pcr_address() const { return pcr_address_; }
   xe::cpu::frontend::PPCContext* context() const { return context_; }
 
@@ -53,6 +55,8 @@ class ThreadState {
   uint32_t stack_address_;
   bool stack_allocated_;
   uint32_t stack_size_;
+  uint32_t stack_base_;
+  uint32_t stack_limit_;
   uint32_t pcr_address_;
 
   // NOTE: must be 64b aligned for SSE ops.
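
These two accessors expose the usable range to kernel code. A hypothetical consumer, just to show the intent (the function name and signature are illustrative, not part of the commit):

    // True if a guest stack pointer lies within the usable stack:
    // strictly above the no-access guard, at or below the initial top.
    bool IsGuestStackPointerInRange(const ThreadState& ts, uint32_t sp) {
      return sp > ts.stack_limit() && sp <= ts.stack_base();
    }
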
@@ -174,8 +174,8 @@ X_STATUS XThread::Create() {
   thread_state_ = new ThreadState(kernel_state()->processor(), thread_id_, 0,
                                   creation_params_.stack_size, pcr_address_);
   XELOGI("XThread%04X (%X) Stack: %.8X-%.8X", handle(),
-         thread_state_->thread_id(), thread_state_->stack_address(),
-         thread_state_->stack_address() + thread_state_->stack_size());
+         thread_state_->thread_id(), thread_state_->stack_limit(),
+         thread_state_->stack_base());
 
   uint8_t* pcr = memory()->TranslateVirtual(pcr_address_);
   xe::store_and_swap<uint32_t>(pcr + 0x000, tls_address_);
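
The log change follows from the new layout: stack_address() now points at the guard page, so stack_address() through stack_address() + stack_size() would misreport the range by the padding amount. Reusing the hypothetical numbers from the first sketch (0x1000 of guard padding, 0x3000 usable) with an assumed allocation at 0x40000000, the message now reads 40001000-40004000, the span the guest can actually touch, where the old code would have printed 40000000-40003000.
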
@@ -522,7 +522,7 @@ void BaseHeap::DumpMap() {
             heap_base_ + (i + page.region_page_count) * page_size_,
             page.region_page_count, page.region_page_count * page_size_,
             state_name, access_r, access_w);
-      i += page.region_page_count;
+      i += page.region_page_count - 1;
     }
     if (is_empty_span) {
       XELOGE("  %.8X-%.8X - %d unreserved pages)",
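
The BaseHeap::DumpMap fix is a classic off-by-one: the surrounding loop (not shown in the hunk) also advances i on every iteration, so adding the full region_page_count skipped the first page of the next region. A minimal sketch of the pattern, assuming a for loop with ++i as the hunk context suggests:

    // Each region's head element stores how many pages the region spans.
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> region_page_count = {3, 0, 0, 2, 0, 1};
      for (size_t i = 0; i < region_page_count.size(); ++i) {
        std::printf("region at page %zu spans %d pages\n", i,
                    region_page_count[i]);
        // Skip the pages belonging to this region; the loop's own ++i
        // then lands exactly on the next region's head. Without the
        // "- 1" the head of the following region would be jumped over.
        i += region_page_count[i] - 1;
      }
      return 0;
    }
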