From b5ddd305725cecd31f3a9fdb5f9a3ef2fe80661d Mon Sep 17 00:00:00 2001 From: disjtqz Date: Tue, 10 Oct 2023 08:50:10 -0400 Subject: [PATCH] moved xsemaphore to xthread.d add typed guest pointer template add X_KSPINLOCK, rework spinlock functions. rework irql related code, use irql on pcr instead of on XThread add guest linked list helper functions renamed ProcessInfoBlock to X_KPROCESS assigned names to many kernel structure fields --- src/xenia/cpu/ppc/ppc_context.h | 19 ++ src/xenia/guest_pointers.h | 51 ++++ src/xenia/kernel/kernel_state.cc | 8 +- src/xenia/kernel/kernel_state.h | 2 +- src/xenia/kernel/util/kernel_fwd.h | 2 +- src/xenia/kernel/util/native_list.h | 127 ++++++++ src/xenia/kernel/xboxkrnl/xboxkrnl_misc.cc | 4 +- .../kernel/xboxkrnl/xboxkrnl_threading.cc | 207 ++++++------- .../kernel/xboxkrnl/xboxkrnl_threading.h | 10 + src/xenia/kernel/xsemaphore.h | 8 +- src/xenia/kernel/xthread.cc | 11 +- src/xenia/kernel/xthread.h | 278 ++++++++++++------ src/xenia/memory.h | 6 +- src/xenia/xbox.h | 4 + 14 files changed, 507 insertions(+), 230 deletions(-) create mode 100644 src/xenia/guest_pointers.h diff --git a/src/xenia/cpu/ppc/ppc_context.h b/src/xenia/cpu/ppc/ppc_context.h index 629b28392..37c14d10e 100644 --- a/src/xenia/cpu/ppc/ppc_context.h +++ b/src/xenia/cpu/ppc/ppc_context.h @@ -16,6 +16,7 @@ #include "xenia/base/mutex.h" #include "xenia/base/vec128.h" +#include "xenia/guest_pointers.h" namespace xe { namespace cpu { class Processor; @@ -449,6 +450,24 @@ typedef struct alignas(64) PPCContext_s { } + template + inline T* TranslateVirtual(TypedGuestPointer guest_address) { + return TranslateVirtual(guest_address.m_ptr); + } + template + inline uint32_t HostToGuestVirtual(T* host_ptr) XE_RESTRICT const { +#if XE_PLATFORM_WIN32 == 1 + uint32_t guest_tmp = static_cast( + reinterpret_cast(host_ptr) - virtual_membase); + if (guest_tmp >= static_cast(reinterpret_cast(this))) { + guest_tmp -= 0x1000; + } + return guest_tmp; +#else + return processor->memory()->HostToGuestVirtual( + reinterpret_cast(host_ptr)); +#endif + } static std::string GetRegisterName(PPCRegister reg); std::string GetStringFromValue(PPCRegister reg) const; void SetValueFromString(PPCRegister reg, std::string value); diff --git a/src/xenia/guest_pointers.h b/src/xenia/guest_pointers.h new file mode 100644 index 000000000..6794e11f5 --- /dev/null +++ b/src/xenia/guest_pointers.h @@ -0,0 +1,51 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2020 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. 
* + ****************************************************************************** + */ + +#ifndef XENIA_GUEST_POINTERS_H_ +#define XENIA_GUEST_POINTERS_H_ + +namespace xe { +template +struct ShiftedPointer { + using this_type = ShiftedPointer; + TBase* m_base; + inline TBase* operator->() { return m_base; } + + inline TBase& operator*() { return *m_base; } + inline this_type& operator=(TBase* base) { + m_base = base; + return *this; + } + + inline this_type& operator=(this_type other) { + m_base = other.m_base; + return *this; + } + + TAdj* GetAdjacent() { + return reinterpret_cast( + &reinterpret_cast(m_base)[-static_cast(offset)]); + } +}; + +template +struct TypedGuestPointer { + xe::be m_ptr; + inline TypedGuestPointer& operator=(uint32_t ptr) { + m_ptr = ptr; + return *this; + } + inline bool operator==(uint32_t ptr) const { return m_ptr == ptr; } + inline bool operator!=(uint32_t ptr) const { return m_ptr != ptr; } + // use value directly, no endian swap needed + inline bool operator!() const { return !m_ptr.value; } +}; +} // namespace xe + +#endif // XENIA_GUEST_POINTERS_H_ \ No newline at end of file diff --git a/src/xenia/kernel/kernel_state.cc b/src/xenia/kernel/kernel_state.cc index 9e6be4764..a0c9f6a2c 100644 --- a/src/xenia/kernel/kernel_state.cc +++ b/src/xenia/kernel/kernel_state.cc @@ -138,13 +138,13 @@ util::XdbfGameData KernelState::module_xdbf( uint32_t KernelState::process_type() const { auto pib = - memory_->TranslateVirtual(process_info_block_address_); + memory_->TranslateVirtual(process_info_block_address_); return pib->process_type; } void KernelState::set_process_type(uint32_t value) { auto pib = - memory_->TranslateVirtual(process_info_block_address_); + memory_->TranslateVirtual(process_info_block_address_); pib->process_type = uint8_t(value); } @@ -328,7 +328,7 @@ void KernelState::SetExecutableModule(object_ref module) { process_info_block_address_ = memory_->SystemHeapAlloc(0x60); auto pib = - memory_->TranslateVirtual(process_info_block_address_); + memory_->TranslateVirtual(process_info_block_address_); // TODO(benvanik): figure out what this list is. 
pib->unk_04 = pib->unk_08 = 0; pib->unk_0C = 0x0000007F; @@ -343,7 +343,7 @@ void KernelState::SetExecutableModule(object_ref module) { xex2_opt_tls_info* tls_header = nullptr; executable_module_->GetOptHeader(XEX_HEADER_TLS_INFO, &tls_header); if (tls_header) { - auto pib = memory_->TranslateVirtual( + auto pib = memory_->TranslateVirtual( process_info_block_address_); pib->tls_data_size = tls_header->data_size; pib->tls_raw_data_size = tls_header->raw_data_size; diff --git a/src/xenia/kernel/kernel_state.h b/src/xenia/kernel/kernel_state.h index 9c8e23766..200ed75a1 100644 --- a/src/xenia/kernel/kernel_state.h +++ b/src/xenia/kernel/kernel_state.h @@ -51,7 +51,7 @@ constexpr uint32_t X_PROCTYPE_IDLE = 0; constexpr uint32_t X_PROCTYPE_USER = 1; constexpr uint32_t X_PROCTYPE_SYSTEM = 2; -struct ProcessInfoBlock { +struct X_KPROCESS { xe::be unk_00; xe::be unk_04; // blink xe::be unk_08; // flink diff --git a/src/xenia/kernel/util/kernel_fwd.h b/src/xenia/kernel/util/kernel_fwd.h index 4cae57a82..4dff6c2ac 100644 --- a/src/xenia/kernel/util/kernel_fwd.h +++ b/src/xenia/kernel/util/kernel_fwd.h @@ -9,7 +9,7 @@ class XModule; class XNotifyListener; class XThread; class UserModule; -struct ProcessInfoBlock; +struct X_KPROCESS; struct TerminateNotification; struct X_TIME_STAMP_BUNDLE; class KernelState; diff --git a/src/xenia/kernel/util/native_list.h b/src/xenia/kernel/util/native_list.h index f16ad5c27..70b5f00e8 100644 --- a/src/xenia/kernel/util/native_list.h +++ b/src/xenia/kernel/util/native_list.h @@ -50,7 +50,134 @@ class NativeList { Memory* memory_ = nullptr; uint32_t head_; }; +template +static X_LIST_ENTRY* XeHostList(uint32_t ptr, VirtualTranslator context) { + return context->TranslateVirtual(ptr); +} +template +static uint32_t XeGuestList(X_LIST_ENTRY* ptr, VirtualTranslator context) { + return context->HostToGuestVirtual(ptr); +} +// can either pass an object that adheres to the +// HostToGuestVirtual/TranslateVirtual interface, or the original guest ptr for +// arg 2 +template +static void XeInitializeListHead(X_LIST_ENTRY* entry, + VirtualTranslator context) { + // is just a guest ptr? 
+ if constexpr (std::is_unsigned_v) { + entry->blink_ptr = context; + entry->flink_ptr = context; + } else { + uint32_t orig_ptr = XeGuestList(entry, context); + entry->blink_ptr = orig_ptr; + entry->flink_ptr = orig_ptr; + } +} + +template +static bool XeIsListEmpty(X_LIST_ENTRY* entry, VirtualTranslator context) { + return XeHostList(entry->flink_ptr, context) == entry; +} +template +static void XeRemoveEntryList(X_LIST_ENTRY* entry, VirtualTranslator context) { + uint32_t front = entry->flink_ptr; + uint32_t back = entry->blink_ptr; + XeHostList(back, context)->flink_ptr = front; + XeHostList(front, context)->blink_ptr = back; +} +template +static void XeRemoveEntryList(uint32_t entry, VirtualTranslator context) { + XeRemoveEntryList(XeHostList(entry, context), context); +} +template +static uint32_t XeRemoveHeadList(X_LIST_ENTRY* entry, + VirtualTranslator context) { + uint32_t result = entry->flink_ptr; + XeRemoveEntryList(result, context); + return result; +} +template +static uint32_t XeRemoveTailList(X_LIST_ENTRY* entry, + VirtualTranslator context) { + uint32_t result = entry->blink_ptr; + XeRemoveEntryList(result, context); + return result; +} +template +static void XeInsertTailList(X_LIST_ENTRY* list_head, uint32_t list_head_guest, + X_LIST_ENTRY* host_entry, uint32_t entry, + VirtualTranslator context) { + uint32_t old_tail = list_head->blink_ptr; + host_entry->flink_ptr = list_head_guest; + host_entry->blink_ptr = old_tail; + XeHostList(old_tail, context)->flink_ptr = entry; + list_head->blink_ptr = entry; +} +template +static void XeInsertTailList(uint32_t list_head, uint32_t entry, + VirtualTranslator context) { + XeInsertTailList(XeHostList(list_head, context), list_head, + XeHostList(entry, context), entry, context); +} +template +static void XeInsertTailList(X_LIST_ENTRY* list_head, uint32_t entry, + VirtualTranslator context) { + XeInsertTailList(list_head, XeGuestList(list_head, context), + XeHostList(entry, context), entry, context); +} + +template +static void XeInsertTailList(X_LIST_ENTRY* list_head, X_LIST_ENTRY* entry, + VirtualTranslator context) { + XeInsertTailList(list_head, XeGuestList(list_head, context), entry, + XeGuestList(entry, context), context); +} + +template +static void XeInsertHeadList(X_LIST_ENTRY* list_head, uint32_t list_head_guest, + X_LIST_ENTRY* host_entry, uint32_t entry, + VirtualTranslator context) { + uint32_t old_list_head_flink = list_head->flink_ptr; + host_entry->flink_ptr = old_list_head_flink; + host_entry->blink_ptr = list_head_guest; + XeHostList(old_list_head_flink, context)->blink_ptr = entry; + list_head->flink_ptr = entry; +} + +template +static void XeInsertHeadList(uint32_t list_head, uint32_t entry, + VirtualTranslator context) { + XeInsertHeadList(XeHostList(list_head, context), list_head, + XeHostList(entry, context), entry, context); +} +template +static void XeInsertHeadList(X_LIST_ENTRY* list_head, uint32_t entry, + VirtualTranslator context) { + XeInsertHeadList(list_head, XeGuestList(list_head, context), + XeHostList(entry, context), entry, context); +} +template +struct X_TYPED_LIST : public X_LIST_ENTRY { + public: + X_LIST_ENTRY* ObjectListEntry(TObject* obj) { + return reinterpret_cast( + &reinterpret_cast(obj)[static_cast(EntryListOffset)]); + } + TObject* ListEntryObject(X_LIST_ENTRY* entry) { + return reinterpret_cast(&reinterpret_cast( + entry)[-static_cast(EntryListOffset)]); + } + template + void Initialize(VirtualTranslator* translator) { + XeInitializeListHead(this, translator); + } + template + void 
InsertHead(TObject* entry, VirtualTranslator* translator) { + XeInsertHeadList(this, ObjectListEntry(entry), translator); + } +}; } // namespace util } // namespace kernel } // namespace xe diff --git a/src/xenia/kernel/xboxkrnl/xboxkrnl_misc.cc b/src/xenia/kernel/xboxkrnl/xboxkrnl_misc.cc index 0df1744f8..ca345ccd2 100644 --- a/src/xenia/kernel/xboxkrnl/xboxkrnl_misc.cc +++ b/src/xenia/kernel/xboxkrnl/xboxkrnl_misc.cc @@ -26,8 +26,8 @@ void KeEnableFpuExceptions_entry( // has to be saved to kthread, the irql changes, the machine state register is // changed to enable exceptions - X_KTHREAD* kthread = ctx->TranslateVirtual( - ctx->TranslateVirtualGPR(ctx->r[13])->current_thread); + X_KTHREAD* kthread = ctx->TranslateVirtual( + ctx->TranslateVirtualGPR(ctx->r[13])->prcb_data.current_thread); kthread->fpu_exceptions_on = static_cast(ctx->r[3]) != 0; } DECLARE_XBOXKRNL_EXPORT1(KeEnableFpuExceptions, kNone, kStub); diff --git a/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc b/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc index e826f2598..662b77ca7 100644 --- a/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc +++ b/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc @@ -234,7 +234,7 @@ dword_result_t NtSuspendThread_entry(dword_t handle, if (thread->type() == XObject::Type::Thread) { auto current_pcr = context->TranslateVirtualGPR(context->r[13]); - if (current_pcr->current_thread == thread->guest_object() || + if (current_pcr->prcb_data.current_thread == thread->guest_object() || !thread->guest_object()->terminated) { result = thread->Suspend(&suspend_count); } else { @@ -1041,88 +1041,68 @@ DECLARE_XBOXKRNL_EXPORT3(NtSignalAndWaitForSingleObjectEx, kThreading, static void PrefetchForCAS(const void* value) { swcache::PrefetchW(value); } -uint32_t xeKeKfAcquireSpinLock(uint32_t* lock, uint64_t r13 = 1) { - // XELOGD( - // "KfAcquireSpinLock({:08X})", - // lock_ptr); +uint32_t xeKeKfAcquireSpinLock(PPCContext* ctx, X_KSPINLOCK* lock, + bool change_irql) +{ + auto old_irql = change_irql ? xeKfRaiseIrql(ctx, 2) : 0; + PrefetchForCAS(lock); - assert_true(*lock != static_cast(r13)); + assert_true(lock->prcb_of_owner != static_cast(ctx->r[13])); // Lock. - while (!xe::atomic_cas(0, xe::byte_swap(static_cast(r13)), lock)) { + while (!xe::atomic_cas(0, xe::byte_swap(static_cast(ctx->r[13])), + &lock->prcb_of_owner.value)) { // Spin! // TODO(benvanik): error on deadlock? xe::threading::MaybeYield(); } - // Raise IRQL to DISPATCH. - XThread* thread = XThread::GetCurrentThread(); - auto old_irql = thread->RaiseIrql(2); - return old_irql; } -dword_result_t KfAcquireSpinLock_entry(lpdword_t lock_ptr, - const ppc_context_t& ppc_context) { - auto lock = reinterpret_cast(lock_ptr.host_address()); - return xeKeKfAcquireSpinLock(lock, ppc_context->r[13]); +dword_result_t KfAcquireSpinLock_entry(pointer_t lock_ptr, + const ppc_context_t& context) { + return xeKeKfAcquireSpinLock(context, lock_ptr, true); } DECLARE_XBOXKRNL_EXPORT3(KfAcquireSpinLock, kThreading, kImplemented, kBlocking, kHighFrequency); -void xeKeKfReleaseSpinLock(uint32_t* lock, dword_t old_irql) { +void xeKeKfReleaseSpinLock(PPCContext* ctx, X_KSPINLOCK* lock, dword_t old_irql, + bool change_irql) { + assert_true(lock->prcb_of_owner == static_cast(ctx->r[13])); // Unlock. - *lock = 0; - if (old_irql >= 2) { - return; + lock->prcb_of_owner.value = 0; + + if (change_irql) { + // Unlock. + if (old_irql >= 2) { + return; + } + + // Restore IRQL. + xeKfLowerIrql(ctx, old_irql); } - // Restore IRQL. 
- XThread* thread = XThread::GetCurrentThread(); - thread->LowerIrql(old_irql); } -void KfReleaseSpinLock_entry(lpdword_t lock_ptr, dword_t old_irql, +void KfReleaseSpinLock_entry(pointer_t lock_ptr, dword_t old_irql, const ppc_context_t& ppc_ctx) { - assert_true(*lock_ptr == static_cast(ppc_ctx->r[13])); - - *lock_ptr = 0; - if (old_irql >= 2) { - return; - } - // Restore IRQL. - XThread* thread = XThread::GetCurrentThread(); - thread->LowerIrql(old_irql); + xeKeKfReleaseSpinLock(ppc_ctx, lock_ptr, old_irql, true); } + DECLARE_XBOXKRNL_EXPORT2(KfReleaseSpinLock, kThreading, kImplemented, kHighFrequency); // todo: this is not accurate -void KeAcquireSpinLockAtRaisedIrql_entry(lpdword_t lock_ptr, +void KeAcquireSpinLockAtRaisedIrql_entry(pointer_t lock_ptr, const ppc_context_t& ppc_ctx) { - // Lock. - auto lock = reinterpret_cast(lock_ptr.host_address()); - // must not be our own thread - assert_true(*lock_ptr != static_cast(ppc_ctx->r[13])); - - PrefetchForCAS(lock); - while (!xe::atomic_cas( - 0, xe::byte_swap(static_cast(ppc_ctx->r[13])), lock)) { -#if XE_ARCH_AMD64 == 1 - // todo: this is just a nop if they don't have SMT, which is not great - // either... - - _mm_pause(); -#endif - // Spin! - // TODO(benvanik): error on deadlock? - } + xeKeKfAcquireSpinLock(ppc_ctx, lock_ptr, false); } DECLARE_XBOXKRNL_EXPORT3(KeAcquireSpinLockAtRaisedIrql, kThreading, kImplemented, kBlocking, kHighFrequency); dword_result_t KeTryToAcquireSpinLockAtRaisedIrql_entry( - lpdword_t lock_ptr, const ppc_context_t& ppc_ctx) { + pointer_t lock_ptr, const ppc_context_t& ppc_ctx) { // Lock. auto lock = reinterpret_cast(lock_ptr.host_address()); - assert_true(*lock_ptr != static_cast(ppc_ctx->r[13])); + assert_true(lock_ptr->prcb_of_owner != static_cast(ppc_ctx->r[13])); PrefetchForCAS(lock); if (!xe::atomic_cas(0, xe::byte_swap(static_cast(ppc_ctx->r[13])), lock)) { @@ -1133,12 +1113,11 @@ dword_result_t KeTryToAcquireSpinLockAtRaisedIrql_entry( DECLARE_XBOXKRNL_EXPORT4(KeTryToAcquireSpinLockAtRaisedIrql, kThreading, kImplemented, kBlocking, kHighFrequency, kSketchy); -void KeReleaseSpinLockFromRaisedIrql_entry(lpdword_t lock_ptr, +void KeReleaseSpinLockFromRaisedIrql_entry(pointer_t lock_ptr, const ppc_context_t& ppc_ctx) { - // Unlock. - assert_true(*lock_ptr == static_cast(ppc_ctx->r[13])); - *lock_ptr = 0; + xeKeKfReleaseSpinLock(ppc_ctx, lock_ptr, 0, false); } + DECLARE_XBOXKRNL_EXPORT2(KeReleaseSpinLockFromRaisedIrql, kThreading, kImplemented, kHighFrequency); @@ -1168,29 +1147,26 @@ dword_result_t KeRaiseIrqlToDpcLevel_entry(const ppc_context_t& ctx) { } DECLARE_XBOXKRNL_EXPORT2(KeRaiseIrqlToDpcLevel, kThreading, kImplemented, kHighFrequency); - -// irql is supposed to be per thread afaik... -void KfLowerIrql_entry(dword_t new_irql, const ppc_context_t& ctx) { - X_KPCR* kpcr = ctx.GetPCR(); +void xeKfLowerIrql(PPCContext* ctx, unsigned char new_irql) { + X_KPCR* kpcr = ctx->TranslateVirtualGPR(ctx->r[13]); if (new_irql > kpcr->current_irql) { XELOGE("KfLowerIrql : new_irql > kpcr->current_irql!"); } kpcr->current_irql = new_irql; if (new_irql < 2) { - // this actually calls a function that eventually calls checkapcs. // the called function does a ton of other stuff including changing the - // irql and interrupt_related + // irql and interrupt_related } } +// irql is supposed to be per thread afaik... 
+void KfLowerIrql_entry(dword_t new_irql, const ppc_context_t& ctx) { + xeKfLowerIrql(ctx, static_cast(new_irql)); +} DECLARE_XBOXKRNL_EXPORT2(KfLowerIrql, kThreading, kImplemented, kHighFrequency); -// used by aurora's nova plugin -// like the other irql related functions, writes to an unknown mmio range ( -// 0x7FFF ). The range is indexed by the low 16 bits of the KPCR's pointer (so -// r13) -dword_result_t KfRaiseIrql_entry(dword_t new_irql, const ppc_context_t& ctx) { - X_KPCR* v1 = ctx.GetPCR(); +unsigned char xeKfRaiseIrql(PPCContext* ctx, unsigned char new_irql) { + X_KPCR* v1 = ctx->TranslateVirtualGPR(ctx->r[13]); uint32_t old_irql = v1->current_irql; v1->current_irql = new_irql; @@ -1200,6 +1176,13 @@ dword_result_t KfRaiseIrql_entry(dword_t new_irql, const ppc_context_t& ctx) { } return old_irql; } +// used by aurora's nova plugin +// like the other irql related functions, writes to an unknown mmio range ( +// 0x7FFF ). The range is indexed by the low 16 bits of the KPCR's pointer (so +// r13) +dword_result_t KfRaiseIrql_entry(dword_t new_irql, const ppc_context_t& ctx) { + return xeKfRaiseIrql(ctx, new_irql); +} DECLARE_XBOXKRNL_EXPORT2(KfRaiseIrql, kThreading, kImplemented, kHighFrequency); @@ -1222,19 +1205,29 @@ void NtQueueApcThread_entry(dword_t thread_handle, lpvoid_t apc_routine, thread->EnqueueApc(apc_routine, apc_routine_context, arg1, arg2); } DECLARE_XBOXKRNL_EXPORT1(NtQueueApcThread, kThreading, kImplemented); - +void xeKeInitializeApc(XAPC* apc, uint32_t thread_ptr, uint32_t kernel_routine, + uint32_t rundown_routine, uint32_t normal_routine, + uint32_t apc_mode, uint32_t normal_context) { + apc->thread_ptr = thread_ptr; + apc->kernel_routine = kernel_routine; + apc->rundown_routine = rundown_routine; + apc->normal_routine = normal_routine; + apc->type = 18; + if (normal_routine) { + apc->apc_mode = apc_mode; + apc->normal_context = normal_context; + } else { + apc->apc_mode = 0; + apc->normal_context = 0; + } + apc->enqueued = 0; +} void KeInitializeApc_entry(pointer_t apc, lpvoid_t thread_ptr, lpvoid_t kernel_routine, lpvoid_t rundown_routine, lpvoid_t normal_routine, dword_t processor_mode, lpvoid_t normal_context) { - apc->Initialize(); - apc->processor_mode = processor_mode; - apc->thread_ptr = thread_ptr.guest_address(); - apc->kernel_routine = kernel_routine.guest_address(); - apc->rundown_routine = rundown_routine.guest_address(); - apc->normal_routine = normal_routine.guest_address(); - apc->normal_context = - normal_routine.guest_address() ? normal_context.guest_address() : 0; + xeKeInitializeApc(apc, thread_ptr, kernel_routine, rundown_routine, + normal_routine, processor_mode, normal_context); } DECLARE_XBOXKRNL_EXPORT1(KeInitializeApc, kThreading, kImplemented); @@ -1310,29 +1303,9 @@ dword_result_t KiApcNormalRoutineNop_entry(dword_t unk0 /* output? */, } DECLARE_XBOXKRNL_EXPORT1(KiApcNormalRoutineNop, kThreading, kStub); -typedef struct { - xe::be unknown; - xe::be flink; - xe::be blink; - xe::be routine; - xe::be context; - xe::be arg1; - xe::be arg2; -} XDPC; - void KeInitializeDpc_entry(pointer_t dpc, lpvoid_t routine, lpvoid_t context) { - // KDPC (maybe) 0x18 bytes? - uint32_t type = 19; // DpcObject - uint32_t importance = 0; - uint32_t number = 0; // ? 
- dpc->unknown = (type << 24) | (importance << 16) | (number); - dpc->flink = 0; - dpc->blink = 0; - dpc->routine = routine.guest_address(); - dpc->context = context.guest_address(); - dpc->arg1 = 0; - dpc->arg2 = 0; + dpc->Initialize(routine, context); } DECLARE_XBOXKRNL_EXPORT2(KeInitializeDpc, kThreading, kImplemented, kSketchy); @@ -1385,7 +1358,7 @@ struct X_ERWLOCK { be readers_entry_count; // 0xC X_KEVENT writer_event; // 0x10 X_KSEMAPHORE reader_semaphore; // 0x20 - uint32_t spin_lock; // 0x34 + X_KSPINLOCK spin_lock; // 0x34 }; static_assert_size(X_ERWLOCK, 0x38); @@ -1396,24 +1369,23 @@ void ExInitializeReadWriteLock_entry(pointer_t lock_ptr) { lock_ptr->readers_entry_count = 0; KeInitializeEvent_entry(&lock_ptr->writer_event, 1, 0); KeInitializeSemaphore_entry(&lock_ptr->reader_semaphore, 0, 0x7FFFFFFF); - lock_ptr->spin_lock = 0; + lock_ptr->spin_lock.prcb_of_owner = 0; } DECLARE_XBOXKRNL_EXPORT1(ExInitializeReadWriteLock, kThreading, kImplemented); void ExAcquireReadWriteLockExclusive_entry(pointer_t lock_ptr, const ppc_context_t& ppc_context) { - auto old_irql = - xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); + auto old_irql = xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock); int32_t lock_count = ++lock_ptr->lock_count; if (!lock_count) { - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); return; } lock_ptr->writers_waiting_count++; - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); xeKeWaitForSingleObject(&lock_ptr->writer_event, 7, 0, 0, nullptr); } DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockExclusive, kThreading, @@ -1422,7 +1394,7 @@ DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockExclusive, kThreading, dword_result_t ExTryToAcquireReadWriteLockExclusive_entry( pointer_t lock_ptr, const ppc_context_t& ppc_context) { auto old_irql = - xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); + xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock); uint32_t result; if (lock_ptr->lock_count < 0) { @@ -1432,7 +1404,7 @@ dword_result_t ExTryToAcquireReadWriteLockExclusive_entry( result = 0; } - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); return result; } DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockExclusive, kThreading, @@ -1440,20 +1412,19 @@ DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockExclusive, kThreading, void ExAcquireReadWriteLockShared_entry(pointer_t lock_ptr, const ppc_context_t& ppc_context) { - auto old_irql = - xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); + auto old_irql = xeKeKfAcquireSpinLock(ppc_context, & lock_ptr->spin_lock); int32_t lock_count = ++lock_ptr->lock_count; if (!lock_count || (lock_ptr->readers_entry_count && !lock_ptr->writers_waiting_count)) { lock_ptr->readers_entry_count++; - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, & lock_ptr->spin_lock, old_irql); return; } lock_ptr->readers_waiting_count++; - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); xeKeWaitForSingleObject(&lock_ptr->reader_semaphore, 7, 0, 0, nullptr); } DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockShared, kThreading, kImplemented, @@ -1461,8 +1432,7 @@ DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockShared, kThreading, kImplemented, dword_result_t 
ExTryToAcquireReadWriteLockShared_entry( pointer_t lock_ptr, const ppc_context_t& ppc_context) { - auto old_irql = - xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); + auto old_irql = xeKeKfAcquireSpinLock(ppc_context, & lock_ptr->spin_lock); uint32_t result; if (lock_ptr->lock_count < 0 || @@ -1474,7 +1444,7 @@ dword_result_t ExTryToAcquireReadWriteLockShared_entry( result = 0; } - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); return result; } DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockShared, kThreading, @@ -1482,14 +1452,13 @@ DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockShared, kThreading, void ExReleaseReadWriteLock_entry(pointer_t lock_ptr, const ppc_context_t& ppc_context) { - auto old_irql = - xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); + auto old_irql = xeKeKfAcquireSpinLock(ppc_context, & lock_ptr->spin_lock); int32_t lock_count = --lock_ptr->lock_count; if (lock_count < 0) { lock_ptr->readers_entry_count = 0; - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); return; } @@ -1498,7 +1467,7 @@ void ExReleaseReadWriteLock_entry(pointer_t lock_ptr, if (readers_waiting_count) { lock_ptr->readers_waiting_count = 0; lock_ptr->readers_entry_count = readers_waiting_count; - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); xeKeReleaseSemaphore(&lock_ptr->reader_semaphore, 1, readers_waiting_count, 0); return; @@ -1507,12 +1476,12 @@ void ExReleaseReadWriteLock_entry(pointer_t lock_ptr, auto readers_entry_count = --lock_ptr->readers_entry_count; if (readers_entry_count) { - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); return; } lock_ptr->writers_waiting_count--; - xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql); + xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql); xeKeSetEvent(&lock_ptr->writer_event, 1, 0); } DECLARE_XBOXKRNL_EXPORT1(ExReleaseReadWriteLock, kThreading, kImplemented); diff --git a/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.h b/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.h index b4aee7071..d8bce91df 100644 --- a/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.h +++ b/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.h @@ -50,6 +50,16 @@ uint32_t ExTerminateThread(uint32_t exit_code); uint32_t NtResumeThread(uint32_t handle, uint32_t* suspend_count_ptr); uint32_t NtClose(uint32_t handle); +void xeKeInitializeApc(XAPC* apc, uint32_t thread_ptr, uint32_t kernel_routine, + uint32_t rundown_routine, uint32_t normal_routine, + uint32_t apc_mode, uint32_t normal_context); + +void xeKfLowerIrql(PPCContext* ctx, unsigned char new_irql); +unsigned char xeKfRaiseIrql(PPCContext* ctx, unsigned char new_irql); + +void xeKeKfReleaseSpinLock(PPCContext* ctx, X_KSPINLOCK* lock, dword_t old_irql, bool change_irql=true); +uint32_t xeKeKfAcquireSpinLock(PPCContext* ctx, X_KSPINLOCK* lock, bool change_irql=true); + } // namespace xboxkrnl } // namespace kernel diff --git a/src/xenia/kernel/xsemaphore.h b/src/xenia/kernel/xsemaphore.h index 40261398b..db06f9ee2 100644 --- a/src/xenia/kernel/xsemaphore.h +++ b/src/xenia/kernel/xsemaphore.h @@ -13,16 +13,10 @@ #include "xenia/base/threading.h" #include "xenia/kernel/xobject.h" #include "xenia/xbox.h" - +#include "xenia/kernel/xthread.h" namespace xe { namespace kernel { 
-struct X_KSEMAPHORE { - X_DISPATCH_HEADER header; - xe::be limit; -}; -static_assert_size(X_KSEMAPHORE, 0x14); - class XSemaphore : public XObject { public: static const XObject::Type kObjectType = XObject::Type::Semaphore; diff --git a/src/xenia/kernel/xthread.cc b/src/xenia/kernel/xthread.cc index 5f6c2d2a0..67ad38f4d 100644 --- a/src/xenia/kernel/xthread.cc +++ b/src/xenia/kernel/xthread.cc @@ -365,12 +365,12 @@ X_STATUS XThread::Create() { pcr->tls_ptr = tls_static_address_; pcr->pcr_ptr = pcr_address_; - pcr->current_thread = guest_object(); + pcr->prcb_data.current_thread = guest_object(); pcr->stack_base_ptr = stack_base_; pcr->stack_end_ptr = stack_limit_; - pcr->dpc_active = 0; // DPC active bool? + pcr->prcb_data.dpc_active = 0; // DPC active bool? // Always retain when starting - the thread owns itself until exited. RetainHandle(); @@ -623,7 +623,8 @@ void XThread::EnqueueApc(uint32_t normal_routine, uint32_t normal_context, uint32_t apc_ptr = memory()->SystemHeapAlloc(XAPC::kSize); auto apc = reinterpret_cast(memory()->TranslateVirtual(apc_ptr)); - apc->Initialize(); + apc->type = 18; + apc->apc_mode = 1; apc->kernel_routine = XAPC::kDummyKernelRoutine; apc->rundown_routine = XAPC::kDummyRundownRoutine; apc->normal_routine = normal_routine; @@ -768,7 +769,7 @@ void XThread::SetAffinity(uint32_t affinity) { uint8_t XThread::active_cpu() const { const X_KPCR& pcr = *memory()->TranslateVirtual(pcr_address_); - return pcr.current_cpu; + return pcr.prcb_data.current_cpu; } void XThread::SetActiveCpu(uint8_t cpu_index) { @@ -777,7 +778,7 @@ void XThread::SetActiveCpu(uint8_t cpu_index) { assert_true(cpu_index < 6); X_KPCR& pcr = *memory()->TranslateVirtual(pcr_address_); - pcr.current_cpu = cpu_index; + pcr.prcb_data.current_cpu = cpu_index; if (is_guest_thread()) { X_KTHREAD& thread_object = diff --git a/src/xenia/kernel/xthread.h b/src/xenia/kernel/xthread.h index 74139e679..10dcccd8a 100644 --- a/src/xenia/kernel/xthread.h +++ b/src/xenia/kernel/xthread.h @@ -32,6 +32,24 @@ class XEvent; constexpr uint32_t X_CREATE_SUSPENDED = 0x00000001; constexpr uint32_t X_TLS_OUT_OF_INDEXES = UINT32_MAX; +struct XDPC { + xe::be type; + uint8_t selected_cpu_number; + uint8_t desired_cpu_number; + X_LIST_ENTRY list_entry; + xe::be routine; + xe::be context; + xe::be arg1; + xe::be arg2; + + void Initialize(uint32_t guest_func, uint32_t guest_context) { + type = 19; + selected_cpu_number = 0; + desired_cpu_number = 0; + routine = guest_func; + context = guest_context; + } +}; struct XAPC { static const uint32_t kSize = 40; @@ -42,118 +60,198 @@ struct XAPC { // This is 4b shorter than NT - looks like the reserved dword at +4 is gone. // NOTE: stored in guest memory. 
uint16_t type; // +0 - uint8_t processor_mode; // +2 + uint8_t apc_mode; // +2 uint8_t enqueued; // +3 xe::be thread_ptr; // +4 - xe::be flink; // +8 - xe::be blink; // +12 + X_LIST_ENTRY list_entry; // +8 xe::be kernel_routine; // +16 xe::be rundown_routine; // +20 xe::be normal_routine; // +24 xe::be normal_context; // +28 xe::be arg1; // +32 xe::be arg2; // +36 - - void Initialize() { - type = 18; // ApcObject - processor_mode = 0; - enqueued = 0; - thread_ptr = 0; - flink = blink = 0; - kernel_routine = 0; - normal_routine = 0; - normal_context = 0; - arg1 = arg2 = 0; - } }; +struct X_KSEMAPHORE { + X_DISPATCH_HEADER header; + xe::be limit; +}; +static_assert_size(X_KSEMAPHORE, 0x14); + +struct X_KTHREAD; +struct X_KPROCESS; +struct X_KPRCB { + TypedGuestPointer current_thread; // 0x0 + TypedGuestPointer unk_4; // 0x4 + TypedGuestPointer idle_thread; // 0x8 + uint8_t current_cpu; // 0xC + uint8_t unk_D[3]; // 0xD + // should only have 1 bit set, used for ipis + xe::be processor_mask; // 0x10 + // incremented in clock interrupt + xe::be dpc_clock; // 0x14 + xe::be interrupt_clock; // 0x18 + xe::be unk_1C; // 0x1C + xe::be unk_20; // 0x20 + // various fields used by KeIpiGenericCall + xe::be ipi_args[3]; // 0x24 + // looks like the target cpus clear their corresponding bit + // in this mask to signal completion to the initiator + xe::be targeted_ipi_cpus_mask; // 0x30 + xe::be ipi_function; // 0x34 + // used to synchronize? + TypedGuestPointer ipi_initiator_prcb; // 0x38 + xe::be unk_3C; // 0x3C + xe::be dpc_related_40; // 0x40 + // must be held to modify any dpc-related fields in the kprcb + xe::be dpc_lock; // 0x44 + X_LIST_ENTRY queued_dpcs_list_head; // 0x48 + xe::be dpc_active; // 0x50 + xe::be unk_54; // 0x54 + xe::be unk_58; // 0x58 + // definitely scheduler related + X_SINGLE_LIST_ENTRY unk_5C; // 0x5C + xe::be unk_60; // 0x60 + // i think the following mask has something to do with the array that comes + // after + xe::be unk_mask_64; // 0x64 + + X_LIST_ENTRY unk_68[32]; // 0x68 + // ExTerminateThread tail calls a function that does KeInsertQueueDpc of this + // dpc + XDPC thread_exit_dpc; // 0x168 + // thread_exit_dpc's routine drains this list and frees each threads threadid, + // kernel stack and dereferences the thread + X_LIST_ENTRY terminating_threads_list; // 0x184 + XDPC unk_18C; // 0x18C +}; // Processor Control Region struct X_KPCR { - xe::be tls_ptr; // 0x0 - xe::be msr_mask; // 0x4 - xe::be interrupt_related; // 0x8 - uint8_t unk_08[0xE]; // 0xA - uint8_t current_irql; // 0x18 - uint8_t unk_19[0x17]; // 0x19 - xe::be pcr_ptr; // 0x30 - uint8_t unk_34[0x38]; // 0x34 - xe::be use_alternative_stack; //0x6C + xe::be tls_ptr; // 0x0 + xe::be msr_mask; // 0x4 + union { + xe::be software_interrupt_state; // 0x8 + struct { + uint8_t unknown_8; // 0x8 + uint8_t apc_software_interrupt_state; // 0x9 + }; + }; + uint8_t unk_0A[2]; // 0xA + uint8_t processtype_value_in_dpc; // 0xC + uint8_t unk_0D[3]; // 0xD + // used in KeSaveFloatingPointState / its vmx counterpart + xe::be thread_fpu_related; // 0x10 + xe::be thread_vmx_related; // 0x14 + uint8_t current_irql; // 0x18 + uint8_t unk_19[0x17]; // 0x19 + xe::be pcr_ptr; // 0x30 + + // this seems to be just garbage data? 
we can stash a pointer to context here + // as a hack for now + union { + uint8_t unk_38[8]; // 0x38 + uint64_t host_stash; // 0x38 + }; + uint8_t unk_40[28]; // 0x40 + xe::be unk_stack_5c; // 0x5C + uint8_t unk_60[12]; // 0x60 + xe::be use_alternative_stack; // 0x6C xe::be stack_base_ptr; // 0x70 Stack base address (high addr) xe::be stack_end_ptr; // 0x74 Stack end (low addr) - //maybe these are the stacks used in apcs? - //i know they're stacks, RtlGetStackLimits returns them if another var here is set + // maybe these are the stacks used in apcs? + // i know they're stacks, RtlGetStackLimits returns them if another var here + // is set xe::be alt_stack_base_ptr; // 0x78 xe::be alt_stack_end_ptr; // 0x7C - uint8_t unk_80[0x80]; // 0x80 - xe::be current_thread; // 0x100 - uint8_t unk_104[0x8]; // 0x104 - uint8_t current_cpu; // 0x10C - uint8_t unk_10D[0x43]; // 0x10D - xe::be dpc_active; // 0x150 + // if bit 1 is set in a handler pointer, it actually points to a KINTERRUPT + // otherwise, it points to a function to execute + xe::be interrupt_handlers[32]; // 0x80 + X_KPRCB prcb_data; // 0x100 + // pointer to KPCRB? + TypedGuestPointer prcb; // 0x2A8 + uint8_t unk_2AC[0x2C]; // 0x2AC }; struct X_KTHREAD { - X_DISPATCH_HEADER header; // 0x0 - xe::be unk_10; // 0x10 - xe::be unk_14; // 0x14 - uint8_t unk_18[0x28]; // 0x10 - xe::be unk_40; // 0x40 - xe::be unk_44; // 0x44 - xe::be unk_48; // 0x48 - xe::be unk_4C; // 0x4C - uint8_t unk_50[0x4]; // 0x50 - xe::be unk_54; // 0x54 - xe::be unk_56; // 0x56 - uint8_t unk_58[0x4]; // 0x58 - xe::be stack_base; // 0x5C - xe::be stack_limit; // 0x60 - xe::be stack_kernel; // 0x64 - xe::be tls_address; // 0x68 - uint8_t unk_6C; // 0x6C - //0x70 = priority? - uint8_t unk_6D[0x3]; // 0x6D - uint8_t priority; // 0x70 - uint8_t fpu_exceptions_on; // 0x71 - uint8_t unk_72; - uint8_t unk_73; - xe::be unk_74; // 0x74 - xe::be unk_78; // 0x78 - xe::be unk_7C; // 0x7C - xe::be unk_80; // 0x80 - xe::be unk_84; // 0x84 - uint8_t unk_88[0x3]; // 0x88 - uint8_t unk_8B; // 0x8B - uint8_t unk_8C[0x10]; // 0x8C - xe::be unk_9C; // 0x9C - uint8_t unk_A0[0x10]; // 0xA0 - int32_t apc_disable_count; // 0xB0 - uint8_t unk_B4[0x8]; // 0xB4 - uint8_t suspend_count; // 0xBC - uint8_t unk_BD; // 0xBD + X_DISPATCH_HEADER header; // 0x0 + xe::be unk_10; // 0x10 + xe::be unk_14; // 0x14 + uint8_t unk_18[0x28]; // 0x10 + xe::be unk_40; // 0x40 + xe::be unk_44; // 0x44 + xe::be unk_48; // 0x48 + xe::be unk_4C; // 0x4C + uint8_t unk_50[0x4]; // 0x50 + xe::be unk_54; // 0x54 + xe::be unk_56; // 0x56 + uint8_t unk_58[0x4]; // 0x58 + xe::be stack_base; // 0x5C + xe::be stack_limit; // 0x60 + xe::be stack_kernel; // 0x64 + xe::be tls_address; // 0x68 + // state = is thread running, suspended, etc + uint8_t thread_state; // 0x6C + // 0x70 = priority? 
+ uint8_t unk_6D[0x3]; // 0x6D + uint8_t priority; // 0x70 + uint8_t fpu_exceptions_on; // 0x71 + // these two process types both get set to the same thing, process_type is + // referenced most frequently, however process_type_dup gets referenced a few + // times while the process is being created + uint8_t process_type_dup; + uint8_t process_type; + //apc_mode determines which list an apc goes into + util::X_TYPED_LIST apc_lists[2]; + TypedGuestPointer process; // 0x84 + uint8_t unk_88[0x3]; // 0x88 + uint8_t apc_related; // 0x8B + uint8_t unk_8C[0x10]; // 0x8C + xe::be msr_mask; // 0x9C + uint8_t unk_A0[4]; // 0xA0 + uint8_t unk_A4; // 0xA4 + uint8_t unk_A5[0xB]; // 0xA5 + int32_t apc_disable_count; // 0xB0 + uint8_t unk_B4[0x8]; // 0xB4 + uint8_t suspend_count; // 0xBC + uint8_t unk_BD; // 0xBD uint8_t terminated; // 0xBE - uint8_t current_cpu; // 0xBF - uint8_t unk_C0[0x10]; // 0xC0 - xe::be stack_alloc_base; // 0xD0 - uint8_t unk_D4[0x5C]; // 0xD4 - xe::be create_time; // 0x130 - xe::be exit_time; // 0x138 - xe::be exit_status; // 0x140 - xe::be unk_144; // 0x144 - xe::be unk_148; // 0x148 - xe::be thread_id; // 0x14C - xe::be start_address; // 0x150 - xe::be unk_154; // 0x154 - xe::be unk_158; // 0x158 - uint8_t unk_15C[0x4]; // 0x15C - xe::be last_error; // 0x160 - xe::be fiber_ptr; // 0x164 - uint8_t unk_168[0x4]; // 0x168 - xe::be creation_flags; // 0x16C - uint8_t unk_170[0xC]; // 0x170 - xe::be unk_17C; // 0x17C - uint8_t unk_180[0x930]; // 0x180 + uint8_t current_cpu; // 0xBF + // these two pointers point to KPRCBs, but seem to be rarely referenced, if at + // all + TypedGuestPointer a_prcb_ptr; // 0xC0 + TypedGuestPointer another_prcb_ptr; // 0xC4 + uint8_t unk_C8[8]; // 0xC8 + xe::be stack_alloc_base; // 0xD0 + // uint8_t unk_D4[0x5C]; // 0xD4 + XAPC on_suspend; // 0xD4 + X_KSEMAPHORE unk_FC; // 0xFC + // this is an entry in + X_LIST_ENTRY process_threads; // 0x110 + xe::be unk_118; // 0x118 + xe::be unk_11C; // 0x11C + xe::be unk_120; // 0x120 + xe::be unk_124; // 0x124 + xe::be unk_128; // 0x128 + xe::be unk_12C; // 0x12C + xe::be create_time; // 0x130 + xe::be exit_time; // 0x138 + xe::be exit_status; // 0x140 + xe::be unk_144; // 0x144 + xe::be unk_148; // 0x148 + xe::be thread_id; // 0x14C + xe::be start_address; // 0x150 + xe::be unk_154; // 0x154 + xe::be unk_158; // 0x158 + uint8_t unk_15C[0x4]; // 0x15C + xe::be last_error; // 0x160 + xe::be fiber_ptr; // 0x164 + uint8_t unk_168[0x4]; // 0x168 + xe::be creation_flags; // 0x16C + uint8_t unk_170[0xC]; // 0x170 + xe::be unk_17C; // 0x17C + uint8_t unk_180[0x930]; // 0x180 // This struct is actually quite long... so uh, not filling this out! }; diff --git a/src/xenia/memory.h b/src/xenia/memory.h index 77b8ff44f..b4195fcd4 100644 --- a/src/xenia/memory.h +++ b/src/xenia/memory.h @@ -20,7 +20,7 @@ #include "xenia/base/memory.h" #include "xenia/base/mutex.h" #include "xenia/cpu/mmio_handler.h" - +#include "xenia/guest_pointers.h" namespace xe { class ByteStream; } // namespace xe @@ -369,6 +369,10 @@ class Memory { #endif } + template + inline T* TranslateVirtual(TypedGuestPointer guest_address) { + return TranslateVirtual(guest_address.m_ptr); + } // Base address of physical memory in the host address space. // This is often something like 0x200000000. 
diff --git a/src/xenia/xbox.h b/src/xenia/xbox.h index 98a7137a9..8ee0462d6 100644 --- a/src/xenia/xbox.h +++ b/src/xenia/xbox.h @@ -329,6 +329,10 @@ typedef struct { } X_EXCEPTION_RECORD; static_assert_size(X_EXCEPTION_RECORD, 0x50); +struct X_KSPINLOCK { + xe::be prcb_of_owner; +}; +static_assert_size(X_KSPINLOCK, 4); #pragma pack(pop) // Found by dumping the kSectionStringTable sections of various games:
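
For reference, the acquire/release contract of the reworked spinlock helpers, written as it would appear inside xboxkrnl_threading.cc (unqualified names as in that file); this is a sketch only, and the function name and locals are illustrative, not part of the patch.

// xeKeKfAcquireSpinLock raises IRQL to 2 (DISPATCH) through xeKfRaiseIrql and
// then CAS-loops on lock->prcb_of_owner, tagging the lock with r13 (the guest
// PCR address). xeKeKfReleaseSpinLock clears the owner and, if the saved IRQL
// was below 2, restores it through xeKfLowerIrql. Both take change_irql = true
// by default.
void ExampleGuardedUpdate(PPCContext* ctx, X_KSPINLOCK* lock) {
  uint32_t old_irql = xeKeKfAcquireSpinLock(ctx, lock);
  // ... modify whatever state the spinlock guards ...
  xeKeKfReleaseSpinLock(ctx, lock, old_irql);
}
// The *AtRaisedIrql / *FromRaisedIrql exports wrap the same helpers with
// change_irql = false, so they only take and drop ownership without touching
// pcr->current_irql.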