moved xsemaphore to xthread.h

add typed guest pointer template
add X_KSPINLOCK, rework spinlock functions
rework irql related code, use irql on pcr instead of on XThread
add guest linked list helper functions
renamed ProcessInfoBlock to X_KPROCESS
assigned names to many kernel structure fields
disjtqz 2023-10-10 08:50:10 -04:00 committed by Radosław Gliński
parent 32f7241526
commit b5ddd30572
14 changed files with 507 additions and 230 deletions

View File

@@ -16,6 +16,7 @@
#include "xenia/base/mutex.h"
#include "xenia/base/vec128.h"
#include "xenia/guest_pointers.h"
namespace xe {
namespace cpu {
class Processor;
@@ -449,6 +450,24 @@ typedef struct alignas(64) PPCContext_s {
}
template <typename T>
inline T* TranslateVirtual(TypedGuestPointer<T> guest_address) {
return TranslateVirtual<T*>(guest_address.m_ptr);
}
template <typename T>
inline uint32_t HostToGuestVirtual(T* host_ptr) XE_RESTRICT const {
#if XE_PLATFORM_WIN32 == 1
uint32_t guest_tmp = static_cast<uint32_t>(
reinterpret_cast<const uint8_t*>(host_ptr) - virtual_membase);
if (guest_tmp >= static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this))) {
guest_tmp -= 0x1000;
}
return guest_tmp;
#else
return processor->memory()->HostToGuestVirtual(
reinterpret_cast<void*>(host_ptr));
#endif
}
static std::string GetRegisterName(PPCRegister reg);
std::string GetStringFromValue(PPCRegister reg) const;
void SetValueFromString(PPCRegister reg, std::string value);
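Usage note: the two new helpers above are designed to round-trip. A minimal sketch, assuming a live PPCContext* ctx from an HLE handler and a hypothetical guest address (none of this code is in the commit):

// Sketch: translate a typed guest pointer to host, then map it back.
TypedGuestPointer<X_KTHREAD> guest_thread;
guest_thread = 0x82001000;  // hypothetical guest address
X_KTHREAD* host_thread = ctx->TranslateVirtual(guest_thread);
uint32_t round_tripped = ctx->HostToGuestVirtual(host_thread);
// round_tripped == guest_thread.m_ptr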

View File

@@ -0,0 +1,51 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2020 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef XENIA_GUEST_POINTERS_H_
#define XENIA_GUEST_POINTERS_H_
namespace xe {
template <typename TBase, typename TAdj, size_t offset>
struct ShiftedPointer {
using this_type = ShiftedPointer<TBase, TAdj, offset>;
TBase* m_base;
inline TBase* operator->() { return m_base; }
inline TBase& operator*() { return *m_base; }
inline this_type& operator=(TBase* base) {
m_base = base;
return *this;
}
inline this_type& operator=(this_type other) {
m_base = other.m_base;
return *this;
}
TAdj* GetAdjacent() {
return reinterpret_cast<TAdj*>(
&reinterpret_cast<uint8_t*>(m_base)[-static_cast<ptrdiff_t>(offset)]);
}
};
template <typename T>
struct TypedGuestPointer {
xe::be<uint32_t> m_ptr;
inline TypedGuestPointer<T>& operator=(uint32_t ptr) {
m_ptr = ptr;
return *this;
}
inline bool operator==(uint32_t ptr) const { return m_ptr == ptr; }
inline bool operator!=(uint32_t ptr) const { return m_ptr != ptr; }
// use value directly, no endian swap needed
inline bool operator!() const { return !m_ptr.value; }
};
} // namespace xe
#endif // XENIA_GUEST_POINTERS_H_
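Usage note: ShiftedPointer keeps a pointer to a member at a fixed offset inside a larger struct while still allowing recovery of the enclosing object. A hypothetical sketch (Container and Payload are illustrative names, not from the commit):

struct Payload { int value; };
struct Container {
  uint8_t header[0x10];
  Payload payload;  // at offset 0x10
};

Container c{};
xe::ShiftedPointer<Payload, Container, 0x10> p;
p = &c.payload;                       // behaves like a Payload*
Container* owner = p.GetAdjacent();   // steps back 0x10 bytes to &c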

View File

@@ -138,13 +138,13 @@ util::XdbfGameData KernelState::module_xdbf(
uint32_t KernelState::process_type() const {
auto pib =
memory_->TranslateVirtual<ProcessInfoBlock*>(process_info_block_address_);
memory_->TranslateVirtual<X_KPROCESS*>(process_info_block_address_);
return pib->process_type;
}
void KernelState::set_process_type(uint32_t value) {
auto pib =
memory_->TranslateVirtual<ProcessInfoBlock*>(process_info_block_address_);
memory_->TranslateVirtual<X_KPROCESS*>(process_info_block_address_);
pib->process_type = uint8_t(value);
}
@@ -328,7 +328,7 @@ void KernelState::SetExecutableModule(object_ref<UserModule> module) {
process_info_block_address_ = memory_->SystemHeapAlloc(0x60);
auto pib =
memory_->TranslateVirtual<ProcessInfoBlock*>(process_info_block_address_);
memory_->TranslateVirtual<X_KPROCESS*>(process_info_block_address_);
// TODO(benvanik): figure out what this list is.
pib->unk_04 = pib->unk_08 = 0;
pib->unk_0C = 0x0000007F;
@@ -343,7 +343,7 @@ void KernelState::SetExecutableModule(object_ref<UserModule> module) {
xex2_opt_tls_info* tls_header = nullptr;
executable_module_->GetOptHeader(XEX_HEADER_TLS_INFO, &tls_header);
if (tls_header) {
auto pib = memory_->TranslateVirtual<ProcessInfoBlock*>(
auto pib = memory_->TranslateVirtual<X_KPROCESS*>(
process_info_block_address_);
pib->tls_data_size = tls_header->data_size;
pib->tls_raw_data_size = tls_header->raw_data_size;

View File

@@ -51,7 +51,7 @@ constexpr uint32_t X_PROCTYPE_IDLE = 0;
constexpr uint32_t X_PROCTYPE_USER = 1;
constexpr uint32_t X_PROCTYPE_SYSTEM = 2;
struct ProcessInfoBlock {
struct X_KPROCESS {
xe::be<uint32_t> unk_00;
xe::be<uint32_t> unk_04; // blink
xe::be<uint32_t> unk_08; // flink

View File

@@ -9,7 +9,7 @@ class XModule;
class XNotifyListener;
class XThread;
class UserModule;
struct ProcessInfoBlock;
struct X_KPROCESS;
struct TerminateNotification;
struct X_TIME_STAMP_BUNDLE;
class KernelState;

View File

@@ -50,7 +50,134 @@ class NativeList {
Memory* memory_ = nullptr;
uint32_t head_;
};
template <typename VirtualTranslator>
static X_LIST_ENTRY* XeHostList(uint32_t ptr, VirtualTranslator context) {
return context->TranslateVirtual<X_LIST_ENTRY*>(ptr);
}
template <typename VirtualTranslator>
static uint32_t XeGuestList(X_LIST_ENTRY* ptr, VirtualTranslator context) {
return context->HostToGuestVirtual(ptr);
}
// can either pass an object that adheres to the
// HostToGuestVirtual/TranslateVirtual interface, or the original guest ptr for
// arg 2
template <typename VirtualTranslator>
static void XeInitializeListHead(X_LIST_ENTRY* entry,
VirtualTranslator context) {
// is just a guest ptr?
if constexpr (std::is_unsigned_v<VirtualTranslator>) {
entry->blink_ptr = context;
entry->flink_ptr = context;
} else {
uint32_t orig_ptr = XeGuestList(entry, context);
entry->blink_ptr = orig_ptr;
entry->flink_ptr = orig_ptr;
}
}
template <typename VirtualTranslator>
static bool XeIsListEmpty(X_LIST_ENTRY* entry, VirtualTranslator context) {
return XeHostList(entry->flink_ptr, context) == entry;
}
template <typename VirtualTranslator>
static void XeRemoveEntryList(X_LIST_ENTRY* entry, VirtualTranslator context) {
uint32_t front = entry->flink_ptr;
uint32_t back = entry->blink_ptr;
XeHostList(back, context)->flink_ptr = front;
XeHostList(front, context)->blink_ptr = back;
}
template <typename VirtualTranslator>
static void XeRemoveEntryList(uint32_t entry, VirtualTranslator context) {
XeRemoveEntryList(XeHostList(entry, context), context);
}
template <typename VirtualTranslator>
static uint32_t XeRemoveHeadList(X_LIST_ENTRY* entry,
VirtualTranslator context) {
uint32_t result = entry->flink_ptr;
XeRemoveEntryList(result, context);
return result;
}
template <typename VirtualTranslator>
static uint32_t XeRemoveTailList(X_LIST_ENTRY* entry,
VirtualTranslator context) {
uint32_t result = entry->blink_ptr;
XeRemoveEntryList(result, context);
return result;
}
template <typename VirtualTranslator>
static void XeInsertTailList(X_LIST_ENTRY* list_head, uint32_t list_head_guest,
X_LIST_ENTRY* host_entry, uint32_t entry,
VirtualTranslator context) {
uint32_t old_tail = list_head->blink_ptr;
host_entry->flink_ptr = list_head_guest;
host_entry->blink_ptr = old_tail;
XeHostList(old_tail, context)->flink_ptr = entry;
list_head->blink_ptr = entry;
}
template <typename VirtualTranslator>
static void XeInsertTailList(uint32_t list_head, uint32_t entry,
VirtualTranslator context) {
XeInsertTailList(XeHostList(list_head, context), list_head,
XeHostList(entry, context), entry, context);
}
template <typename VirtualTranslator>
static void XeInsertTailList(X_LIST_ENTRY* list_head, uint32_t entry,
VirtualTranslator context) {
XeInsertTailList(list_head, XeGuestList(list_head, context),
XeHostList(entry, context), entry, context);
}
template <typename VirtualTranslator>
static void XeInsertTailList(X_LIST_ENTRY* list_head, X_LIST_ENTRY* entry,
VirtualTranslator context) {
XeInsertTailList(list_head, XeGuestList(list_head, context), entry,
XeGuestList(entry, context), context);
}
template <typename VirtualTranslator>
static void XeInsertHeadList(X_LIST_ENTRY* list_head, uint32_t list_head_guest,
X_LIST_ENTRY* host_entry, uint32_t entry,
VirtualTranslator context) {
uint32_t old_list_head_flink = list_head->flink_ptr;
host_entry->flink_ptr = old_list_head_flink;
host_entry->blink_ptr = list_head_guest;
XeHostList(old_list_head_flink, context)->blink_ptr = entry;
list_head->flink_ptr = entry;
}
template <typename VirtualTranslator>
static void XeInsertHeadList(uint32_t list_head, uint32_t entry,
VirtualTranslator context) {
XeInsertHeadList(XeHostList(list_head, context), list_head,
XeHostList(entry, context), entry, context);
}
template <typename VirtualTranslator>
static void XeInsertHeadList(X_LIST_ENTRY* list_head, uint32_t entry,
VirtualTranslator context) {
XeInsertHeadList(list_head, XeGuestList(list_head, context),
XeHostList(entry, context), entry, context);
}
template <typename TObject, size_t EntryListOffset>
struct X_TYPED_LIST : public X_LIST_ENTRY {
public:
X_LIST_ENTRY* ObjectListEntry(TObject* obj) {
return reinterpret_cast<X_LIST_ENTRY*>(
&reinterpret_cast<char*>(obj)[static_cast<ptrdiff_t>(EntryListOffset)]);
}
TObject* ListEntryObject(X_LIST_ENTRY* entry) {
return reinterpret_cast<TObject*>(&reinterpret_cast<char*>(
entry)[-static_cast<ptrdiff_t>(EntryListOffset)]);
}
template <typename VirtualTranslator>
void Initialize(VirtualTranslator* translator) {
XeInitializeListHead(this, translator);
}
template <typename VirtualTranslator>
void InsertHead(TObject* entry, VirtualTranslator* translator) {
XeInsertHeadList(this, ObjectListEntry(entry), translator);
}
};
} // namespace util
} // namespace kernel
} // namespace xe
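Usage note: any object exposing TranslateVirtual/HostToGuestVirtual works as the VirtualTranslator, so Memory* qualifies. A sketch with made-up guest addresses (not part of the commit):

void GuestListSketch(Memory* memory) {
  uint32_t head_guest = 0x82000000;   // hypothetical guest addresses
  uint32_t entry_guest = 0x82000100;
  auto* head = memory->TranslateVirtual<X_LIST_ENTRY*>(head_guest);
  XeInitializeListHead(head, head_guest);  // guest-pointer overload
  XeInsertTailList(head_guest, entry_guest, memory);
  assert_true(!XeIsListEmpty(head, memory));
}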

View File

@@ -26,8 +26,8 @@ void KeEnableFpuExceptions_entry(
// has to be saved to kthread, the irql changes, the machine state register is
// changed to enable exceptions
X_KTHREAD* kthread = ctx->TranslateVirtual<X_KTHREAD*>(
ctx->TranslateVirtualGPR<X_KPCR*>(ctx->r[13])->current_thread);
X_KTHREAD* kthread = ctx->TranslateVirtual(
ctx->TranslateVirtualGPR<X_KPCR*>(ctx->r[13])->prcb_data.current_thread);
kthread->fpu_exceptions_on = static_cast<uint32_t>(ctx->r[3]) != 0;
}
DECLARE_XBOXKRNL_EXPORT1(KeEnableFpuExceptions, kNone, kStub);

View File

@@ -234,7 +234,7 @@ dword_result_t NtSuspendThread_entry(dword_t handle,
if (thread->type() == XObject::Type::Thread) {
auto current_pcr = context->TranslateVirtualGPR<X_KPCR*>(context->r[13]);
if (current_pcr->current_thread == thread->guest_object() ||
if (current_pcr->prcb_data.current_thread == thread->guest_object() ||
!thread->guest_object<X_KTHREAD>()->terminated) {
result = thread->Suspend(&suspend_count);
} else {
@@ -1041,88 +1041,68 @@ DECLARE_XBOXKRNL_EXPORT3(NtSignalAndWaitForSingleObjectEx, kThreading,
static void PrefetchForCAS(const void* value) { swcache::PrefetchW(value); }
uint32_t xeKeKfAcquireSpinLock(uint32_t* lock, uint64_t r13 = 1) {
// XELOGD(
// "KfAcquireSpinLock({:08X})",
// lock_ptr);
uint32_t xeKeKfAcquireSpinLock(PPCContext* ctx, X_KSPINLOCK* lock,
bool change_irql) {
auto old_irql = change_irql ? xeKfRaiseIrql(ctx, 2) : 0;
PrefetchForCAS(lock);
assert_true(*lock != static_cast<uint32_t>(r13));
assert_true(lock->prcb_of_owner != static_cast<uint32_t>(ctx->r[13]));
// Lock.
while (!xe::atomic_cas(0, xe::byte_swap(static_cast<uint32_t>(r13)), lock)) {
while (!xe::atomic_cas(0, xe::byte_swap(static_cast<uint32_t>(ctx->r[13])),
&lock->prcb_of_owner.value)) {
// Spin!
// TODO(benvanik): error on deadlock?
xe::threading::MaybeYield();
}
// Raise IRQL to DISPATCH.
XThread* thread = XThread::GetCurrentThread();
auto old_irql = thread->RaiseIrql(2);
return old_irql;
}
dword_result_t KfAcquireSpinLock_entry(lpdword_t lock_ptr,
const ppc_context_t& ppc_context) {
auto lock = reinterpret_cast<uint32_t*>(lock_ptr.host_address());
return xeKeKfAcquireSpinLock(lock, ppc_context->r[13]);
dword_result_t KfAcquireSpinLock_entry(pointer_t<X_KSPINLOCK> lock_ptr,
const ppc_context_t& context) {
return xeKeKfAcquireSpinLock(context, lock_ptr, true);
}
DECLARE_XBOXKRNL_EXPORT3(KfAcquireSpinLock, kThreading, kImplemented, kBlocking,
kHighFrequency);
void xeKeKfReleaseSpinLock(uint32_t* lock, dword_t old_irql) {
void xeKeKfReleaseSpinLock(PPCContext* ctx, X_KSPINLOCK* lock, dword_t old_irql,
bool change_irql) {
assert_true(lock->prcb_of_owner == static_cast<uint32_t>(ctx->r[13]));
// Unlock.
lock->prcb_of_owner.value = 0;
if (change_irql) {
// Unlock.
*lock = 0;
if (old_irql >= 2) {
return;
}
// Restore IRQL.
XThread* thread = XThread::GetCurrentThread();
thread->LowerIrql(old_irql);
xeKfLowerIrql(ctx, old_irql);
}
}
void KfReleaseSpinLock_entry(lpdword_t lock_ptr, dword_t old_irql,
void KfReleaseSpinLock_entry(pointer_t<X_KSPINLOCK> lock_ptr, dword_t old_irql,
const ppc_context_t& ppc_ctx) {
assert_true(*lock_ptr == static_cast<uint32_t>(ppc_ctx->r[13]));
xeKeKfReleaseSpinLock(ppc_ctx, lock_ptr, old_irql, true);
}
*lock_ptr = 0;
if (old_irql >= 2) {
return;
}
// Restore IRQL.
XThread* thread = XThread::GetCurrentThread();
thread->LowerIrql(old_irql);
}
DECLARE_XBOXKRNL_EXPORT2(KfReleaseSpinLock, kThreading, kImplemented,
kHighFrequency);
// todo: this is not accurate
void KeAcquireSpinLockAtRaisedIrql_entry(lpdword_t lock_ptr,
void KeAcquireSpinLockAtRaisedIrql_entry(pointer_t<X_KSPINLOCK> lock_ptr,
const ppc_context_t& ppc_ctx) {
// Lock.
auto lock = reinterpret_cast<uint32_t*>(lock_ptr.host_address());
// must not be our own thread
assert_true(*lock_ptr != static_cast<uint32_t>(ppc_ctx->r[13]));
PrefetchForCAS(lock);
while (!xe::atomic_cas(
0, xe::byte_swap(static_cast<uint32_t>(ppc_ctx->r[13])), lock)) {
#if XE_ARCH_AMD64 == 1
// todo: this is just a nop if they don't have SMT, which is not great
// either...
_mm_pause();
#endif
// Spin!
// TODO(benvanik): error on deadlock?
}
xeKeKfAcquireSpinLock(ppc_ctx, lock_ptr, false);
}
DECLARE_XBOXKRNL_EXPORT3(KeAcquireSpinLockAtRaisedIrql, kThreading,
kImplemented, kBlocking, kHighFrequency);
dword_result_t KeTryToAcquireSpinLockAtRaisedIrql_entry(
lpdword_t lock_ptr, const ppc_context_t& ppc_ctx) {
pointer_t<X_KSPINLOCK> lock_ptr, const ppc_context_t& ppc_ctx) {
// Lock.
auto lock = reinterpret_cast<uint32_t*>(lock_ptr.host_address());
assert_true(*lock_ptr != static_cast<uint32_t>(ppc_ctx->r[13]));
assert_true(lock_ptr->prcb_of_owner != static_cast<uint32_t>(ppc_ctx->r[13]));
PrefetchForCAS(lock);
if (!xe::atomic_cas(0, xe::byte_swap(static_cast<uint32_t>(ppc_ctx->r[13])),
lock)) {
@@ -1133,12 +1113,11 @@ dword_result_t KeTryToAcquireSpinLockAtRaisedIrql_entry(
DECLARE_XBOXKRNL_EXPORT4(KeTryToAcquireSpinLockAtRaisedIrql, kThreading,
kImplemented, kBlocking, kHighFrequency, kSketchy);
void KeReleaseSpinLockFromRaisedIrql_entry(lpdword_t lock_ptr,
void KeReleaseSpinLockFromRaisedIrql_entry(pointer_t<X_KSPINLOCK> lock_ptr,
const ppc_context_t& ppc_ctx) {
// Unlock.
assert_true(*lock_ptr == static_cast<uint32_t>(ppc_ctx->r[13]));
*lock_ptr = 0;
xeKeKfReleaseSpinLock(ppc_ctx, lock_ptr, 0, false);
}
DECLARE_XBOXKRNL_EXPORT2(KeReleaseSpinLockFromRaisedIrql, kThreading,
kImplemented, kHighFrequency);
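Usage note: the change_irql flag folds the two acquisition paths into one helper. A sketch of the intended pairings, assuming ctx and lock come from the caller:

// KfAcquireSpinLock path: raises IRQL to DISPATCH (2), returns the old IRQL.
uint32_t old_irql = xeKeKfAcquireSpinLock(ctx, lock);
// ... critical section ...
xeKeKfReleaseSpinLock(ctx, lock, old_irql);

// KeAcquireSpinLockAtRaisedIrql path: the caller is already at DISPATCH, so
// the IRQL is left untouched and old_irql is ignored on release.
xeKeKfAcquireSpinLock(ctx, lock, false);
xeKeKfReleaseSpinLock(ctx, lock, 0, false);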
@@ -1168,29 +1147,26 @@ dword_result_t KeRaiseIrqlToDpcLevel_entry(const ppc_context_t& ctx) {
}
DECLARE_XBOXKRNL_EXPORT2(KeRaiseIrqlToDpcLevel, kThreading, kImplemented,
kHighFrequency);
// irql is supposed to be per thread afaik...
void KfLowerIrql_entry(dword_t new_irql, const ppc_context_t& ctx) {
X_KPCR* kpcr = ctx.GetPCR();
void xeKfLowerIrql(PPCContext* ctx, unsigned char new_irql) {
X_KPCR* kpcr = ctx->TranslateVirtualGPR<X_KPCR*>(ctx->r[13]);
if (new_irql > kpcr->current_irql) {
XELOGE("KfLowerIrql : new_irql > kpcr->current_irql!");
}
kpcr->current_irql = new_irql;
if (new_irql < 2) {
// this actually calls a function that eventually calls checkapcs.
// the called function does a ton of other stuff including changing the
// irql and interrupt_related
}
}
// irql is supposed to be per thread afaik...
void KfLowerIrql_entry(dword_t new_irql, const ppc_context_t& ctx) {
xeKfLowerIrql(ctx, static_cast<unsigned char>(new_irql));
}
DECLARE_XBOXKRNL_EXPORT2(KfLowerIrql, kThreading, kImplemented, kHighFrequency);
// used by aurora's nova plugin
// like the other irql related functions, writes to an unknown mmio range (
// 0x7FFF ). The range is indexed by the low 16 bits of the KPCR's pointer (so
// r13)
dword_result_t KfRaiseIrql_entry(dword_t new_irql, const ppc_context_t& ctx) {
X_KPCR* v1 = ctx.GetPCR();
unsigned char xeKfRaiseIrql(PPCContext* ctx, unsigned char new_irql) {
X_KPCR* v1 = ctx->TranslateVirtualGPR<X_KPCR*>(ctx->r[13]);
uint32_t old_irql = v1->current_irql;
v1->current_irql = new_irql;
@@ -1200,6 +1176,13 @@ dword_result_t KfRaiseIrql_entry(dword_t new_irql, const ppc_context_t& ctx) {
}
return old_irql;
}
// used by aurora's nova plugin
// like the other irql related functions, writes to an unknown mmio range (
// 0x7FFF ). The range is indexed by the low 16 bits of the KPCR's pointer (so
// r13)
dword_result_t KfRaiseIrql_entry(dword_t new_irql, const ppc_context_t& ctx) {
return xeKfRaiseIrql(ctx, new_irql);
}
DECLARE_XBOXKRNL_EXPORT2(KfRaiseIrql, kThreading, kImplemented, kHighFrequency);
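Usage note: the new host-callable forms pair the same way the guest exports do. Sketch:

unsigned char old_irql = xeKfRaiseIrql(ctx, 2);  // raise to DISPATCH_LEVEL
// ... work that must not be preempted by APC delivery ...
xeKfLowerIrql(ctx, old_irql);                    // restore the previous IRQL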
@@ -1222,19 +1205,29 @@ void NtQueueApcThread_entry(dword_t thread_handle, lpvoid_t apc_routine,
thread->EnqueueApc(apc_routine, apc_routine_context, arg1, arg2);
}
DECLARE_XBOXKRNL_EXPORT1(NtQueueApcThread, kThreading, kImplemented);
void xeKeInitializeApc(XAPC* apc, uint32_t thread_ptr, uint32_t kernel_routine,
uint32_t rundown_routine, uint32_t normal_routine,
uint32_t apc_mode, uint32_t normal_context) {
apc->thread_ptr = thread_ptr;
apc->kernel_routine = kernel_routine;
apc->rundown_routine = rundown_routine;
apc->normal_routine = normal_routine;
apc->type = 18;
if (normal_routine) {
apc->apc_mode = apc_mode;
apc->normal_context = normal_context;
} else {
apc->apc_mode = 0;
apc->normal_context = 0;
}
apc->enqueued = 0;
}
void KeInitializeApc_entry(pointer_t<XAPC> apc, lpvoid_t thread_ptr,
lpvoid_t kernel_routine, lpvoid_t rundown_routine,
lpvoid_t normal_routine, dword_t processor_mode,
lpvoid_t normal_context) {
apc->Initialize();
apc->processor_mode = processor_mode;
apc->thread_ptr = thread_ptr.guest_address();
apc->kernel_routine = kernel_routine.guest_address();
apc->rundown_routine = rundown_routine.guest_address();
apc->normal_routine = normal_routine.guest_address();
apc->normal_context =
normal_routine.guest_address() ? normal_context.guest_address() : 0;
xeKeInitializeApc(apc, thread_ptr, kernel_routine, rundown_routine,
normal_routine, processor_mode, normal_context);
}
DECLARE_XBOXKRNL_EXPORT1(KeInitializeApc, kThreading, kImplemented);
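Usage note: host code can now build APCs without going through the export wrapper. A sketch of a kernel-mode APC with no normal routine (apc and thread_guest_ptr are assumed to exist; the dummy routines are the ones XThread::EnqueueApc already uses):

// With normal_routine == 0, xeKeInitializeApc forces apc_mode and
// normal_context to zero, per the branch above.
xeKeInitializeApc(apc, thread_guest_ptr, XAPC::kDummyKernelRoutine,
                  XAPC::kDummyRundownRoutine, 0, 0, 0);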
@@ -1310,29 +1303,9 @@ dword_result_t KiApcNormalRoutineNop_entry(dword_t unk0 /* output? */,
}
DECLARE_XBOXKRNL_EXPORT1(KiApcNormalRoutineNop, kThreading, kStub);
typedef struct {
xe::be<uint32_t> unknown;
xe::be<uint32_t> flink;
xe::be<uint32_t> blink;
xe::be<uint32_t> routine;
xe::be<uint32_t> context;
xe::be<uint32_t> arg1;
xe::be<uint32_t> arg2;
} XDPC;
void KeInitializeDpc_entry(pointer_t<XDPC> dpc, lpvoid_t routine,
lpvoid_t context) {
// KDPC (maybe) 0x18 bytes?
uint32_t type = 19; // DpcObject
uint32_t importance = 0;
uint32_t number = 0; // ?
dpc->unknown = (type << 24) | (importance << 16) | (number);
dpc->flink = 0;
dpc->blink = 0;
dpc->routine = routine.guest_address();
dpc->context = context.guest_address();
dpc->arg1 = 0;
dpc->arg2 = 0;
dpc->Initialize(routine, context);
}
DECLARE_XBOXKRNL_EXPORT2(KeInitializeDpc, kThreading, kImplemented, kSketchy);
@@ -1385,7 +1358,7 @@ struct X_ERWLOCK {
be<uint32_t> readers_entry_count; // 0xC
X_KEVENT writer_event; // 0x10
X_KSEMAPHORE reader_semaphore; // 0x20
uint32_t spin_lock; // 0x34
X_KSPINLOCK spin_lock; // 0x34
};
static_assert_size(X_ERWLOCK, 0x38);
@@ -1396,24 +1369,23 @@ void ExInitializeReadWriteLock_entry(pointer_t<X_ERWLOCK> lock_ptr) {
lock_ptr->readers_entry_count = 0;
KeInitializeEvent_entry(&lock_ptr->writer_event, 1, 0);
KeInitializeSemaphore_entry(&lock_ptr->reader_semaphore, 0, 0x7FFFFFFF);
lock_ptr->spin_lock = 0;
lock_ptr->spin_lock.prcb_of_owner = 0;
}
DECLARE_XBOXKRNL_EXPORT1(ExInitializeReadWriteLock, kThreading, kImplemented);
void ExAcquireReadWriteLockExclusive_entry(pointer_t<X_ERWLOCK> lock_ptr,
const ppc_context_t& ppc_context) {
auto old_irql =
xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]);
auto old_irql = xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock);
int32_t lock_count = ++lock_ptr->lock_count;
if (!lock_count) {
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
return;
}
lock_ptr->writers_waiting_count++;
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
xeKeWaitForSingleObject(&lock_ptr->writer_event, 7, 0, 0, nullptr);
}
DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockExclusive, kThreading,
@@ -1422,7 +1394,7 @@ DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockExclusive, kThreading,
dword_result_t ExTryToAcquireReadWriteLockExclusive_entry(
pointer_t<X_ERWLOCK> lock_ptr, const ppc_context_t& ppc_context) {
auto old_irql =
xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]);
xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock);
uint32_t result;
if (lock_ptr->lock_count < 0) {
@@ -1432,7 +1404,7 @@ dword_result_t ExTryToAcquireReadWriteLockExclusive_entry(
result = 0;
}
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
return result;
}
DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockExclusive, kThreading,
@@ -1440,20 +1412,19 @@ DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockExclusive, kThreading,
void ExAcquireReadWriteLockShared_entry(pointer_t<X_ERWLOCK> lock_ptr,
const ppc_context_t& ppc_context) {
auto old_irql =
xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]);
auto old_irql = xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock);
int32_t lock_count = ++lock_ptr->lock_count;
if (!lock_count ||
(lock_ptr->readers_entry_count && !lock_ptr->writers_waiting_count)) {
lock_ptr->readers_entry_count++;
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
return;
}
lock_ptr->readers_waiting_count++;
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
xeKeWaitForSingleObject(&lock_ptr->reader_semaphore, 7, 0, 0, nullptr);
}
DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockShared, kThreading, kImplemented,
@@ -1461,8 +1432,7 @@ DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockShared, kThreading, kImplemented,
dword_result_t ExTryToAcquireReadWriteLockShared_entry(
pointer_t<X_ERWLOCK> lock_ptr, const ppc_context_t& ppc_context) {
auto old_irql =
xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]);
auto old_irql = xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock);
uint32_t result;
if (lock_ptr->lock_count < 0 ||
@@ -1474,7 +1444,7 @@ dword_result_t ExTryToAcquireReadWriteLockShared_entry(
result = 0;
}
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
return result;
}
DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockShared, kThreading,
@@ -1482,14 +1452,13 @@ DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockShared, kThreading,
void ExReleaseReadWriteLock_entry(pointer_t<X_ERWLOCK> lock_ptr,
const ppc_context_t& ppc_context) {
auto old_irql =
xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]);
auto old_irql = xeKeKfAcquireSpinLock(ppc_context, &lock_ptr->spin_lock);
int32_t lock_count = --lock_ptr->lock_count;
if (lock_count < 0) {
lock_ptr->readers_entry_count = 0;
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
return;
}
@@ -1498,7 +1467,7 @@ void ExReleaseReadWriteLock_entry(pointer_t<X_ERWLOCK> lock_ptr,
if (readers_waiting_count) {
lock_ptr->readers_waiting_count = 0;
lock_ptr->readers_entry_count = readers_waiting_count;
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
xeKeReleaseSemaphore(&lock_ptr->reader_semaphore, 1,
readers_waiting_count, 0);
return;
@@ -1507,12 +1476,12 @@ void ExReleaseReadWriteLock_entry(pointer_t<X_ERWLOCK> lock_ptr,
auto readers_entry_count = --lock_ptr->readers_entry_count;
if (readers_entry_count) {
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
return;
}
lock_ptr->writers_waiting_count--;
xeKeKfReleaseSpinLock(&lock_ptr->spin_lock, old_irql);
xeKeKfReleaseSpinLock(ppc_context, &lock_ptr->spin_lock, old_irql);
xeKeSetEvent(&lock_ptr->writer_event, 1, 0);
}
DECLARE_XBOXKRNL_EXPORT1(ExReleaseReadWriteLock, kThreading, kImplemented);

View File

@@ -50,6 +50,16 @@ uint32_t ExTerminateThread(uint32_t exit_code);
uint32_t NtResumeThread(uint32_t handle, uint32_t* suspend_count_ptr);
uint32_t NtClose(uint32_t handle);
void xeKeInitializeApc(XAPC* apc, uint32_t thread_ptr, uint32_t kernel_routine,
uint32_t rundown_routine, uint32_t normal_routine,
uint32_t apc_mode, uint32_t normal_context);
void xeKfLowerIrql(PPCContext* ctx, unsigned char new_irql);
unsigned char xeKfRaiseIrql(PPCContext* ctx, unsigned char new_irql);
void xeKeKfReleaseSpinLock(PPCContext* ctx, X_KSPINLOCK* lock,
dword_t old_irql, bool change_irql = true);
uint32_t xeKeKfAcquireSpinLock(PPCContext* ctx, X_KSPINLOCK* lock,
bool change_irql = true);
} // namespace xboxkrnl
} // namespace kernel

View File

@@ -13,16 +13,10 @@
#include "xenia/base/threading.h"
#include "xenia/kernel/xobject.h"
#include "xenia/xbox.h"
#include "xenia/kernel/xthread.h"
namespace xe {
namespace kernel {
struct X_KSEMAPHORE {
X_DISPATCH_HEADER header;
xe::be<uint32_t> limit;
};
static_assert_size(X_KSEMAPHORE, 0x14);
class XSemaphore : public XObject {
public:
static const XObject::Type kObjectType = XObject::Type::Semaphore;

View File

@@ -365,12 +365,12 @@ X_STATUS XThread::Create() {
pcr->tls_ptr = tls_static_address_;
pcr->pcr_ptr = pcr_address_;
pcr->current_thread = guest_object();
pcr->prcb_data.current_thread = guest_object();
pcr->stack_base_ptr = stack_base_;
pcr->stack_end_ptr = stack_limit_;
pcr->dpc_active = 0; // DPC active bool?
pcr->prcb_data.dpc_active = 0; // DPC active bool?
// Always retain when starting - the thread owns itself until exited.
RetainHandle();
@@ -623,7 +623,8 @@ void XThread::EnqueueApc(uint32_t normal_routine, uint32_t normal_context,
uint32_t apc_ptr = memory()->SystemHeapAlloc(XAPC::kSize);
auto apc = reinterpret_cast<XAPC*>(memory()->TranslateVirtual(apc_ptr));
apc->Initialize();
apc->type = 18;
apc->apc_mode = 1;
apc->kernel_routine = XAPC::kDummyKernelRoutine;
apc->rundown_routine = XAPC::kDummyRundownRoutine;
apc->normal_routine = normal_routine;
@@ -768,7 +769,7 @@ void XThread::SetAffinity(uint32_t affinity) {
uint8_t XThread::active_cpu() const {
const X_KPCR& pcr = *memory()->TranslateVirtual<const X_KPCR*>(pcr_address_);
return pcr.current_cpu;
return pcr.prcb_data.current_cpu;
}
void XThread::SetActiveCpu(uint8_t cpu_index) {
@@ -777,7 +778,7 @@ void XThread::SetActiveCpu(uint8_t cpu_index) {
assert_true(cpu_index < 6);
X_KPCR& pcr = *memory()->TranslateVirtual<X_KPCR*>(pcr_address_);
pcr.current_cpu = cpu_index;
pcr.prcb_data.current_cpu = cpu_index;
if (is_guest_thread()) {
X_KTHREAD& thread_object =

View File

@@ -32,6 +32,24 @@ class XEvent;
constexpr uint32_t X_CREATE_SUSPENDED = 0x00000001;
constexpr uint32_t X_TLS_OUT_OF_INDEXES = UINT32_MAX;
struct XDPC {
xe::be<uint16_t> type;
uint8_t selected_cpu_number;
uint8_t desired_cpu_number;
X_LIST_ENTRY list_entry;
xe::be<uint32_t> routine;
xe::be<uint32_t> context;
xe::be<uint32_t> arg1;
xe::be<uint32_t> arg2;
void Initialize(uint32_t guest_func, uint32_t guest_context) {
type = 19;
selected_cpu_number = 0;
desired_cpu_number = 0;
routine = guest_func;
context = guest_context;
}
};
struct XAPC {
static const uint32_t kSize = 40;
@@ -42,56 +60,118 @@ struct XAPC {
// This is 4b shorter than NT - looks like the reserved dword at +4 is gone.
// NOTE: stored in guest memory.
uint16_t type; // +0
uint8_t processor_mode; // +2
uint8_t apc_mode; // +2
uint8_t enqueued; // +3
xe::be<uint32_t> thread_ptr; // +4
xe::be<uint32_t> flink; // +8
xe::be<uint32_t> blink; // +12
X_LIST_ENTRY list_entry; // +8
xe::be<uint32_t> kernel_routine; // +16
xe::be<uint32_t> rundown_routine; // +20
xe::be<uint32_t> normal_routine; // +24
xe::be<uint32_t> normal_context; // +28
xe::be<uint32_t> arg1; // +32
xe::be<uint32_t> arg2; // +36
void Initialize() {
type = 18; // ApcObject
processor_mode = 0;
enqueued = 0;
thread_ptr = 0;
flink = blink = 0;
kernel_routine = 0;
normal_routine = 0;
normal_context = 0;
arg1 = arg2 = 0;
}
};
struct X_KSEMAPHORE {
X_DISPATCH_HEADER header;
xe::be<uint32_t> limit;
};
static_assert_size(X_KSEMAPHORE, 0x14);
struct X_KTHREAD;
struct X_KPROCESS;
struct X_KPRCB {
TypedGuestPointer<X_KTHREAD> current_thread; // 0x0
TypedGuestPointer<X_KTHREAD> unk_4; // 0x4
TypedGuestPointer<X_KTHREAD> idle_thread; // 0x8
uint8_t current_cpu; // 0xC
uint8_t unk_D[3]; // 0xD
// should only have 1 bit set, used for ipis
xe::be<uint32_t> processor_mask; // 0x10
// incremented in clock interrupt
xe::be<uint32_t> dpc_clock; // 0x14
xe::be<uint32_t> interrupt_clock; // 0x18
xe::be<uint32_t> unk_1C; // 0x1C
xe::be<uint32_t> unk_20; // 0x20
// various fields used by KeIpiGenericCall
xe::be<uint32_t> ipi_args[3]; // 0x24
// looks like the target cpus clear their corresponding bit
// in this mask to signal completion to the initiator
xe::be<uint32_t> targeted_ipi_cpus_mask; // 0x30
xe::be<uint32_t> ipi_function; // 0x34
// used to synchronize?
TypedGuestPointer<X_KPRCB> ipi_initiator_prcb; // 0x38
xe::be<uint32_t> unk_3C; // 0x3C
xe::be<uint32_t> dpc_related_40; // 0x40
// must be held to modify any dpc-related fields in the kprcb
xe::be<uint32_t> dpc_lock; // 0x44
X_LIST_ENTRY queued_dpcs_list_head; // 0x48
xe::be<uint32_t> dpc_active; // 0x50
xe::be<uint32_t> unk_54; // 0x54
xe::be<uint32_t> unk_58; // 0x58
// definitely scheduler related
X_SINGLE_LIST_ENTRY unk_5C; // 0x5C
xe::be<uint32_t> unk_60; // 0x60
// i think the following mask has something to do with the array that comes
// after
xe::be<uint32_t> unk_mask_64; // 0x64
X_LIST_ENTRY unk_68[32]; // 0x68
// ExTerminateThread tail calls a function that does KeInsertQueueDpc of this
// dpc
XDPC thread_exit_dpc; // 0x168
// thread_exit_dpc's routine drains this list, frees each thread's thread id
// and kernel stack, and dereferences the thread
X_LIST_ENTRY terminating_threads_list; // 0x184
XDPC unk_18C; // 0x18C
};
// Processor Control Region
struct X_KPCR {
xe::be<uint32_t> tls_ptr; // 0x0
xe::be<uint32_t> msr_mask; // 0x4
xe::be<uint16_t> interrupt_related; // 0x8
uint8_t unk_08[0xE]; // 0xA
union {
xe::be<uint16_t> software_interrupt_state; // 0x8
struct {
uint8_t unknown_8; // 0x8
uint8_t apc_software_interrupt_state; // 0x9
};
};
uint8_t unk_0A[2]; // 0xA
uint8_t processtype_value_in_dpc; // 0xC
uint8_t unk_0D[3]; // 0xD
// used in KeSaveFloatingPointState / its vmx counterpart
xe::be<uint32_t> thread_fpu_related; // 0x10
xe::be<uint32_t> thread_vmx_related; // 0x14
uint8_t current_irql; // 0x18
uint8_t unk_19[0x17]; // 0x19
xe::be<uint32_t> pcr_ptr; // 0x30
uint8_t unk_34[0x38]; // 0x34
xe::be<uint64_t> pcr_ptr; // 0x30
// this seems to be just garbage data? we can stash a pointer to context here
// as a hack for now
union {
uint8_t unk_38[8]; // 0x38
uint64_t host_stash; // 0x38
};
uint8_t unk_40[28]; // 0x40
xe::be<uint32_t> unk_stack_5c; // 0x5C
uint8_t unk_60[12]; // 0x60
xe::be<uint32_t> use_alternative_stack; // 0x6C
xe::be<uint32_t> stack_base_ptr; // 0x70 Stack base address (high addr)
xe::be<uint32_t> stack_end_ptr; // 0x74 Stack end (low addr)
// maybe these are the stacks used in apcs?
//i know they're stacks, RtlGetStackLimits returns them if another var here is set
// i know they're stacks, RtlGetStackLimits returns them if another var here
// is set
xe::be<uint32_t> alt_stack_base_ptr; // 0x78
xe::be<uint32_t> alt_stack_end_ptr; // 0x7C
uint8_t unk_80[0x80]; // 0x80
xe::be<uint32_t> current_thread; // 0x100
uint8_t unk_104[0x8]; // 0x104
uint8_t current_cpu; // 0x10C
uint8_t unk_10D[0x43]; // 0x10D
xe::be<uint32_t> dpc_active; // 0x150
// if bit 1 is set in a handler pointer, it actually points to a KINTERRUPT
// otherwise, it points to a function to execute
xe::be<uint32_t> interrupt_handlers[32]; // 0x80
X_KPRCB prcb_data; // 0x100
// pointer to KPCRB?
TypedGuestPointer<X_KPRCB> prcb; // 0x2A8
uint8_t unk_2AC[0x2C]; // 0x2AC
};
struct X_KTHREAD {
@@ -111,32 +191,50 @@ struct X_KTHREAD {
xe::be<uint32_t> stack_limit; // 0x60
xe::be<uint32_t> stack_kernel; // 0x64
xe::be<uint32_t> tls_address; // 0x68
uint8_t unk_6C; // 0x6C
// state = is thread running, suspended, etc
uint8_t thread_state; // 0x6C
// 0x70 = priority?
uint8_t unk_6D[0x3]; // 0x6D
uint8_t priority; // 0x70
uint8_t fpu_exceptions_on; // 0x71
uint8_t unk_72;
uint8_t unk_73;
xe::be<uint32_t> unk_74; // 0x74
xe::be<uint32_t> unk_78; // 0x78
xe::be<uint32_t> unk_7C; // 0x7C
xe::be<uint32_t> unk_80; // 0x80
xe::be<uint32_t> unk_84; // 0x84
// these two process types both get set to the same thing, process_type is
// referenced most frequently, however process_type_dup gets referenced a few
// times while the process is being created
uint8_t process_type_dup;
uint8_t process_type;
// apc_mode determines which list an apc goes into
util::X_TYPED_LIST<XAPC, offsetof(XAPC, list_entry)> apc_lists[2];
TypedGuestPointer<X_KPROCESS> process; // 0x84
uint8_t unk_88[0x3]; // 0x88
uint8_t unk_8B; // 0x8B
uint8_t apc_related; // 0x8B
uint8_t unk_8C[0x10]; // 0x8C
xe::be<uint32_t> unk_9C; // 0x9C
uint8_t unk_A0[0x10]; // 0xA0
xe::be<uint32_t> msr_mask; // 0x9C
uint8_t unk_A0[4]; // 0xA0
uint8_t unk_A4; // 0xA4
uint8_t unk_A5[0xB]; // 0xA5
int32_t apc_disable_count; // 0xB0
uint8_t unk_B4[0x8]; // 0xB4
uint8_t suspend_count; // 0xBC
uint8_t unk_BD; // 0xBD
uint8_t terminated; // 0xBE
uint8_t current_cpu; // 0xBF
uint8_t unk_C0[0x10]; // 0xC0
// these two pointers point to KPRCBs, but seem to be rarely referenced, if at
// all
TypedGuestPointer<X_KPRCB> a_prcb_ptr; // 0xC0
TypedGuestPointer<X_KPRCB> another_prcb_ptr; // 0xC4
uint8_t unk_C8[8]; // 0xC8
xe::be<uint32_t> stack_alloc_base; // 0xD0
uint8_t unk_D4[0x5C]; // 0xD4
// uint8_t unk_D4[0x5C]; // 0xD4
XAPC on_suspend; // 0xD4
X_KSEMAPHORE unk_FC; // 0xFC
// this is an entry in the owning process's list of threads
X_LIST_ENTRY process_threads; // 0x110
xe::be<uint32_t> unk_118; // 0x118
xe::be<uint32_t> unk_11C; // 0x11C
xe::be<uint32_t> unk_120; // 0x120
xe::be<uint32_t> unk_124; // 0x124
xe::be<uint32_t> unk_128; // 0x128
xe::be<uint32_t> unk_12C; // 0x12C
xe::be<uint64_t> create_time; // 0x130
xe::be<uint64_t> exit_time; // 0x138
xe::be<uint32_t> exit_status; // 0x140
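Review note: since the commit pins down so many structure offsets in comments, compile-time checks would lock them in. A sketch (the asserts are not part of the commit; the offsets are the ones annotated above):

static_assert(offsetof(X_KPCR, prcb_data) == 0x100, "KPCR layout");
static_assert(offsetof(X_KPCR, prcb) == 0x2A8, "KPCR layout");
static_assert(offsetof(X_KPRCB, thread_exit_dpc) == 0x168, "KPRCB layout");
static_assert(offsetof(X_KTHREAD, create_time) == 0x130, "KTHREAD layout");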

View File

@@ -20,7 +20,7 @@
#include "xenia/base/memory.h"
#include "xenia/base/mutex.h"
#include "xenia/cpu/mmio_handler.h"
#include "xenia/guest_pointers.h"
namespace xe {
class ByteStream;
} // namespace xe
@@ -369,6 +369,10 @@ class Memory {
#endif
}
template <typename T>
inline T* TranslateVirtual(TypedGuestPointer<T> guest_address) {
return TranslateVirtual<T*>(guest_address.m_ptr);
}
// Base address of physical memory in the host address space.
// This is often something like 0x200000000.

View File

@@ -329,6 +329,10 @@ typedef struct {
} X_EXCEPTION_RECORD;
static_assert_size(X_EXCEPTION_RECORD, 0x50);
struct X_KSPINLOCK {
xe::be<uint32_t> prcb_of_owner;
};
static_assert_size(X_KSPINLOCK, 4);
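Review note: the lock word holds the owning processor's KPCR address (the value of r13), which is exactly what the asserts in xboxkrnl_threading.cc compare against. A sketch of that relationship (the helper name is illustrative, not from the commit):

// True when the current processor already holds the lock.
inline bool IsOwnedByCurrentProcessor(PPCContext* ctx,
                                      const X_KSPINLOCK& lock) {
  return lock.prcb_of_owner == static_cast<uint32_t>(ctx->r[13]);
}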
#pragma pack(pop)
// Found by dumping the kSectionStringTable sections of various games: