Add x_kthread priority/fpu_exceptions_on fields; set fpu_exceptions_on in KeEnableFpuExceptions and set priority in SetPriority.
Add msr field on context; write to msr for mtmsr/mfmsr. The msr field does not have a correct default value yet, nor has mtmsrd been reimplemented. Do not evaluate assert expressions in release builds at all, while still avoiding unused-variable warnings.
This commit is contained in:
parent
3dcbd25e7f
commit
e21fd22d09
|
@ -20,9 +20,32 @@ namespace xe {
|
|||
static_assert(sizeof(type) == size, \
|
||||
"bad definition for " #type ": must be " #size " bytes")
|
||||
|
||||
// We rely on assert being compiled out in NDEBUG.
|
||||
/*
|
||||
* chrispy: we need to ensure our expression is not eliminated by the
|
||||
* preprocessor before the compiler runs, otherwise clang & gcc will warn about
|
||||
* unused variables and terminate compilation
|
||||
*
|
||||
* Initial approach was to do "#define xenia_assert static_cast<void>" in
|
||||
* release, however, code is generated in this case for the expression, which
|
||||
* isnt desirable (we have assert expressions w/ side effects in a few places)
|
||||
*
|
||||
* so instead, when compiling for msvc we do __noop (which takes varargs and
|
||||
* generates no code), and under clang/gcc we do __builtin_constant_p, which
|
||||
* also does not evaluate the args
|
||||
*
|
||||
*/
|
||||
#if defined(NDEBUG)
|
||||
|
||||
#if XE_COMPILER_MSVC == 1
|
||||
#define xenia_assert __noop
|
||||
#elif XE_COMPILER_HAS_GNU_EXTENSIONS == 1
|
||||
#define xenia_assert __builtin_constant_p
|
||||
#else
|
||||
#warning \
|
||||
"Compiler does not have MSVC or GNU extensions, falling back to static_cast<void> for assert expr which may cause expressions with sideeffects to be evaluated"
|
||||
#define xenia_assert static_cast<void>
|
||||
#endif
|
||||
|
||||
#else
|
||||
#define xenia_assert assert
|
||||
#endif
|
||||
|
|
|
@ -377,6 +377,9 @@ typedef struct alignas(64) PPCContext_s {
|
|||
uint64_t r[32]; // 0x20 General purpose registers
|
||||
uint64_t ctr; // 0x18 Count register
|
||||
uint64_t lr; // 0x10 Link register
|
||||
|
||||
uint64_t msr; //machine state register
|
||||
|
||||
double f[32]; // 0x120 Floating-point registers
|
||||
vec128_t v[128]; // 0x220 VMX128 vector registers
|
||||
vec128_t vscr_vec;
|
||||
|
|
|
@ -793,30 +793,17 @@ int InstrEmit_mfmsr(PPCHIRBuilder& f, const InstrData& i) {
|
|||
// bit 48 = EE; interrupt enabled
|
||||
// bit 62 = RI; recoverable interrupt
|
||||
// return 8000h if unlocked (interrupts enabled), else 0
|
||||
#if 0
|
||||
f.MemoryBarrier();
|
||||
if (cvars::disable_global_lock || true) {
|
||||
f.StoreGPR(i.X.RT, f.LoadConstantUint64(0));
|
||||
|
||||
} else {
|
||||
f.CallExtern(f.builtins()->check_global_lock);
|
||||
f.StoreGPR(i.X.RT,
|
||||
f.LoadContext(offsetof(PPCContext, scratch), INT64_TYPE));
|
||||
}
|
||||
#else
|
||||
f.StoreGPR(i.X.RT, f.LoadConstantUint64(0));
|
||||
#endif
|
||||
f.StoreGPR(i.X.RT, f.LoadContext(offsetof(PPCContext, msr), INT64_TYPE));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int InstrEmit_mtmsr(PPCHIRBuilder& f, const InstrData& i) {
|
||||
f.StoreContext(
|
||||
offsetof(PPCContext, scratch),
|
||||
f.ZeroExtend(f.ZeroExtend(f.LoadGPR(i.X.RT), INT64_TYPE), INT64_TYPE));
|
||||
f.StoreContext(offsetof(PPCContext, msr), f.LoadGPR(i.X.RT));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int InstrEmit_mtmsrd(PPCHIRBuilder& f, const InstrData& i) {
|
||||
//todo: this is moving msr under a mask, so only writing EE and RI
|
||||
f.StoreContext(offsetof(PPCContext, scratch),
|
||||
f.ZeroExtend(f.LoadGPR(i.X.RT), INT64_TYPE));
|
||||
return 0;
|
||||
|
|
|
@ -518,7 +518,7 @@ void KernelState::UnloadUserModule(const object_ref<UserModule>& module,
|
|||
return e->path() == module->path();
|
||||
}) == user_modules_.end());
|
||||
|
||||
object_table()->ReleaseHandle(module->handle());
|
||||
object_table()->ReleaseHandleInLock(module->handle());
|
||||
}
|
||||
|
||||
void KernelState::TerminateTitle() {
|
||||
|
|
|
@ -149,7 +149,7 @@ X_STATUS ObjectTable::DuplicateHandle(X_HANDLE handle, X_HANDLE* out_handle) {
|
|||
X_STATUS ObjectTable::RetainHandle(X_HANDLE handle) {
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
ObjectTableEntry* entry = LookupTable(handle);
|
||||
ObjectTableEntry* entry = LookupTableInLock(handle);
|
||||
if (!entry) {
|
||||
return X_STATUS_INVALID_HANDLE;
|
||||
}
|
||||
|
@ -161,7 +161,10 @@ X_STATUS ObjectTable::RetainHandle(X_HANDLE handle) {
|
|||
X_STATUS ObjectTable::ReleaseHandle(X_HANDLE handle) {
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
ObjectTableEntry* entry = LookupTable(handle);
|
||||
return ReleaseHandleInLock(handle);
|
||||
}
|
||||
X_STATUS ObjectTable::ReleaseHandleInLock(X_HANDLE handle) {
|
||||
ObjectTableEntry* entry = LookupTableInLock(handle);
|
||||
if (!entry) {
|
||||
return X_STATUS_INVALID_HANDLE;
|
||||
}
|
||||
|
@ -175,7 +178,6 @@ X_STATUS ObjectTable::ReleaseHandle(X_HANDLE handle) {
|
|||
// (but not a failure code)
|
||||
return X_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
X_STATUS ObjectTable::RemoveHandle(X_HANDLE handle) {
|
||||
X_STATUS result = X_STATUS_SUCCESS;
|
||||
|
||||
|
@ -183,13 +185,13 @@ X_STATUS ObjectTable::RemoveHandle(X_HANDLE handle) {
|
|||
if (!handle) {
|
||||
return X_STATUS_INVALID_HANDLE;
|
||||
}
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
ObjectTableEntry* entry = LookupTable(handle);
|
||||
ObjectTableEntry* entry = LookupTableInLock(handle);
|
||||
if (!entry) {
|
||||
return X_STATUS_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
if (entry->object) {
|
||||
auto object = entry->object;
|
||||
entry->object = nullptr;
|
||||
|
@ -246,13 +248,16 @@ void ObjectTable::PurgeAllObjects() {
|
|||
}
|
||||
|
||||
ObjectTable::ObjectTableEntry* ObjectTable::LookupTable(X_HANDLE handle) {
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
return LookupTableInLock(handle);
|
||||
}
|
||||
|
||||
ObjectTable::ObjectTableEntry* ObjectTable::LookupTableInLock(X_HANDLE handle) {
|
||||
handle = TranslateHandle(handle);
|
||||
if (!handle) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Lower 2 bits are ignored.
|
||||
uint32_t slot = GetHandleSlot(handle);
|
||||
if (slot <= table_capacity_) {
|
||||
|
@ -264,8 +269,8 @@ ObjectTable::ObjectTableEntry* ObjectTable::LookupTable(X_HANDLE handle) {
|
|||
|
||||
// Generic lookup
|
||||
template <>
|
||||
object_ref<XObject> ObjectTable::LookupObject<XObject>(
|
||||
X_HANDLE handle, bool already_locked) {
|
||||
object_ref<XObject> ObjectTable::LookupObject<XObject>(X_HANDLE handle,
|
||||
bool already_locked) {
|
||||
auto object = ObjectTable::LookupObject(handle, already_locked);
|
||||
auto result = object_ref<XObject>(reinterpret_cast<XObject*>(object));
|
||||
return result;
|
||||
|
@ -320,15 +325,14 @@ void ObjectTable::GetObjectsByType(XObject::Type type,
|
|||
}
|
||||
|
||||
X_HANDLE ObjectTable::TranslateHandle(X_HANDLE handle) {
|
||||
if (handle == 0xFFFFFFFF) {
|
||||
// CurrentProcess
|
||||
// assert_always();
|
||||
// chrispy: reordered these by likelihood, most likely case is that handle is
|
||||
// not a special handle
|
||||
XE_LIKELY_IF(handle < 0xFFFFFFFE) { return handle; }
|
||||
else if (handle == 0xFFFFFFFF) {
|
||||
return 0;
|
||||
} else if (handle == 0xFFFFFFFE) {
|
||||
// CurrentThread
|
||||
}
|
||||
else {
|
||||
return XThread::GetCurrentThreadHandle();
|
||||
} else {
|
||||
return handle;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@ class ObjectTable {
|
|||
X_STATUS DuplicateHandle(X_HANDLE orig, X_HANDLE* out_handle);
|
||||
X_STATUS RetainHandle(X_HANDLE handle);
|
||||
X_STATUS ReleaseHandle(X_HANDLE handle);
|
||||
X_STATUS ReleaseHandleInLock(X_HANDLE handle);
|
||||
X_STATUS RemoveHandle(X_HANDLE handle);
|
||||
|
||||
bool Save(ByteStream* stream);
|
||||
|
@ -87,7 +88,7 @@ class ObjectTable {
|
|||
int handle_ref_count = 0;
|
||||
XObject* object = nullptr;
|
||||
};
|
||||
|
||||
ObjectTableEntry* LookupTableInLock(X_HANDLE handle);
|
||||
ObjectTableEntry* LookupTable(X_HANDLE handle);
|
||||
XObject* LookupObject(X_HANDLE handle, bool already_locked);
|
||||
void GetObjectsByType(XObject::Type type,
|
||||
|
|
|
@ -19,8 +19,13 @@ namespace xe {
|
|||
namespace kernel {
|
||||
namespace xboxkrnl {
|
||||
|
||||
void KeEnableFpuExceptions_entry(dword_t enabled) {
|
||||
void KeEnableFpuExceptions_entry(
|
||||
const ppc_context_t& ctx) { // dword_t enabled) {
|
||||
// TODO(benvanik): can we do anything about exceptions?
|
||||
// theres a lot more thats supposed to happen here, the floating point state has to be saved to kthread, the irql changes, the machine state register is changed to enable exceptions
|
||||
|
||||
X_KTHREAD* kthread = ctx->TranslateVirtualGPR<X_KTHREAD*>(ctx->r[13]);
|
||||
kthread->fpu_exceptions_on = static_cast<uint32_t>(ctx->r[3]) != 0;
|
||||
}
|
||||
DECLARE_XBOXKRNL_EXPORT1(KeEnableFpuExceptions, kNone, kStub);
|
||||
#if 0
|
||||
|
|
|
@ -717,6 +717,9 @@ void XThread::RundownAPCs() {
|
|||
int32_t XThread::QueryPriority() { return thread_->priority(); }
|
||||
|
||||
void XThread::SetPriority(int32_t increment) {
|
||||
if (is_guest_thread()) {
|
||||
guest_object<X_KTHREAD>()->priority = static_cast<uint8_t>(increment);
|
||||
}
|
||||
priority_ = increment;
|
||||
int32_t target_priority = 0;
|
||||
if (increment > 0x22) {
|
||||
|
|
|
@ -101,7 +101,12 @@ struct X_KTHREAD {
|
|||
xe::be<uint32_t> stack_kernel; // 0x64
|
||||
xe::be<uint32_t> tls_address; // 0x68
|
||||
uint8_t unk_6C; // 0x6C
|
||||
uint8_t unk_6D[0x7]; // 0x6D
|
||||
//0x70 = priority?
|
||||
uint8_t unk_6D[0x3]; // 0x6D
|
||||
uint8_t priority; // 0x70
|
||||
uint8_t fpu_exceptions_on; // 0x71
|
||||
uint8_t unk_72;
|
||||
uint8_t unk_73;
|
||||
xe::be<uint32_t> unk_74; // 0x74
|
||||
xe::be<uint32_t> unk_78; // 0x78
|
||||
xe::be<uint32_t> unk_7C; // 0x7C
|
||||
|
|
Loading…
Reference in New Issue