[Kernel] Make KeEnter/LeaveCriticalRegion only affect the caller thread

Adds a new X_KTHREAD::apc_disable_count field at offset 0xB0, based on where the 360 kernel appears to store it; makes the CriticalRegion functions act on that per-thread field instead of locking across all threads; and changes DeliverAPCs to check that field before running any APCs.

XThread::LockApc/UnlockApc were also updated, as they previously relied on calling into EnterCriticalRegion/LeaveCriticalRegion. As far as I know, the code that uses LockApc/UnlockApc does have a genuine need for cross-thread locking, so they now operate on XThread::global_critical_region_ directly instead.
This commit is contained in:
emoose 2021-05-24 04:01:35 +01:00 committed by Rick Gibbed
parent 0d3ef65dcd
commit 052ce3d389
3 changed files with 19 additions and 12 deletions

View File

@ -960,14 +960,14 @@ void KeReleaseSpinLockFromRaisedIrql(lpdword_t lock_ptr) {
DECLARE_XBOXKRNL_EXPORT2(KeReleaseSpinLockFromRaisedIrql, kThreading,
kImplemented, kHighFrequency);
void KeEnterCriticalRegion() { XThread::EnterCriticalRegion(); }
void KeEnterCriticalRegion() {
XThread::GetCurrentThread()->EnterCriticalRegion();
}
DECLARE_XBOXKRNL_EXPORT2(KeEnterCriticalRegion, kThreading, kImplemented,
kHighFrequency);
void KeLeaveCriticalRegion() {
XThread::LeaveCriticalRegion();
XThread::GetCurrentThread()->CheckApcs();
XThread::GetCurrentThread()->LeaveCriticalRegion();
}
DECLARE_XBOXKRNL_EXPORT2(KeLeaveCriticalRegion, kThreading, kImplemented,
kHighFrequency);

View File

@ -578,11 +578,15 @@ void XThread::Reenter(uint32_t address) {
}
void XThread::EnterCriticalRegion() {
xe::global_critical_region::mutex().lock();
guest_object<X_KTHREAD>()->apc_disable_count--;
}
void XThread::LeaveCriticalRegion() {
xe::global_critical_region::mutex().unlock();
auto kthread = guest_object<X_KTHREAD>();
auto apc_disable_count = ++kthread->apc_disable_count;
if (apc_disable_count == 0) {
CheckApcs();
}
}
uint32_t XThread::RaiseIrql(uint32_t new_irql) {
@ -593,11 +597,11 @@ void XThread::LowerIrql(uint32_t new_irql) { irql_ = new_irql; }
void XThread::CheckApcs() { DeliverAPCs(); }
void XThread::LockApc() { EnterCriticalRegion(); }
void XThread::LockApc() { global_critical_region_.mutex().lock(); }
void XThread::UnlockApc(bool queue_delivery) {
bool needs_apc = apc_list_.HasPending();
LeaveCriticalRegion();
global_critical_region_.mutex().unlock();
if (needs_apc && queue_delivery) {
thread_->QueueUserCallback([this]() { DeliverAPCs(); });
}
@ -632,7 +636,8 @@ void XThread::DeliverAPCs() {
// https://www.drdobbs.com/inside-nts-asynchronous-procedure-call/184416590?pgno=7
auto processor = kernel_state()->processor();
LockApc();
while (apc_list_.HasPending()) {
auto kthread = guest_object<X_KTHREAD>();
while (apc_list_.HasPending() && kthread->apc_disable_count == 0) {
// Get APC entry (offset for LIST_ENTRY offset) and cache what we need.
// Calling the routine may delete the memory/overwrite it.
uint32_t apc_ptr = apc_list_.Shift() - 8;

View File

@ -113,7 +113,9 @@ struct X_KTHREAD {
uint8_t unk_8B; // 0x8B
uint8_t unk_8C[0x10]; // 0x8C
xe::be<uint32_t> unk_9C; // 0x9C
uint8_t unk_A0[0x1C]; // 0xA0
uint8_t unk_A0[0x10]; // 0xA0
int32_t apc_disable_count; // 0xB0
uint8_t unk_B4[0x8]; // 0xB4
uint8_t suspend_count; // 0xBC
uint8_t unk_BD; // 0xBD
uint8_t unk_BE; // 0xBE
@ -192,8 +194,8 @@ class XThread : public XObject, public cpu::Thread {
virtual void Reenter(uint32_t address);
static void EnterCriticalRegion();
static void LeaveCriticalRegion();
void EnterCriticalRegion();
void LeaveCriticalRegion();
uint32_t RaiseIrql(uint32_t new_irql);
void LowerIrql(uint32_t new_irql);