commit a98fa47081

@@ -428,7 +428,6 @@ void Debugger::OnMessage(std::vector<uint8_t> buffer) {
// thread_builder.add_creation_flags();
thread_builder.add_tls_address(thread->tls_ptr());
thread_builder.add_pcr_address(thread->pcr_ptr());
thread_builder.add_thread_state_address(thread->thread_state_ptr());
thread_builder.add_thread_id(thread->thread_id());
thread_builder.add_name(thread_name_string);
thread_builder.add_priority(thread->priority());

@@ -345,12 +345,13 @@ bool GL4Shader::CompileProgram(std::string source) {

// Note that we put the translated source first so we get good line numbers.
FILE* f = fopen(file_name, "w");
fprintf(f, "%s", translated_disassembly_.c_str());
fprintf(f, "\n\n");
fprintf(f, "/*\n");
fprintf(f, "%s", ucode_disassembly_.c_str());
fprintf(f, " */\n");
fclose(f);
if (f) {
fprintf(f, "%s", translated_disassembly_.c_str());
fprintf(f, "/*\n");
fprintf(f, "%s", ucode_disassembly_.c_str());
fprintf(f, " */\n");
fclose(f);
}
}

program_ = glCreateShaderProgramv(shader_type_ == ShaderType::kVertex

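The hunk above wraps the shader debug dump in an `if (f)` check so the writes are skipped when fopen fails. A minimal standalone sketch of the same pattern; the function and parameter names here are illustrative, not the emulator's actual API:

    #include <cstdio>
    #include <string>

    // Sketch: write the translated GLSL first (so driver error line numbers
    // line up), then the original ucode disassembly inside a block comment,
    // and skip the whole dump if the file could not be opened.
    void DumpShaderSource(const char* file_name, const std::string& translated,
                          const std::string& ucode) {
      FILE* f = std::fopen(file_name, "w");
      if (!f) {
        return;  // fopen may fail (bad path, permissions); nothing to dump.
      }
      std::fprintf(f, "%s\n\n", translated.c_str());
      std::fprintf(f, "/*\n%s */\n", ucode.c_str());
      std::fclose(f);
    }
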
@@ -144,6 +144,25 @@ void KernelState::set_process_type(uint32_t value) {
pib->process_type = uint8_t(value);
}

void KernelState::RegisterTitleTerminateNotification(uint32_t routine,
uint32_t priority) {
TerminateNotification notify;
notify.guest_routine = routine;
notify.priority = priority;

terminate_notifications.push_back(notify);
}

void KernelState::RemoveTitleTerminateNotification(uint32_t routine) {
for (auto it = terminate_notifications.begin();
it != terminate_notifications.end(); it++) {
if (it->guest_routine == routine) {
terminate_notifications.erase(it);
break;
}
}
}

void KernelState::RegisterModule(XModule* module) {}

void KernelState::UnregisterModule(XModule* module) {}

@@ -318,7 +337,21 @@ object_ref<XUserModule> KernelState::LoadUserModule(const char* raw_name) {
void KernelState::TerminateTitle(bool from_guest_thread) {
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);

// First: Kill all guest threads.
// First: Call terminate routines
// TODO: These might take arguments
// FIXME: Calling these will send some threads into kernel code and they'll
// hold the lock when terminated! Do we need to wait for all threads to exit?
/*
if (from_guest_thread) {
for (auto routine : terminate_notifications) {
auto thread_state = XThread::GetCurrentThread()->thread_state();
processor()->Execute(thread_state, routine.guest_routine);
}
}
terminate_notifications.clear();
*/

// Second: Kill all guest threads.
for (auto it = threads_by_id_.begin(); it != threads_by_id_.end();) {
if (it->second->guest_thread()) {
auto thread = it->second;

@@ -329,7 +362,7 @@ void KernelState::TerminateTitle(bool from_guest_thread) {
continue;
}

if (it->second->running()) {
if (thread->running()) {
thread->Terminate(0);
}

@@ -340,7 +373,7 @@ void KernelState::TerminateTitle(bool from_guest_thread) {
}
}

// Second: Unload all user modules (including the executable)
// Third: Unload all user modules (including the executable)
for (int i = 0; i < user_modules_.size(); i++) {
X_STATUS status = user_modules_[i]->Unload();
assert_true(XSUCCEEDED(status));

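The registration list added above is a plain vector of {routine, priority} pairs with linear removal. A simplified, self-contained sketch of the same bookkeeping; the class and method names below are invented for illustration, the real code lives on KernelState:

    #include <cstdint>
    #include <vector>

    struct TerminateNotification {
      uint32_t guest_routine;
      uint32_t priority;
    };

    // Register appends; Remove erases the first matching routine and stops,
    // mirroring the loop in KernelState::RemoveTitleTerminateNotification.
    class TerminateNotificationList {
     public:
      void Register(uint32_t routine, uint32_t priority) {
        notifications_.push_back({routine, priority});
      }
      void Remove(uint32_t routine) {
        for (auto it = notifications_.begin(); it != notifications_.end(); ++it) {
          if (it->guest_routine == routine) {
            notifications_.erase(it);  // erase invalidates it, so stop here.
            break;
          }
        }
      }

     private:
      std::vector<TerminateNotification> notifications_;
    };
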
@@ -79,6 +79,11 @@ struct ProcessInfoBlock {
xe::be<uint32_t> unk_5C;
};

struct TerminateNotification {
uint32_t guest_routine;
uint32_t priority;
};

class KernelState {
public:
KernelState(Emulator* emulator);

@@ -108,6 +113,9 @@ class KernelState {
return process_info_block_address_;
}

void RegisterTitleTerminateNotification(uint32_t routine, uint32_t priority);
void RemoveTitleTerminateNotification(uint32_t routine);

void RegisterModule(XModule* module);
void UnregisterModule(XModule* module);
bool IsKernelModule(const char* name);

@@ -179,6 +187,7 @@ class KernelState {
object_ref<XUserModule> executable_module_;
std::vector<object_ref<XKernelModule>> kernel_modules_;
std::vector<object_ref<XUserModule>> user_modules_;
std::vector<TerminateNotification> terminate_notifications;

uint32_t process_info_block_address_;

@@ -106,18 +106,17 @@ uint32_t XThread::GetCurrentThreadHandle() {
return thread->handle();
}

uint32_t XThread::GetCurrentThreadId(const uint8_t* pcr) {
return xe::load_and_swap<uint32_t>(pcr + 0x2D8 + 0x14C);
uint32_t XThread::GetCurrentThreadId() {
XThread* thread = XThread::GetCurrentThread();
return thread->guest_object<X_KTHREAD>()->thread_id;
}

uint32_t XThread::last_error() {
uint8_t* p = memory()->TranslateVirtual(thread_state_address_);
return xe::load_and_swap<uint32_t>(p + 0x160);
return guest_object<X_KTHREAD>()->last_error;
}

void XThread::set_last_error(uint32_t error_code) {
uint8_t* p = memory()->TranslateVirtual(thread_state_address_);
xe::store_and_swap<uint32_t>(p + 0x160, error_code);
guest_object<X_KTHREAD>()->last_error = error_code;
}

void XThread::set_name(const std::string& name) {

@@ -129,9 +128,15 @@ void XThread::set_name(const std::string& name) {
}
}

uint8_t next_cpu = 0;
uint8_t GetFakeCpuNumber(uint8_t proc_mask) {
if (!proc_mask) {
return 0; // is this reasonable?
next_cpu++;
if (next_cpu > 6) {
next_cpu = 0;
}

return next_cpu; // is this reasonable?
}
assert_false(proc_mask & 0xC0);

@@ -143,15 +148,12 @@ uint8_t GetFakeCpuNumber(uint8_t proc_mask) {
X_STATUS XThread::Create() {
// Thread kernel object
// This call will also setup the native pointer for us.
auto guest_object = CreateNative<X_KTHREAD>(sizeof(X_KTHREAD));
if (!guest_object) {
auto guest_thread = CreateNative<X_KTHREAD>();
if (!guest_thread) {
XELOGW("Unable to allocate thread object");
return X_STATUS_NO_MEMORY;
}

guest_object->header.type = 6;
StashNative(&guest_object->header, this);

auto module = kernel_state()->GetExecutableModule();

// Allocate thread scratch.

@@ -210,8 +212,7 @@ X_STATUS XThread::Create() {
// 0x160: last error
// So, at offset 0x100 we have a 4b pointer to offset 200, then have the
// structure.
pcr_address_ = memory()->SystemHeapAlloc(0x2D8 + 0xAB0);
thread_state_address_ = pcr_address_ + 0x2D8;
pcr_address_ = memory()->SystemHeapAlloc(0x2D8);
if (!pcr_address_) {
XELOGW("Unable to allocate thread state block");
return X_STATUS_NO_MEMORY;

@@ -232,27 +233,11 @@ X_STATUS XThread::Create() {
uint8_t proc_mask =
static_cast<uint8_t>(creation_params_.creation_flags >> 24);

// Processor Control Region
struct XPCR {
xe::be<uint32_t> tls_ptr; // 0x0
char unk_04[0x2C]; // 0x4
xe::be<uint32_t> pcr_ptr; // 0x30
char unk_34[0x3C]; // 0x34
xe::be<uint32_t> stack_base_ptr; // 0x70 Stack base address (high addr)
xe::be<uint32_t> stack_end_ptr; // 0x74 Stack end (low addr)
char unk_78[0x88]; // 0x78
xe::be<uint32_t> teb_ptr; // 0x100
char unk_104[0x8]; // 0x104
xe::be<uint8_t> current_cpu; // 0x10C
char unk_10D[0x43]; // 0x10D
xe::be<uint32_t> dpc_active; // 0x150
};

XPCR* pcr = memory()->TranslateVirtual<XPCR*>(pcr_address_);
X_KPCR* pcr = memory()->TranslateVirtual<X_KPCR*>(pcr_address_);

pcr->tls_ptr = tls_address_;
pcr->pcr_ptr = pcr_address_;
pcr->teb_ptr = thread_state_address_;
pcr->current_thread = guest_object();

pcr->stack_base_ptr =
thread_state_->stack_address() + thread_state_->stack_size();

@@ -262,19 +247,16 @@ X_STATUS XThread::Create() {
pcr->dpc_active = 0; // DPC active bool?

// Setup the thread state block (last error/etc).
// TODO: This is actually a KTHREAD object. Use the one from CreateNative
// instead.
uint8_t* p = memory()->TranslateVirtual(thread_state_address_);
xe::store_and_swap<uint32_t>(p + 0x000, 6);
xe::store_and_swap<uint32_t>(p + 0x008, thread_state_address_ + 0x008);
xe::store_and_swap<uint32_t>(p + 0x00C, thread_state_address_ + 0x008);
xe::store_and_swap<uint32_t>(p + 0x010, thread_state_address_ + 0x010);
xe::store_and_swap<uint32_t>(p + 0x014, thread_state_address_ + 0x010);
uint8_t* p = memory()->TranslateVirtual(guest_object());
guest_thread->header.type = 6;

xe::store_and_swap<uint32_t>(p + 0x040, thread_state_address_ + 0x018 + 8);
xe::store_and_swap<uint32_t>(p + 0x044, thread_state_address_ + 0x018 + 8);
xe::store_and_swap<uint32_t>(p + 0x048, thread_state_address_);
xe::store_and_swap<uint32_t>(p + 0x04C, thread_state_address_ + 0x018);
xe::store_and_swap<uint32_t>(p + 0x010, guest_object() + 0x010);
xe::store_and_swap<uint32_t>(p + 0x014, guest_object() + 0x010);

xe::store_and_swap<uint32_t>(p + 0x040, guest_object() + 0x018 + 8);
xe::store_and_swap<uint32_t>(p + 0x044, guest_object() + 0x018 + 8);
xe::store_and_swap<uint32_t>(p + 0x048, guest_object());
xe::store_and_swap<uint32_t>(p + 0x04C, guest_object() + 0x018);

xe::store_and_swap<uint16_t>(p + 0x054, 0x102);
xe::store_and_swap<uint16_t>(p + 0x056, 1);

@@ -283,27 +265,27 @@ X_STATUS XThread::Create() {
xe::store_and_swap<uint32_t>(p + 0x060, thread_state_->stack_address());
xe::store_and_swap<uint32_t>(p + 0x068, tls_address_);
xe::store_and_swap<uint8_t>(p + 0x06C, 0);
xe::store_and_swap<uint32_t>(p + 0x074, thread_state_address_ + 0x074);
xe::store_and_swap<uint32_t>(p + 0x078, thread_state_address_ + 0x074);
xe::store_and_swap<uint32_t>(p + 0x07C, thread_state_address_ + 0x07C);
xe::store_and_swap<uint32_t>(p + 0x080, thread_state_address_ + 0x07C);
xe::store_and_swap<uint32_t>(p + 0x074, guest_object() + 0x074);
xe::store_and_swap<uint32_t>(p + 0x078, guest_object() + 0x074);
xe::store_and_swap<uint32_t>(p + 0x07C, guest_object() + 0x07C);
xe::store_and_swap<uint32_t>(p + 0x080, guest_object() + 0x07C);
xe::store_and_swap<uint32_t>(p + 0x084,
kernel_state_->process_info_block_address());
xe::store_and_swap<uint8_t>(p + 0x08B, 1);
// D4 = APC
// FC = semaphore (ptr, 0, 2)
// A88 = APC
// 18 = timer
// 0xD4 = APC
// 0xFC = semaphore (ptr, 0, 2)
// 0xA88 = APC
// 0x18 = timer
xe::store_and_swap<uint32_t>(p + 0x09C, 0xFDFFD7FF);
xe::store_and_swap<uint32_t>(
p + 0x0D0, thread_state_->stack_address() + thread_state_->stack_size());
xe::store_and_swap<uint64_t>(p + 0x130, Clock::QueryGuestSystemTime());
xe::store_and_swap<uint32_t>(p + 0x144, thread_state_address_ + 0x144);
xe::store_and_swap<uint32_t>(p + 0x148, thread_state_address_ + 0x144);
xe::store_and_swap<uint32_t>(p + 0x144, guest_object() + 0x144);
xe::store_and_swap<uint32_t>(p + 0x148, guest_object() + 0x144);
xe::store_and_swap<uint32_t>(p + 0x14C, thread_id_);
xe::store_and_swap<uint32_t>(p + 0x150, creation_params_.start_address);
xe::store_and_swap<uint32_t>(p + 0x154, thread_state_address_ + 0x154);
xe::store_and_swap<uint32_t>(p + 0x158, thread_state_address_ + 0x154);
xe::store_and_swap<uint32_t>(p + 0x154, guest_object() + 0x154);
xe::store_and_swap<uint32_t>(p + 0x158, guest_object() + 0x154);
xe::store_and_swap<uint32_t>(p + 0x160, 0); // last error
xe::store_and_swap<uint32_t>(p + 0x16C, creation_params_.creation_flags);
xe::store_and_swap<uint32_t>(p + 0x17C, 1);

@@ -331,6 +313,7 @@ X_STATUS XThread::Create() {
// Release the self-reference to the thread.
Release();
});

if (!thread_) {
// TODO(benvanik): translate error?
XELOGE("CreateThread failed");

@@ -520,7 +503,8 @@ void XThread::DeliverAPCs() {
LockApc();
}

XELOGD("Completed delivery of APC to %.8X", uint32_t(apc->normal_routine));
XELOGD("Completed delivery of APC to %.8X (%.8X, %.8X, %.8X)",
normal_routine, normal_context, arg1, arg2);

// If special, free it.
if (needs_freeing) {

@@ -614,6 +598,8 @@ void XThread::SetActiveCpu(uint32_t cpu_index) {
}

X_STATUS XThread::Resume(uint32_t* out_suspend_count) {
--guest_object<X_KTHREAD>()->suspend_count;

if (thread_->Resume(out_suspend_count)) {
return X_STATUS_SUCCESS;
} else {

@@ -622,6 +608,8 @@ X_STATUS XThread::Resume(uint32_t* out_suspend_count) {
}

X_STATUS XThread::Suspend(uint32_t* out_suspend_count) {
++guest_object<X_KTHREAD>()->suspend_count;

if (thread_->Suspend(out_suspend_count)) {
return X_STATUS_SUCCESS;
} else {

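Most of the edits above replace manual load_and_swap/store_and_swap pokes at hard-coded offsets with typed access through guest_object<X_KTHREAD>(). A rough host-side sketch of why a typed, byte-swapped view is preferable; the struct below copies only the offsets this commit touches and assumes a little-endian host, whereas xe::be<> handles the swap generically in the real code:

    #include <cstdint>

    // Byte-swap helper standing in for xe::load_and_swap / xe::be<>.
    inline uint32_t Swap32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
    }

    // Overlay view of the guest thread object; offsets mirror X_KTHREAD.
    struct GuestThreadView {
      uint8_t pad_000[0x14C];
      uint32_t thread_id_be;   // 0x14C, stored big-endian in guest memory
      uint8_t pad_150[0x10];   // 0x150
      uint32_t last_error_be;  // 0x160
    };

    uint32_t ReadLastError(const uint8_t* guest_thread_base) {
      auto view = reinterpret_cast<const GuestThreadView*>(guest_thread_base);
      return Swap32(view->last_error_be);  // one named field instead of p + 0x160
    }
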
@@ -65,13 +65,35 @@ struct XAPC {
}
};

// http://www.nirsoft.net/kernel_struct/vista/KTHREAD.html
// Processor Control Region
struct X_KPCR {
xe::be<uint32_t> tls_ptr; // 0x0
char unk_04[0x2C]; // 0x4
xe::be<uint32_t> pcr_ptr; // 0x30
char unk_34[0x3C]; // 0x34
xe::be<uint32_t> stack_base_ptr; // 0x70 Stack base address (high addr)
xe::be<uint32_t> stack_end_ptr; // 0x74 Stack end (low addr)
char unk_78[0x88]; // 0x78
xe::be<uint32_t> current_thread; // 0x100
char unk_104[0x8]; // 0x104
xe::be<uint8_t> current_cpu; // 0x10C
char unk_10D[0x43]; // 0x10D
xe::be<uint32_t> dpc_active; // 0x150
};

struct X_KTHREAD {
X_DISPATCH_HEADER header; // 0x0
char unk_04[0xAA0]; // 0x4
X_DISPATCH_HEADER header; // 0x0
char unk_10[0xAC]; // 0x10
uint8_t suspend_count; // 0xBC
char unk_BD[0x8F]; // 0xBD
xe::be<uint32_t> thread_id; // 0x14C
char unk_150[0x10]; // 0x150
xe::be<uint32_t> last_error; // 0x160
char unk_164[0x94C]; // 0x164

// This struct is actually quite long... so uh, not filling this out!
};
static_assert_size(X_KTHREAD, 0xAB0);

class XThread : public XObject {
public:

@@ -83,11 +105,10 @@ class XThread : public XObject {
static bool IsInThread(XThread* other);
static XThread* GetCurrentThread();
static uint32_t GetCurrentThreadHandle();
static uint32_t GetCurrentThreadId(const uint8_t* pcr);
static uint32_t GetCurrentThreadId();

uint32_t tls_ptr() const { return tls_address_; }
uint32_t pcr_ptr() const { return pcr_address_; }
uint32_t thread_state_ptr() const { return thread_state_address_; }
bool guest_thread() const { return guest_thread_; }
bool running() const { return running_; }

@@ -149,9 +170,8 @@ class XThread : public XObject {
uint32_t scratch_size_ = 0;
uint32_t tls_address_ = 0;
uint32_t pcr_address_ = 0;
uint32_t thread_state_address_ = 0;
cpu::ThreadState* thread_state_ = nullptr;
bool guest_thread_ = false; // Launched into guest code.
bool guest_thread_ = false; // Launched into guest code?
bool running_ = false;

std::string name_;

@@ -159,7 +179,7 @@ class XThread : public XObject {
int32_t priority_ = 0;
uint32_t affinity_ = 0;

std::atomic<uint32_t> irql_;
std::atomic<uint32_t> irql_ = 0;
xe::mutex apc_lock_;
NativeList* apc_list_ = nullptr;
};

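static_assert_size(X_KTHREAD, 0xAB0) pins only the total size; the per-field offsets rely on the comments being right. A hedged sketch of how the same layout could also be checked field by field with standard offsetof asserts (plain integer types here instead of xe::be<>, purely for illustration):

    #include <cstddef>
    #include <cstdint>

    // Mirror of the X_KTHREAD layout above, using the offsets from its comments.
    struct KThreadLayout {
      uint8_t header[0x10];   // 0x0   X_DISPATCH_HEADER
      char unk_10[0xAC];      // 0x10
      uint8_t suspend_count;  // 0xBC
      char unk_BD[0x8F];      // 0xBD
      uint32_t thread_id;     // 0x14C
      char unk_150[0x10];     // 0x150
      uint32_t last_error;    // 0x160
      char unk_164[0x94C];    // 0x164
    };

    static_assert(offsetof(KThreadLayout, suspend_count) == 0xBC, "suspend_count");
    static_assert(offsetof(KThreadLayout, thread_id) == 0x14C, "thread_id");
    static_assert(offsetof(KThreadLayout, last_error) == 0x160, "last_error");
    static_assert(sizeof(KThreadLayout) == 0xAB0, "X_KTHREAD total size");
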
@@ -350,6 +350,14 @@ dword_result_t NetDll_XNetGetDebugXnAddr(dword_t caller,
DECLARE_XAM_EXPORT(NetDll_XNetGetDebugXnAddr,
ExportTag::kNetworking | ExportTag::kStub);

// http://www.google.com/patents/WO2008112448A1?cl=en
// Reserves a port for use by system link
dword_result_t NetDll_XNetSetSystemLinkPort(dword_t caller, dword_t port) {
return 1;
}
DECLARE_XAM_EXPORT(NetDll_XNetSetSystemLinkPort,
ExportTag::kNetworking | ExportTag::kStub);

SHIM_CALL NetDll_XNetGetEthernetLinkStatus_shim(PPCContext* ppc_context,
KernelState* kernel_state) {
// Games seem to call this before *Startup. If we return 0, they don't even

@@ -513,13 +521,25 @@ SHIM_CALL NetDll_accept_shim(PPCContext* ppc_context,
SOCKET ret_socket = accept(socket_handle, &addr, &addrlen);

if (ret_socket == INVALID_SOCKET) {
std::memset(SHIM_MEM_ADDR(addr_ptr), 0, sizeof(addr));
SHIM_SET_MEM_32(addrlen_ptr, sizeof(addr));
if (addr_ptr) {
std::memset(SHIM_MEM_ADDR(addr_ptr), 0, sizeof(addr));
}

if (addrlen_ptr) {
SHIM_SET_MEM_32(addrlen_ptr, sizeof(addr));
}

SHIM_SET_RETURN_32(-1);
} else {
assert_true(ret_socket >> 32 == 0);
StoreSockaddr(addr, SHIM_MEM_ADDR(addr_ptr));
SHIM_SET_MEM_32(addrlen_ptr, addrlen);
if (addr_ptr) {
StoreSockaddr(addr, SHIM_MEM_ADDR(addr_ptr));
}

if (addrlen_ptr) {
SHIM_SET_MEM_32(addrlen_ptr, addrlen);
}

SHIM_SET_RETURN_32(static_cast<uint32_t>(ret_socket));
}
}

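The accept change above treats addr_ptr and addrlen_ptr as optional out parameters: guest code may pass NULL for either, so each write is now guarded. The same guard in a self-contained form; the socket call is stubbed out and all names are illustrative:

    #include <cstdint>
    #include <cstring>

    struct FakeSockaddr {
      uint8_t data[16];
    };

    // Returns -1 on failure, 0 on success; only writes through pointers the
    // caller actually supplied.
    int AcceptLike(bool accept_succeeded, const FakeSockaddr& peer,
                   FakeSockaddr* out_addr, uint32_t* out_addrlen) {
      if (!accept_succeeded) {
        if (out_addr) std::memset(out_addr, 0, sizeof(*out_addr));
        if (out_addrlen) *out_addrlen = sizeof(FakeSockaddr);
        return -1;
      }
      if (out_addr) *out_addr = peer;
      if (out_addrlen) *out_addrlen = sizeof(FakeSockaddr);
      return 0;
    }
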
@@ -318,27 +318,25 @@ SHIM_CALL XexGetProcedureAddress_shim(PPCContext* ppc_context,
SHIM_SET_RETURN_32(result);
}

SHIM_CALL ExRegisterTitleTerminateNotification_shim(PPCContext* ppc_context,
KernelState* kernel_state) {
uint32_t registration_ptr = SHIM_GET_ARG_32(0);
uint32_t create = SHIM_GET_ARG_32(1);

uint32_t routine = SHIM_MEM_32(registration_ptr + 0);
uint32_t priority = SHIM_MEM_32(registration_ptr + 4);
// list entry flink
// list entry blink

XELOGD("ExRegisterTitleTerminateNotification(%.8X(%.8X, %d), %.1X)",
registration_ptr, routine, priority, create);
void AppendParam(StringBuffer& string_buffer,
pointer_t<X_EX_TITLE_TERMINATE_REGISTRATION> reg) {
string_buffer.AppendFormat("%.8X(%.8X, %.8X)", reg.guest_address(),
reg->notification_routine, reg->priority);
}

void ExRegisterTitleTerminateNotification(
pointer_t<X_EX_TITLE_TERMINATE_REGISTRATION> reg, dword_t create) {
if (create) {
// Adding.
// TODO(benvanik): add to master list (kernel?).
kernel_state()->RegisterTitleTerminateNotification(
reg->notification_routine, reg->priority);
} else {
// Removing.
// TODO(benvanik): remove from master list.
kernel_state()->RemoveTitleTerminateNotification(reg->notification_routine);
}
}
DECLARE_XBOXKRNL_EXPORT(ExRegisterTitleTerminateNotification,
ExportTag::kImplemented);

} // namespace kernel
} // namespace xe

@@ -354,6 +352,4 @@ void xe::kernel::xboxkrnl::RegisterModuleExports(
SHIM_SET_MAPPING("xboxkrnl.exe", XexLoadImage, state);
SHIM_SET_MAPPING("xboxkrnl.exe", XexUnloadImage, state);
SHIM_SET_MAPPING("xboxkrnl.exe", XexGetProcedureAddress, state);

SHIM_SET_MAPPING("xboxkrnl.exe", ExRegisterTitleTerminateNotification, state);
}

@@ -292,26 +292,22 @@ DECLARE_XBOXKRNL_EXPORT(RtlImageXexHeaderField, ExportTag::kImplemented);
// their embedded data and InitializeCriticalSection will never be called.
#pragma pack(push, 1)
struct X_RTL_CRITICAL_SECTION {
uint8_t unknown00;
uint8_t spin_count_div_256; // * 256
uint16_t __padding0;
uint32_t __padding1;
// uint32_t unknown04; // maybe the handle to the event?
uint32_t queue_head; // head of queue, pointing to this offset
uint32_t queue_tail; // tail of queue?
int32_t lock_count; // -1 -> 0 on first lock 0x10
uint32_t recursion_count; // 0 -> 1 on first lock 0x14
uint32_t owning_thread_id; // 0 unless locked 0x18
xe::be<uint8_t> unk_00; // 0x0
xe::be<uint8_t> spin_count_div_256; // 0x1
xe::be<uint16_t> __padding0; // 0x2
xe::be<uint32_t> unk_04; // 0x4 maybe the handle to the event?
xe::be<uint32_t> queue_head; // 0x8 head of queue, pointing to this offset
xe::be<uint32_t> queue_tail; // 0xC tail of queue?
int32_t lock_count; // 0x10 -1 -> 0 on first lock 0x10
xe::be<int32_t> recursion_count; // 0x14 0 -> 1 on first lock 0x14
xe::be<uint32_t> owning_thread_id; // 0x18 0 unless locked 0x18
};
#pragma pack(pop)
static_assert_size(X_RTL_CRITICAL_SECTION, 28);

void xeRtlInitializeCriticalSection(X_RTL_CRITICAL_SECTION* cs,
uint32_t cs_ptr) {
// VOID
// _Out_ LPCRITICAL_SECTION lpCriticalSection

cs->unknown00 = 1;
cs->unk_00 = 1;
cs->spin_count_div_256 = 0;
cs->queue_head = cs_ptr + 8;
cs->queue_tail = cs_ptr + 8;

@@ -320,31 +316,22 @@ void xeRtlInitializeCriticalSection(X_RTL_CRITICAL_SECTION* cs,
cs->owning_thread_id = 0;
}

SHIM_CALL RtlInitializeCriticalSection_shim(PPCContext* ppc_context,
KernelState* kernel_state) {
uint32_t cs_ptr = SHIM_GET_ARG_32(0);

XELOGD("RtlInitializeCriticalSection(%.8X)", cs_ptr);

auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
xeRtlInitializeCriticalSection(cs, cs_ptr);
void RtlInitializeCriticalSection(pointer_t<X_RTL_CRITICAL_SECTION> cs) {
xeRtlInitializeCriticalSection(cs, cs.guest_address());
}
DECLARE_XBOXKRNL_EXPORT(RtlInitializeCriticalSection, ExportTag::kImplemented);

X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(X_RTL_CRITICAL_SECTION* cs,
uint32_t cs_ptr,
uint32_t spin_count) {
// NTSTATUS
// _Out_ LPCRITICAL_SECTION lpCriticalSection,
// _In_ DWORD dwSpinCount

// Spin count is rouned up to 256 intervals then packed in.
// Spin count is rounded up to 256 intervals then packed in.
// uint32_t spin_count_div_256 = (uint32_t)floor(spin_count / 256.0f + 0.5f);
uint32_t spin_count_div_256 = (spin_count + 255) >> 8;
if (spin_count_div_256 > 255) {
spin_count_div_256 = 255;
}

cs->unknown00 = 1;
cs->unk_00 = 1;
cs->spin_count_div_256 = spin_count_div_256;
cs->queue_head = cs_ptr + 8;
cs->queue_tail = cs_ptr + 8;

@@ -355,19 +342,13 @@ X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(X_RTL_CRITICAL_SECTION* cs,
return X_STATUS_SUCCESS;
}

SHIM_CALL RtlInitializeCriticalSectionAndSpinCount_shim(
PPCContext* ppc_context, KernelState* kernel_state) {
uint32_t cs_ptr = SHIM_GET_ARG_32(0);
uint32_t spin_count = SHIM_GET_ARG_32(1);

XELOGD("RtlInitializeCriticalSectionAndSpinCount(%.8X, %u)", cs_ptr,
spin_count);

auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
X_STATUS result =
xeRtlInitializeCriticalSectionAndSpinCount(cs, cs_ptr, spin_count);
SHIM_SET_RETURN_32(result);
dword_result_t RtlInitializeCriticalSectionAndSpinCount(
pointer_t<X_RTL_CRITICAL_SECTION> cs, dword_t spin_count) {
return xeRtlInitializeCriticalSectionAndSpinCount(cs, cs.guest_address(),
spin_count);
}
DECLARE_XBOXKRNL_EXPORT(RtlInitializeCriticalSectionAndSpinCount,
ExportTag::kImplemented);

SHIM_CALL RtlEnterCriticalSection_shim(PPCContext* ppc_context,
KernelState* kernel_state) {

@@ -377,12 +358,11 @@ SHIM_CALL RtlEnterCriticalSection_shim(PPCContext* ppc_context,

// XELOGD("RtlEnterCriticalSection(%.8X)", cs_ptr);

const uint8_t* pcr = SHIM_MEM_ADDR(ppc_context->r[13]);
uint32_t thread_id = XThread::GetCurrentThreadId(pcr);
uint32_t thread_id = XThread::GetCurrentThreadId();

auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);

uint32_t spin_wait_remaining = cs->spin_count_div_256 * 256;

spin:
if (xe::atomic_inc(&cs->lock_count) != 0) {
// If this thread already owns the CS increment the recursion count.

@@ -419,8 +399,7 @@ SHIM_CALL RtlTryEnterCriticalSection_shim(PPCContext* ppc_context,

// XELOGD("RtlTryEnterCriticalSection(%.8X)", cs_ptr);

const uint8_t* pcr = SHIM_MEM_ADDR(ppc_context->r[13]);
uint32_t thread_id = XThread::GetCurrentThreadId(pcr);
uint32_t thread_id = XThread::GetCurrentThreadId();

auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);

@@ -446,16 +425,22 @@ SHIM_CALL RtlLeaveCriticalSection_shim(PPCContext* ppc_context,

// XELOGD("RtlLeaveCriticalSection(%.8X)", cs_ptr);

// FYI: No need to check if the owning thread is calling this, as that should
// be the only case.

auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);

// Drop recursion count - if we are still not zero'ed return.
uint32_t recursion_count = --cs->recursion_count;
int32_t recursion_count = --cs->recursion_count;
assert_true(recursion_count > -1);
if (recursion_count) {
assert_true(cs->recursion_count > 0);

xe::atomic_dec(&cs->lock_count);
return;
}

// Unlock!
// Not owned - unlock!
cs->owning_thread_id = 0;
if (xe::atomic_dec(&cs->lock_count) != -1) {
// There were waiters - wake one of them.

@@ -530,9 +515,6 @@ void xe::kernel::xboxkrnl::RegisterRtlExports(
SHIM_SET_MAPPING("xboxkrnl.exe", RtlMultiByteToUnicodeN, state);
SHIM_SET_MAPPING("xboxkrnl.exe", RtlUnicodeToMultiByteN, state);

SHIM_SET_MAPPING("xboxkrnl.exe", RtlInitializeCriticalSection, state);
SHIM_SET_MAPPING("xboxkrnl.exe", RtlInitializeCriticalSectionAndSpinCount,
state);
SHIM_SET_MAPPING("xboxkrnl.exe", RtlEnterCriticalSection, state);
SHIM_SET_MAPPING("xboxkrnl.exe", RtlTryEnterCriticalSection, state);
SHIM_SET_MAPPING("xboxkrnl.exe", RtlLeaveCriticalSection, state);

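The spin count packing in RtlInitializeCriticalSectionAndSpinCount rounds the requested count up to the next multiple of 256 and stores the multiple in one byte, clamped at 255. A small worked example of that arithmetic, with illustrative names:

    #include <cassert>
    #include <cstdint>

    uint8_t PackSpinCount(uint32_t spin_count) {
      uint32_t div_256 = (spin_count + 255) >> 8;  // ceil(spin_count / 256)
      if (div_256 > 255) {
        div_256 = 255;  // one byte of storage, so clamp
      }
      return static_cast<uint8_t>(div_256);
    }

    int main() {
      assert(PackSpinCount(0) == 0);           // no spinning requested
      assert(PackSpinCount(1) == 1);           // rounds up to one 256 interval
      assert(PackSpinCount(4000) == 16);       // 16 * 256 = 4096 >= 4000
      assert(PackSpinCount(1u << 20) == 255);  // clamped at the byte maximum
      return 0;
    }
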
@@ -54,6 +54,7 @@ typedef struct {
xe::be<uint32_t> wait_list_flink;
xe::be<uint32_t> wait_list_blink;
} X_DISPATCH_HEADER;
static_assert_size(X_DISPATCH_HEADER, 0x10);

// http://www.nirsoft.net/kernel_struct/vista/OBJECT_HEADER.html
struct X_OBJECT_HEADER {

@@ -118,6 +119,11 @@ class XObject {
const std::string& name() const { return name_; }
uint32_t guest_object() const { return guest_object_ptr_; }

template <typename T>
T* guest_object() {
return memory()->TranslateVirtual<T*>(guest_object_ptr_);
}

void RetainHandle();
bool ReleaseHandle();
void Retain();

@@ -157,8 +163,8 @@ class XObject {
void SetNativePointer(uint32_t native_ptr, bool uninitialized = false);

template <typename T>
T* CreateNative(uint32_t size) {
return reinterpret_cast<T*>(CreateNative(size));
T* CreateNative() {
return reinterpret_cast<T*>(CreateNative(sizeof(T)));
}

// Stash native pointer into X_DISPATCH_HEADER

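The template change above makes CreateNative<T>() derive its allocation size from sizeof(T) instead of taking it as an argument, and guest_object<T>() returns a typed pointer at the stored guest address. A simplified host-only analogue of that pattern; the allocator below is a stand-in, not the emulator's system heap:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    class ObjectStore {
     public:
      // The size comes from the type, so a caller can no longer pass a size
      // that disagrees with T.
      template <typename T>
      T* CreateNative() {
        return static_cast<T*>(Allocate(sizeof(T)));
      }

     private:
      void* Allocate(std::size_t size) { return std::calloc(1, size); }
    };

    struct ExampleObject {
      uint32_t type;
      uint32_t flags;
    };

    // Usage sketch: auto* obj = store.CreateNative<ExampleObject>();
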
@@ -53,6 +53,9 @@ class Window {
virtual bool is_fullscreen() const { return false; }
virtual void ToggleFullscreen(bool fullscreen) {}

virtual bool is_bordered() const { return false; }
virtual void SetBordered(bool enabled) {}

bool has_focus() const { return has_focus_; }
virtual void set_focus(bool value) { has_focus_ = value; }

@@ -159,8 +159,7 @@ bool Win32Window::set_title(const std::wstring& title) {
}

bool Win32Window::is_fullscreen() const {
DWORD style = GetWindowLong(hwnd_, GWL_STYLE);
return (style & WS_OVERLAPPEDWINDOW) != WS_OVERLAPPEDWINDOW;
return fullscreen_;
}

void Win32Window::ToggleFullscreen(bool fullscreen) {

@@ -168,6 +167,8 @@ void Win32Window::ToggleFullscreen(bool fullscreen) {
return;
}

fullscreen_ = fullscreen;

DWORD style = GetWindowLong(hwnd_, GWL_STYLE);
if (fullscreen) {
// Kill our borders and resize to take up entire primary monitor.

@@ -195,6 +196,25 @@ void Win32Window::ToggleFullscreen(bool fullscreen) {
}
}

bool Win32Window::is_bordered() const {
DWORD style = GetWindowLong(hwnd_, GWL_STYLE);
return (style & WS_OVERLAPPEDWINDOW) == WS_OVERLAPPEDWINDOW;
}

void Win32Window::SetBordered(bool enabled) {
if (is_fullscreen()) {
// Don't screw with the borders if we're fullscreen.
return;
}

DWORD style = GetWindowLong(hwnd_, GWL_STYLE);
if (enabled) {
SetWindowLong(hwnd_, GWL_STYLE, style | WS_OVERLAPPEDWINDOW);
} else {
SetWindowLong(hwnd_, GWL_STYLE, style & ~WS_OVERLAPPEDWINDOW);
}
}

void Win32Window::set_cursor_visible(bool value) {
if (is_cursor_visible_ == value) {
return;

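Win32Window::SetBordered above works because, on Win32, the caption and frame are just the WS_OVERLAPPEDWINDOW style bits, so toggling borders is a matter of masking the window style. A standalone sketch of the same toggle; the SetWindowPos/SWP_FRAMECHANGED call is an assumption added here so the frame redraws immediately and is not part of the commit:

    #include <windows.h>

    void SetWindowBordered(HWND hwnd, bool bordered) {
      LONG style = GetWindowLong(hwnd, GWL_STYLE);
      if (bordered) {
        style |= WS_OVERLAPPEDWINDOW;   // restore caption, frame, and buttons
      } else {
        style &= ~WS_OVERLAPPEDWINDOW;  // borderless
      }
      SetWindowLong(hwnd, GWL_STYLE, style);
      // Force the non-client area to be recalculated after the style change.
      SetWindowPos(hwnd, nullptr, 0, 0, 0, 0,
                   SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER | SWP_FRAMECHANGED);
    }
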
@@ -34,6 +34,9 @@ class Win32Window : public Window {
bool is_fullscreen() const override;
void ToggleFullscreen(bool fullscreen) override;

bool is_bordered() const override;
void SetBordered(bool enabled) override;

void set_cursor_visible(bool value) override;
void set_focus(bool value) override;

@@ -65,6 +68,7 @@ class Win32Window : public Window {

HWND hwnd_ = nullptr;
bool closing_ = false;
bool fullscreen_ = false;

WINDOWPLACEMENT windowed_pos_;
};

@@ -344,6 +344,22 @@ struct X_SLIST_HEADER {
};
static_assert_size(X_SLIST_HEADER, 8);

// https://msdn.microsoft.com/en-us/library/windows/hardware/ff550671(v=vs.85).aspx
struct X_IO_STATUS_BLOCK {
union {
xe::be<X_STATUS> status;
xe::be<uint32_t> pointer;
};
xe::be<uint32_t> information;
};

struct X_EX_TITLE_TERMINATE_REGISTRATION {
xe::be<uint32_t> notification_routine; // 0x0
xe::be<uint32_t> priority; // 0x4
X_LIST_ENTRY list_entry; // 0x8 ??
};
static_assert_size(X_EX_TITLE_TERMINATE_REGISTRATION, 16);

#pragma pack(pop)

} // namespace xe