Part 2 of kernel cleanup: merging functions into shims.

Ben Vanik 2014-08-16 00:11:24 -07:00
parent 916dc397ab
commit bf48e9fbbd
13 changed files with 594 additions and 1159 deletions
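The pattern repeated across these files: each xe*() helper that existed only to serve a single *_shim wrapper is folded into the shim itself, which already receives KernelState* as an argument and so no longer needs the shared_kernel_state_ global. A minimal before/after sketch of the pattern (xeFoo/Foo_shim are made-up names for illustration; the SHIM_* macros and state->memory()->QueryProtect() appear in the diffs below):

// Before: standalone helper plus a thin shim that forwards to it.
uint32_t xeFoo(uint32_t base_address) {
  KernelState* state = shared_kernel_state_;
  assert_not_null(state);
  return state->memory()->QueryProtect(base_address);
}
SHIM_CALL Foo_shim(PPCContext* ppc_state, KernelState* state) {
  uint32_t base_address = SHIM_GET_ARG_32(0);
  SHIM_SET_RETURN_32(xeFoo(base_address));
}

// After: one shim, using the KernelState* it is handed directly.
SHIM_CALL Foo_shim(PPCContext* ppc_state, KernelState* state) {
  uint32_t base_address = SHIM_GET_ARG_32(0);
  SHIM_SET_RETURN_32(state->memory()->QueryProtect(base_address));
}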


@@ -30,8 +30,11 @@ using namespace xe::kernel;
 // This is a global object initialized with the XboxkrnlModule.
 // It references the current kernel state object that all kernel methods should
 // be using to stash their variables.
-KernelState* xe::kernel::shared_kernel_state_ = NULL;
+namespace xe {
+namespace kernel {
+KernelState* shared_kernel_state_ = nullptr;
+}  // namespace kernel
+}  // namespace xe
 
 KernelState::KernelState(Emulator* emulator) :
     emulator_(emulator), memory_(emulator->memory()),


@@ -95,13 +95,6 @@ private:
 };
 
-// This is a global object initialized with the KernelState ctor.
-// It references the current kernel state object that all kernel methods should
-// be using to stash their variables.
-// This sucks, but meh.
-extern KernelState* shared_kernel_state_;
-
 }  // namespace kernel
 }  // namespace xe


@@ -59,16 +59,6 @@ using PPCContext = alloy::frontend::ppc::PPCContext;
 #define SHIM_SET_RETURN_64(v) SHIM_SET_GPR_64(3, v)
 
-#define IMPL_MEM_ADDR(a) (a ? state->memory()->Translate(a) : nullptr)
-#define IMPL_MEM_8(a) poly::load_and_swap<uint8_t>(IMPL_MEM_ADDR(a))
-#define IMPL_MEM_16(a) poly::load_and_swap<uint16_t>(IMPL_MEM_ADDR(a))
-#define IMPL_MEM_32(a) poly::load_and_swap<uint32_t>(IMPL_MEM_ADDR(a))
-#define IMPL_SET_MEM_8(a, v) poly::store_and_swap<uint8_t>(IMPL_MEM_ADDR(a), v)
-#define IMPL_SET_MEM_16(a, v) poly::store_and_swap<uint16_t>(IMPL_MEM_ADDR(a), v)
-#define IMPL_SET_MEM_32(a, v) poly::store_and_swap<uint32_t>(IMPL_MEM_ADDR(a), v)
-
 }  // namespace kernel
 }  // namespace xe
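With the IMPL_* duplicates removed, shims read and write guest memory through the remaining SHIM_* accessors only. A short usage sketch (Example_shim is a hypothetical name; the macros are the ones defined in this header):

SHIM_CALL Example_shim(PPCContext* ppc_state, KernelState* state) {
  uint32_t out_ptr = SHIM_GET_ARG_32(0);   // guest address from the first argument register
  uint32_t value = SHIM_MEM_32(out_ptr);   // byte-swapped load from guest memory
  SHIM_SET_MEM_32(out_ptr, value + 1);     // byte-swapped store back to guest memory
  SHIM_SET_RETURN_32(X_STATUS_SUCCESS);    // result goes to the return register (r3)
}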


@@ -23,13 +23,13 @@ using namespace xe::kernel::xam;
 namespace xe {
 namespace kernel {
 
 // TODO(benvanik): actually check to see if these are the same.
-void xeVdQueryVideoMode(X_VIDEO_MODE *video_mode, bool swap);
+void xeVdQueryVideoMode(X_VIDEO_MODE* video_mode);
 
-SHIM_CALL XGetVideoMode_shim(
-    PPCContext* ppc_state, KernelState* state) {
+SHIM_CALL XGetVideoMode_shim(PPCContext* ppc_state, KernelState* state) {
   uint32_t video_mode_ptr = SHIM_GET_ARG_32(0);
   X_VIDEO_MODE* video_mode = (X_VIDEO_MODE*)SHIM_MEM_ADDR(video_mode_ptr);
-  xeVdQueryVideoMode(video_mode, true);
+  xeVdQueryVideoMode(video_mode);
 }


@@ -244,14 +244,10 @@ SHIM_CALL DbgPrint_shim(
 }
 
-void xeDbgBreakPoint() {
-  DebugBreak();
-}
-
 SHIM_CALL DbgBreakPoint_shim(
     PPCContext* ppc_state, KernelState* state) {
   XELOGD("DbgBreakPoint()");
+  DebugBreak();
 }


@@ -24,9 +24,13 @@ namespace xe {
 namespace kernel {
 
-void xeHalReturnToFirmware(uint32_t routine) {
-  KernelState* state = shared_kernel_state_;
-  assert_not_null(state);
+SHIM_CALL HalReturnToFirmware_shim(
+    PPCContext* ppc_state, KernelState* state) {
+  uint32_t routine = SHIM_GET_ARG_32(0);
+
+  XELOGD(
+      "HalReturnToFirmware(%d)",
+      routine);
 
   // void
   // IN FIRMWARE_REENTRY Routine
@@ -41,18 +45,6 @@ void xeHalReturnToFirmware(uint32_t routine) {
 }
 
-SHIM_CALL HalReturnToFirmware_shim(
-    PPCContext* ppc_state, KernelState* state) {
-  uint32_t routine = SHIM_GET_ARG_32(0);
-
-  XELOGD(
-      "HalReturnToFirmware(%d)",
-      routine);
-
-  xeHalReturnToFirmware(routine);
-}
-
 }  // namespace kernel
 }  // namespace xe


@@ -25,12 +25,21 @@ namespace xe {
namespace kernel { namespace kernel {
X_STATUS xeNtAllocateVirtualMemory( SHIM_CALL NtAllocateVirtualMemory_shim(
uint32_t* base_addr_ptr, uint32_t* region_size_ptr, PPCContext* ppc_state, KernelState* state) {
uint32_t allocation_type, uint32_t protect_bits, uint32_t base_addr_ptr = SHIM_GET_ARG_32(0);
uint32_t unknown) { uint32_t base_addr_value = SHIM_MEM_32(base_addr_ptr);
KernelState* state = shared_kernel_state_; uint32_t region_size_ptr = SHIM_GET_ARG_32(1);
assert_not_null(state); uint32_t region_size_value = SHIM_MEM_32(region_size_ptr);
uint32_t allocation_type = SHIM_GET_ARG_32(2); // X_MEM_* bitmask
uint32_t protect_bits = SHIM_GET_ARG_32(3); // X_PAGE_* bitmask
uint32_t unknown = SHIM_GET_ARG_32(4);
XELOGD(
"NtAllocateVirtualMemory(%.8X(%.8X), %.8X(%.8X), %.8X, %.8X, %.8X)",
base_addr_ptr, base_addr_value,
region_size_ptr, region_size_value,
allocation_type, protect_bits, unknown);
// NTSTATUS // NTSTATUS
// _Inout_ PVOID *BaseAddress, // _Inout_ PVOID *BaseAddress,
@@ -48,117 +57,59 @@ X_STATUS xeNtAllocateVirtualMemory(
// it's simple today we could extend it to do better things in the future. // it's simple today we could extend it to do better things in the future.
// Must request a size. // Must request a size.
if (!*region_size_ptr) { if (!region_size_value) {
return X_STATUS_INVALID_PARAMETER; SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
return;
} }
// Check allocation type. // Check allocation type.
if (!(allocation_type & (X_MEM_COMMIT | X_MEM_RESET | X_MEM_RESERVE))) { if (!(allocation_type & (X_MEM_COMMIT | X_MEM_RESET | X_MEM_RESERVE))) {
return X_STATUS_INVALID_PARAMETER; SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
return;
} }
// If MEM_RESET is set only MEM_RESET can be set. // If MEM_RESET is set only MEM_RESET can be set.
if (allocation_type & X_MEM_RESET && (allocation_type & ~X_MEM_RESET)) { if (allocation_type & X_MEM_RESET && (allocation_type & ~X_MEM_RESET)) {
return X_STATUS_INVALID_PARAMETER; SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
return;
} }
// Don't allow games to set execute bits. // Don't allow games to set execute bits.
if (protect_bits & (X_PAGE_EXECUTE | X_PAGE_EXECUTE_READ | if (protect_bits & (X_PAGE_EXECUTE | X_PAGE_EXECUTE_READ |
X_PAGE_EXECUTE_READWRITE | X_PAGE_EXECUTE_WRITECOPY)) { X_PAGE_EXECUTE_READWRITE | X_PAGE_EXECUTE_WRITECOPY)) {
return X_STATUS_ACCESS_DENIED; SHIM_SET_RETURN_32(X_STATUS_ACCESS_DENIED);
return;
} }
// Adjust size. // Adjust size.
uint32_t adjusted_size = *region_size_ptr; uint32_t adjusted_size = region_size_value;
// TODO(benvanik): adjust based on page size flags/etc? // TODO(benvanik): adjust based on page size flags/etc?
// TODO(benvanik): support different allocation types. // TODO(benvanik): support different allocation types.
// Right now we treat everything as a commit and ignore allocations that have // Right now we treat everything as a commit and ignore allocations that have
// already happened. // already happened.
if (*base_addr_ptr) { if (base_addr_value) {
// Having a pointer already means that this is likely a follow-on COMMIT. // Having a pointer already means that this is likely a follow-on COMMIT.
assert_true(!(allocation_type & X_MEM_RESERVE) && assert_true(!(allocation_type & X_MEM_RESERVE) &&
(allocation_type & X_MEM_COMMIT)); (allocation_type & X_MEM_COMMIT));
return X_STATUS_SUCCESS; SHIM_SET_MEM_32(base_addr_ptr, base_addr_value);
SHIM_SET_MEM_32(region_size_ptr, adjusted_size);
SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
return;
} }
// Allocate. // Allocate.
uint32_t flags = (allocation_type & X_MEM_NOZERO); uint32_t flags = (allocation_type & X_MEM_NOZERO);
uint32_t addr = (uint32_t)state->memory()->HeapAlloc( uint32_t addr = (uint32_t)state->memory()->HeapAlloc(
*base_addr_ptr, adjusted_size, flags); base_addr_value, adjusted_size, flags);
if (!addr) { if (!addr) {
// Failed - assume no memory available. // Failed - assume no memory available.
return X_STATUS_NO_MEMORY; SHIM_SET_RETURN_32(X_STATUS_NO_MEMORY);
return;
} }
// Stash back. // Stash back.
// Maybe set X_STATUS_ALREADY_COMMITTED if MEM_COMMIT? // Maybe set X_STATUS_ALREADY_COMMITTED if MEM_COMMIT?
*base_addr_ptr = addr; SHIM_SET_MEM_32(base_addr_ptr, addr);
*region_size_ptr = adjusted_size; SHIM_SET_MEM_32(region_size_ptr, adjusted_size);
return X_STATUS_SUCCESS; SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
}
SHIM_CALL NtAllocateVirtualMemory_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t base_addr_ptr = SHIM_GET_ARG_32(0);
uint32_t base_addr_value = SHIM_MEM_32(base_addr_ptr);
uint32_t region_size_ptr = SHIM_GET_ARG_32(1);
uint32_t region_size_value = SHIM_MEM_32(region_size_ptr);
uint32_t allocation_type = SHIM_GET_ARG_32(2); // X_MEM_* bitmask
uint32_t protect_bits = SHIM_GET_ARG_32(3); // X_PAGE_* bitmask
uint32_t unknown = SHIM_GET_ARG_32(4);
XELOGD(
"NtAllocateVirtualMemory(%.8X(%.8X), %.8X(%.8X), %.8X, %.8X, %.8X)",
base_addr_ptr, base_addr_value,
region_size_ptr, region_size_value,
allocation_type, protect_bits, unknown);
X_STATUS result = xeNtAllocateVirtualMemory(
&base_addr_value, &region_size_value,
allocation_type, protect_bits, unknown);
if (XSUCCEEDED(result)) {
SHIM_SET_MEM_32(base_addr_ptr, base_addr_value);
SHIM_SET_MEM_32(region_size_ptr, region_size_value);
}
SHIM_SET_RETURN_32(result);
}
X_STATUS xeNtFreeVirtualMemory(
uint32_t* base_addr_ptr, uint32_t* region_size_ptr,
uint32_t free_type, uint32_t unknown) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// NTSTATUS
// _Inout_ PVOID *BaseAddress,
// _Inout_ PSIZE_T RegionSize,
// _In_ ULONG FreeType
// ? handle?
// I've only seen zero.
assert_true(unknown == 0);
if (!*base_addr_ptr) {
return X_STATUS_MEMORY_NOT_ALLOCATED;
}
// TODO(benvanik): ignore decommits for now.
if (free_type == X_MEM_DECOMMIT) {
return X_STATUS_SUCCESS;
}
// Free.
uint32_t flags = 0;
uint32_t freed_size = state->memory()->HeapFree(
*base_addr_ptr, flags);
if (!freed_size) {
return X_STATUS_UNSUCCESSFUL;
}
// Stash back.
*region_size_ptr = freed_size;
return X_STATUS_SUCCESS;
} }
@@ -178,28 +129,55 @@ SHIM_CALL NtFreeVirtualMemory_shim(
region_size_ptr, region_size_value, region_size_ptr, region_size_value,
free_type, unknown); free_type, unknown);
X_STATUS result = xeNtFreeVirtualMemory( // NTSTATUS
&base_addr_value, &region_size_value, // _Inout_ PVOID *BaseAddress,
free_type, unknown); // _Inout_ PSIZE_T RegionSize,
// _In_ ULONG FreeType
// ? handle?
if (XSUCCEEDED(result)) { // I've only seen zero.
SHIM_SET_MEM_32(base_addr_ptr, base_addr_value); assert_true(unknown == 0);
SHIM_SET_MEM_32(region_size_ptr, region_size_value);
if (!base_addr_value) {
SHIM_SET_RETURN_32(X_STATUS_MEMORY_NOT_ALLOCATED);
return;
} }
SHIM_SET_RETURN_32(result);
// TODO(benvanik): ignore decommits for now.
if (free_type == X_MEM_DECOMMIT) {
SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
return;
}
// Free.
uint32_t flags = 0;
uint32_t freed_size = state->memory()->HeapFree(
base_addr_value, flags);
if (!freed_size) {
SHIM_SET_RETURN_32(X_STATUS_UNSUCCESSFUL);
return;
}
SHIM_SET_MEM_32(base_addr_ptr, base_addr_value);
SHIM_SET_MEM_32(region_size_ptr, freed_size);
SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
} }
SHIM_CALL NtQueryVirtualMemory_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t base_address = SHIM_GET_ARG_32(0);
uint32_t memory_basic_information_ptr = SHIM_GET_ARG_32(1);
X_MEMORY_BASIC_INFORMATION *memory_basic_information = (X_MEMORY_BASIC_INFORMATION*)SHIM_MEM_ADDR(memory_basic_information_ptr);
X_STATUS xeNtQueryVirtualMemory( XELOGD(
uint32_t base_address, X_MEMORY_BASIC_INFORMATION *memory_basic_information, bool swap) { "NtQueryVirtualMemory(%.8X, %.8X)",
KernelState* state = shared_kernel_state_; base_address, memory_basic_information_ptr);
assert_not_null(state);
MEMORY_BASIC_INFORMATION mem_info; MEMORY_BASIC_INFORMATION mem_info;
size_t result = state->memory()->QueryInformation(base_address, &mem_info); size_t result = state->memory()->QueryInformation(base_address, &mem_info);
if (!result) { if (!result) {
return STATUS_INVALID_PARAMETER; SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
return;
} }
auto membase = state->memory()->membase(); auto membase = state->memory()->membase();
@@ -214,49 +192,41 @@ X_STATUS xeNtQueryVirtualMemory(
memory_basic_information->protect = mem_info.Protect; memory_basic_information->protect = mem_info.Protect;
memory_basic_information->type = mem_info.Type; memory_basic_information->type = mem_info.Type;
if (swap) { // TODO(benvanik): auto swap structure.
memory_basic_information->base_address = memory_basic_information->base_address =
poly::byte_swap(memory_basic_information->base_address); poly::byte_swap(memory_basic_information->base_address);
memory_basic_information->allocation_base = memory_basic_information->allocation_base =
poly::byte_swap(memory_basic_information->allocation_base); poly::byte_swap(memory_basic_information->allocation_base);
memory_basic_information->allocation_protect = memory_basic_information->allocation_protect =
poly::byte_swap(memory_basic_information->allocation_protect); poly::byte_swap(memory_basic_information->allocation_protect);
memory_basic_information->region_size = memory_basic_information->region_size =
poly::byte_swap(memory_basic_information->region_size); poly::byte_swap(memory_basic_information->region_size);
memory_basic_information->state = memory_basic_information->state =
poly::byte_swap(memory_basic_information->state); poly::byte_swap(memory_basic_information->state);
memory_basic_information->protect = memory_basic_information->protect =
poly::byte_swap(memory_basic_information->protect); poly::byte_swap(memory_basic_information->protect);
memory_basic_information->type = memory_basic_information->type =
poly::byte_swap(memory_basic_information->type); poly::byte_swap(memory_basic_information->type);
}
XELOGE("NtQueryVirtualMemory NOT IMPLEMENTED"); XELOGE("NtQueryVirtualMemory NOT IMPLEMENTED");
return X_STATUS_SUCCESS; SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
} }
SHIM_CALL NtQueryVirtualMemory_shim( SHIM_CALL MmAllocatePhysicalMemoryEx_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t base_address = SHIM_GET_ARG_32(0); uint32_t type = SHIM_GET_ARG_32(0);
uint32_t memory_basic_information_ptr = SHIM_GET_ARG_32(1); uint32_t region_size = SHIM_GET_ARG_32(1);
X_MEMORY_BASIC_INFORMATION *memory_basic_information = (X_MEMORY_BASIC_INFORMATION*)SHIM_MEM_ADDR(memory_basic_information_ptr); uint32_t protect_bits = SHIM_GET_ARG_32(2);
uint32_t min_addr_range = SHIM_GET_ARG_32(3);
uint32_t max_addr_range = SHIM_GET_ARG_32(4);
uint32_t alignment = SHIM_GET_ARG_32(5);
XELOGD( XELOGD(
"NtQueryVirtualMemory(%.8X, %.8X)", "MmAllocatePhysicalMemoryEx(%d, %.8X, %.8X, %.8X, %.8X, %.8X)",
base_address, memory_basic_information_ptr); type, region_size, protect_bits,
min_addr_range, max_addr_range, alignment);
X_STATUS result = xeNtQueryVirtualMemory(base_address, memory_basic_information, true);
SHIM_SET_RETURN_32(result);
}
uint32_t xeMmAllocatePhysicalMemoryEx(
uint32_t type, uint32_t region_size, uint32_t protect_bits,
uint32_t min_addr_range, uint32_t max_addr_range, uint32_t alignment) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// Type will usually be 0 (user request?), where 1 and 2 are sometimes made // Type will usually be 0 (user request?), where 1 and 2 are sometimes made
// by D3D/etc. // by D3D/etc.
@@ -264,7 +234,8 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
// Check protection bits. // Check protection bits.
if (!(protect_bits & (X_PAGE_READONLY | X_PAGE_READWRITE))) { if (!(protect_bits & (X_PAGE_READONLY | X_PAGE_READWRITE))) {
XELOGE("MmAllocatePhysicalMemoryEx: bad protection bits"); XELOGE("MmAllocatePhysicalMemoryEx: bad protection bits");
return 0; SHIM_SET_RETURN_32(0);
return;
} }
// Either may be OR'ed into protect_bits: // Either may be OR'ed into protect_bits:
@@ -302,7 +273,8 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
0, adjusted_size, flags, adjusted_alignment); 0, adjusted_size, flags, adjusted_alignment);
if (!base_address) { if (!base_address) {
// Failed - assume no memory available. // Failed - assume no memory available.
return 0; SHIM_SET_RETURN_32(0);
return;
} }
// Move the address into the right range. // Move the address into the right range.
@@ -315,36 +287,19 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
//} //}
base_address += 0xA0000000; base_address += 0xA0000000;
return base_address;
}
SHIM_CALL MmAllocatePhysicalMemoryEx_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t type = SHIM_GET_ARG_32(0);
uint32_t region_size = SHIM_GET_ARG_32(1);
uint32_t protect_bits = SHIM_GET_ARG_32(2);
uint32_t min_addr_range = SHIM_GET_ARG_32(3);
uint32_t max_addr_range = SHIM_GET_ARG_32(4);
uint32_t alignment = SHIM_GET_ARG_32(5);
XELOGD(
"MmAllocatePhysicalMemoryEx(%d, %.8X, %.8X, %.8X, %.8X, %.8X)",
type, region_size, protect_bits,
min_addr_range, max_addr_range, alignment);
uint32_t base_address = xeMmAllocatePhysicalMemoryEx(
type, region_size, protect_bits,
min_addr_range, max_addr_range, alignment);
SHIM_SET_RETURN_32(base_address); SHIM_SET_RETURN_32(base_address);
} }
void xeMmFreePhysicalMemory(uint32_t type, uint32_t base_address) { SHIM_CALL MmFreePhysicalMemory_shim(
KernelState* state = shared_kernel_state_; PPCContext* ppc_state, KernelState* state) {
assert_not_null(state); uint32_t type = SHIM_GET_ARG_32(0);
uint32_t base_address = SHIM_GET_ARG_32(1);
XELOGD(
"MmFreePhysicalAddress(%d, %.8X)",
type, base_address);
// base_address = result of MmAllocatePhysicalMemory. // base_address = result of MmAllocatePhysicalMemory.
// Strip off physical bits before passing down. // Strip off physical bits before passing down.
@@ -358,29 +313,6 @@ void xeMmFreePhysicalMemory(uint32_t type, uint32_t base_address) {
} }
SHIM_CALL MmFreePhysicalMemory_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t type = SHIM_GET_ARG_32(0);
uint32_t base_address = SHIM_GET_ARG_32(1);
XELOGD(
"MmFreePhysicalAddress(%d, %.8X)",
type, base_address);
xeMmFreePhysicalMemory(type, base_address);
}
uint32_t xeMmQueryAddressProtect(uint32_t base_address) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
uint32_t access = state->memory()->QueryProtect(base_address);
return access;
}
SHIM_CALL MmQueryAddressProtect_shim( SHIM_CALL MmQueryAddressProtect_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t base_address = SHIM_GET_ARG_32(0); uint32_t base_address = SHIM_GET_ARG_32(0);
@@ -389,19 +321,9 @@ SHIM_CALL MmQueryAddressProtect_shim(
"MmQueryAddressProtect(%.8X)", "MmQueryAddressProtect(%.8X)",
base_address); base_address);
uint32_t result = xeMmQueryAddressProtect(base_address); uint32_t access = state->memory()->QueryProtect(base_address);
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(access);
}
uint32_t xeMmQueryAllocationSize(uint32_t base_address) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
size_t size = state->memory()->QuerySize(base_address);
return (uint32_t)size;
} }
@@ -412,10 +334,10 @@ SHIM_CALL MmQueryAllocationSize_shim(
XELOGD( XELOGD(
"MmQueryAllocationSize(%.8X)", "MmQueryAllocationSize(%.8X)",
base_address); base_address);
size_t size = state->memory()->QuerySize(base_address);
uint32_t result = xeMmQueryAllocationSize(base_address); SHIM_SET_RETURN_32(static_cast<uint32_t>(size));
SHIM_SET_RETURN_32(result);
} }
@@ -427,7 +349,7 @@ SHIM_CALL MmQueryStatistics_shim(
"MmQueryStatistics(%.8X)", "MmQueryStatistics(%.8X)",
stats_ptr); stats_ptr);
uint32_t size = SHIM_MEM_32(stats_ptr + 0); uint32_t size = SHIM_MEM_32(stats_ptr + 0);
if (size != 104) { if (size != 104) {
SHIM_SET_RETURN_32(X_STATUS_BUFFER_TOO_SMALL); SHIM_SET_RETURN_32(X_STATUS_BUFFER_TOO_SMALL);
return; return;
@@ -475,7 +397,14 @@ uint32_t size = SHIM_MEM_32(stats_ptr + 0);
// http://msdn.microsoft.com/en-us/library/windows/hardware/ff554547(v=vs.85).aspx // http://msdn.microsoft.com/en-us/library/windows/hardware/ff554547(v=vs.85).aspx
uint32_t xeMmGetPhysicalAddress(uint32_t base_address) { SHIM_CALL MmGetPhysicalAddress_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t base_address = SHIM_GET_ARG_32(0);
XELOGD(
"MmGetPhysicalAddress(%.8X)",
base_address);
// PHYSICAL_ADDRESS MmGetPhysicalAddress( // PHYSICAL_ADDRESS MmGetPhysicalAddress(
// _In_ PVOID BaseAddress // _In_ PVOID BaseAddress
// ); // );
@@ -493,21 +422,7 @@ uint32_t xeMmGetPhysicalAddress(uint32_t base_address) {
base_address |= 0xE0000000; base_address |= 0xE0000000;
}*/ }*/
return base_address; SHIM_SET_RETURN_32(base_address);
}
SHIM_CALL MmGetPhysicalAddress_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t base_address = SHIM_GET_ARG_32(0);
XELOGD(
"MmGetPhysicalAddress(%.8X)",
base_address);
uint32_t result = xeMmGetPhysicalAddress(base_address);
SHIM_SET_RETURN_32(result);
} }
@@ -536,7 +451,6 @@ SHIM_CALL ExAllocatePoolTypeWithTag_shim(
} }
SHIM_CALL ExFreePool_shim( SHIM_CALL ExFreePool_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t base_address = SHIM_GET_ARG_32(0); uint32_t base_address = SHIM_GET_ARG_32(0);


@@ -133,9 +133,13 @@ SHIM_CALL ExGetXConfigSetting_shim(
} }
int xeXexCheckExecutablePriviledge(uint32_t privilege) { SHIM_CALL XexCheckExecutablePrivilege_shim(
KernelState* state = shared_kernel_state_; PPCContext* ppc_state, KernelState* state) {
assert_not_null(state); uint32_t privilege = SHIM_GET_ARG_32(0);
XELOGD(
"XexCheckExecutablePrivilege(%.8X)",
privilege);
// BOOL // BOOL
// DWORD Privilege // DWORD Privilege
@@ -146,7 +150,8 @@ int xeXexCheckExecutablePriviledge(uint32_t privilege) {
XUserModule* module = state->GetExecutableModule(); XUserModule* module = state->GetExecutableModule();
if (!module) { if (!module) {
return 0; SHIM_SET_RETURN_32(0);
return;
} }
xe_xex2_ref xex = module->xex(); xe_xex2_ref xex = module->xex();
@@ -156,47 +161,10 @@ int xeXexCheckExecutablePriviledge(uint32_t privilege) {
xe_xex2_release(xex); xe_xex2_release(xex);
module->Release(); module->Release();
return result;
}
SHIM_CALL XexCheckExecutablePrivilege_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t privilege = SHIM_GET_ARG_32(0);
XELOGD(
"XexCheckExecutablePrivilege(%.8X)",
privilege);
int result = xeXexCheckExecutablePriviledge(privilege);
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
} }
int xeXexGetModuleHandle(const char* module_name,
X_HANDLE* module_handle_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// BOOL
// LPCSZ ModuleName
// LPHMODULE ModuleHandle
XModule* module = state->GetModule(module_name);
if (!module) {
return 0;
}
// NOTE: we don't retain the handle for return.
*module_handle_ptr = module->handle();
module->Release();
return 1;
}
SHIM_CALL XexGetModuleHandle_shim( SHIM_CALL XexGetModuleHandle_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t module_name_ptr = SHIM_GET_ARG_32(0); uint32_t module_name_ptr = SHIM_GET_ARG_32(0);
@@ -207,11 +175,18 @@ SHIM_CALL XexGetModuleHandle_shim(
"XexGetModuleHandle(%s, %.8X)", "XexGetModuleHandle(%s, %.8X)",
module_name, module_handle_ptr); module_name, module_handle_ptr);
X_HANDLE module_handle = 0; XModule* module = state->GetModule(module_name);
int result = xeXexGetModuleHandle(module_name, &module_handle); if (!module) {
SHIM_SET_MEM_32(module_handle_ptr, module_handle); SHIM_SET_RETURN_32(X_ERROR_NOT_FOUND);
return;
}
SHIM_SET_RETURN_32(result); // NOTE: we don't retain the handle for return.
SHIM_SET_MEM_32(module_handle_ptr, module->handle());
module->Release();
SHIM_SET_RETURN_32(X_ERROR_SUCCESS);
} }


@@ -27,45 +27,24 @@ namespace xe {
namespace kernel { namespace kernel {
//RtlCompareMemory
struct x {
};
struct RtlCompareMemoryExport {
KernelState* state;
static void Call(PPCContext* ppc_state) {
uint32_t source1 = SHIM_GET_ARG_32(0);
uint32_t source2 = SHIM_GET_ARG_32(1);
uint32_t length = SHIM_GET_ARG_32(2);
XELOGD(
"RtlCompareMemory(%.8X, %.8X, %d)",
source1, source2, length);
uint32_t result = 0;
SHIM_SET_RETURN_64(result);
}
virtual void Log() {
//
}
X_STATUS RtlCompareMemory(uint32_t source1_ptr, uint32_t source2_ptr, uint32_t length) {
}
};
// http://msdn.microsoft.com/en-us/library/ff561778 // http://msdn.microsoft.com/en-us/library/ff561778
uint32_t xeRtlCompareMemory(uint32_t source1_ptr, uint32_t source2_ptr, SHIM_CALL RtlCompareMemory_shim(
uint32_t length) { PPCContext* ppc_state, KernelState* state) {
KernelState* state = shared_kernel_state_; uint32_t source1_ptr = SHIM_GET_ARG_32(0);
assert_not_null(state); uint32_t source2_ptr = SHIM_GET_ARG_32(1);
uint32_t length = SHIM_GET_ARG_32(2);
XELOGD(
"RtlCompareMemory(%.8X, %.8X, %d)",
source1_ptr, source2_ptr, length);
// SIZE_T // SIZE_T
// _In_ const VOID *Source1, // _In_ const VOID *Source1,
// _In_ const VOID *Source2, // _In_ const VOID *Source2,
// _In_ SIZE_T Length // _In_ SIZE_T Length
uint8_t* p1 = IMPL_MEM_ADDR(source1_ptr); uint8_t* p1 = SHIM_MEM_ADDR(source1_ptr);
uint8_t* p2 = IMPL_MEM_ADDR(source2_ptr); uint8_t* p2 = SHIM_MEM_ADDR(source2_ptr);
// Note that the return value is the number of bytes that match, so it's best // Note that the return value is the number of bytes that match, so it's best
// we just do this ourselves vs. using memcmp. // we just do this ourselves vs. using memcmp.
@@ -78,30 +57,20 @@ uint32_t xeRtlCompareMemory(uint32_t source1_ptr, uint32_t source2_ptr,
} }
} }
return c; SHIM_SET_RETURN_64(c);
}
SHIM_CALL RtlCompareMemory_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t source1 = SHIM_GET_ARG_32(0);
uint32_t source2 = SHIM_GET_ARG_32(1);
uint32_t length = SHIM_GET_ARG_32(2);
XELOGD(
"RtlCompareMemory(%.8X, %.8X, %d)",
source1, source2, length);
uint32_t result = xeRtlCompareMemory(source1, source2, length);
SHIM_SET_RETURN_64(result);
} }
// http://msdn.microsoft.com/en-us/library/ff552123 // http://msdn.microsoft.com/en-us/library/ff552123
uint32_t xeRtlCompareMemoryUlong(uint32_t source_ptr, uint32_t length, SHIM_CALL RtlCompareMemoryUlong_shim(
uint32_t pattern) { PPCContext* ppc_state, KernelState* state) {
KernelState* state = shared_kernel_state_; uint32_t source_ptr = SHIM_GET_ARG_32(0);
assert_not_null(state); uint32_t length = SHIM_GET_ARG_32(1);
uint32_t pattern = SHIM_GET_ARG_32(2);
XELOGD(
"RtlCompareMemoryUlong(%.8X, %d, %.8X)",
source_ptr, length, pattern);
// SIZE_T // SIZE_T
// _In_ PVOID Source, // _In_ PVOID Source,
@@ -109,10 +78,11 @@ uint32_t xeRtlCompareMemoryUlong(uint32_t source_ptr, uint32_t length,
// _In_ ULONG Pattern // _In_ ULONG Pattern
if ((source_ptr % 4) || (length % 4)) { if ((source_ptr % 4) || (length % 4)) {
return 0; SHIM_SET_RETURN_64(0);
return;
} }
uint8_t* p = IMPL_MEM_ADDR(source_ptr); uint8_t* p = SHIM_MEM_ADDR(source_ptr);
// Swap pattern. // Swap pattern.
// TODO(benvanik): ensure byte order of pattern is correct. // TODO(benvanik): ensure byte order of pattern is correct.
@@ -128,30 +98,20 @@ uint32_t xeRtlCompareMemoryUlong(uint32_t source_ptr, uint32_t length,
} }
} }
return c; SHIM_SET_RETURN_64(c);
}
SHIM_CALL RtlCompareMemoryUlong_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t source = SHIM_GET_ARG_32(0);
uint32_t length = SHIM_GET_ARG_32(1);
uint32_t pattern = SHIM_GET_ARG_32(2);
XELOGD(
"RtlCompareMemoryUlong(%.8X, %d, %.8X)",
source, length, pattern);
uint32_t result = xeRtlCompareMemoryUlong(source, length, pattern);
SHIM_SET_RETURN_64(result);
} }
// http://msdn.microsoft.com/en-us/library/ff552263 // http://msdn.microsoft.com/en-us/library/ff552263
void xeRtlFillMemoryUlong(uint32_t destination_ptr, uint32_t length, SHIM_CALL RtlFillMemoryUlong_shim(
uint32_t pattern) { PPCContext* ppc_state, KernelState* state) {
KernelState* state = shared_kernel_state_; uint32_t destination_ptr = SHIM_GET_ARG_32(0);
assert_not_null(state); uint32_t length = SHIM_GET_ARG_32(1);
uint32_t pattern = SHIM_GET_ARG_32(2);
XELOGD(
"RtlFillMemoryUlong(%.8X, %d, %.8X)",
destination_ptr, length, pattern);
// VOID // VOID
// _Out_ PVOID Destination, // _Out_ PVOID Destination,
@@ -159,7 +119,7 @@ void xeRtlFillMemoryUlong(uint32_t destination_ptr, uint32_t length,
// _In_ ULONG Pattern // _In_ ULONG Pattern
// NOTE: length must be % 4, so we can work on uint32s. // NOTE: length must be % 4, so we can work on uint32s.
uint32_t* p = (uint32_t*)IMPL_MEM_ADDR(destination_ptr); uint32_t* p = (uint32_t*)SHIM_MEM_ADDR(destination_ptr);
// TODO(benvanik): ensure byte order is correct - we're writing back the // TODO(benvanik): ensure byte order is correct - we're writing back the
// swapped arg value. // swapped arg value.
@@ -175,20 +135,6 @@ void xeRtlFillMemoryUlong(uint32_t destination_ptr, uint32_t length,
} }
SHIM_CALL RtlFillMemoryUlong_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t destination = SHIM_GET_ARG_32(0);
uint32_t length = SHIM_GET_ARG_32(1);
uint32_t pattern = SHIM_GET_ARG_32(2);
XELOGD(
"RtlFillMemoryUlong(%.8X, %d, %.8X)",
destination, length, pattern);
xeRtlFillMemoryUlong(destination, length, pattern);
}
// typedef struct _STRING { // typedef struct _STRING {
// USHORT Length; // USHORT Length;
// USHORT MaximumLength; // USHORT MaximumLength;
@@ -197,27 +143,6 @@ SHIM_CALL RtlFillMemoryUlong_shim(
// http://msdn.microsoft.com/en-us/library/ff561918 // http://msdn.microsoft.com/en-us/library/ff561918
void xeRtlInitAnsiString(uint32_t destination_ptr, uint32_t source_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID
// _Out_ PANSI_STRING DestinationString,
// _In_opt_ PCSZ SourceString
if (source_ptr != 0) {
const char* source = (char*)IMPL_MEM_ADDR(source_ptr);
uint16_t length = (uint16_t)xestrlena(source);
IMPL_SET_MEM_16(destination_ptr + 0, length);
IMPL_SET_MEM_16(destination_ptr + 2, length + 1);
} else {
IMPL_SET_MEM_16(destination_ptr + 0, 0);
IMPL_SET_MEM_16(destination_ptr + 2, 0);
}
IMPL_SET_MEM_32(destination_ptr + 4, source_ptr);
}
SHIM_CALL RtlInitAnsiString_shim( SHIM_CALL RtlInitAnsiString_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t destination_ptr = SHIM_GET_ARG_32(0); uint32_t destination_ptr = SHIM_GET_ARG_32(0);
@@ -227,38 +152,43 @@ SHIM_CALL RtlInitAnsiString_shim(
XELOGD("RtlInitAnsiString(%.8X, %.8X = %s)", XELOGD("RtlInitAnsiString(%.8X, %.8X = %s)",
destination_ptr, source_ptr, source ? source : "<null>"); destination_ptr, source_ptr, source ? source : "<null>");
xeRtlInitAnsiString(destination_ptr, source_ptr); // VOID
// _Out_ PANSI_STRING DestinationString,
// _In_opt_ PCSZ SourceString
if (source_ptr != 0) {
const char* source = (char*)SHIM_MEM_ADDR(source_ptr);
uint16_t length = (uint16_t)xestrlena(source);
SHIM_SET_MEM_16(destination_ptr + 0, length);
SHIM_SET_MEM_16(destination_ptr + 2, length + 1);
} else {
SHIM_SET_MEM_16(destination_ptr + 0, 0);
SHIM_SET_MEM_16(destination_ptr + 2, 0);
}
SHIM_SET_MEM_32(destination_ptr + 4, source_ptr);
} }
// http://msdn.microsoft.com/en-us/library/ff561899 // http://msdn.microsoft.com/en-us/library/ff561899
void xeRtlFreeAnsiString(uint32_t string_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID
// _Inout_ PANSI_STRING AnsiString
uint32_t buffer = IMPL_MEM_32(string_ptr + 4);
if (!buffer) {
return;
}
uint32_t length = IMPL_MEM_16(string_ptr + 2);
state->memory()->HeapFree(buffer, length);
IMPL_SET_MEM_16(string_ptr + 0, 0);
IMPL_SET_MEM_16(string_ptr + 2, 0);
IMPL_SET_MEM_32(string_ptr + 4, 0);
}
SHIM_CALL RtlFreeAnsiString_shim( SHIM_CALL RtlFreeAnsiString_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t string_ptr = SHIM_GET_ARG_32(0); uint32_t string_ptr = SHIM_GET_ARG_32(0);
XELOGD("RtlFreeAnsiString(%.8X)", string_ptr); XELOGD("RtlFreeAnsiString(%.8X)", string_ptr);
xeRtlFreeAnsiString(string_ptr); // VOID
// _Inout_ PANSI_STRING AnsiString
uint32_t buffer = SHIM_MEM_32(string_ptr + 4);
if (!buffer) {
return;
}
uint32_t length = SHIM_MEM_16(string_ptr + 2);
state->memory()->HeapFree(buffer, length);
SHIM_SET_MEM_16(string_ptr + 0, 0);
SHIM_SET_MEM_16(string_ptr + 2, 0);
SHIM_SET_MEM_32(string_ptr + 4, 0);
} }
@@ -270,30 +200,6 @@ SHIM_CALL RtlFreeAnsiString_shim(
// http://msdn.microsoft.com/en-us/library/ff561934 // http://msdn.microsoft.com/en-us/library/ff561934
void xeRtlInitUnicodeString(uint32_t destination_ptr, uint32_t source_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID
// _Out_ PUNICODE_STRING DestinationString,
// _In_opt_ PCWSTR SourceString
const wchar_t* source =
source_ptr ? (const wchar_t*)IMPL_MEM_ADDR(source_ptr) : NULL;
if (source) {
uint16_t length = (uint16_t)xestrlenw(source);
IMPL_SET_MEM_16(destination_ptr + 0, length * 2);
IMPL_SET_MEM_16(destination_ptr + 2, (length + 1) * 2);
IMPL_SET_MEM_32(destination_ptr + 4, source_ptr);
} else {
IMPL_SET_MEM_16(destination_ptr + 0, 0);
IMPL_SET_MEM_16(destination_ptr + 2, 0);
IMPL_SET_MEM_32(destination_ptr + 4, 0);
}
}
SHIM_CALL RtlInitUnicodeString_shim( SHIM_CALL RtlInitUnicodeString_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t destination_ptr = SHIM_GET_ARG_32(0); uint32_t destination_ptr = SHIM_GET_ARG_32(0);
@@ -304,85 +210,47 @@ SHIM_CALL RtlInitUnicodeString_shim(
XELOGD("RtlInitUnicodeString(%.8X, %.8X = %ls)", XELOGD("RtlInitUnicodeString(%.8X, %.8X = %ls)",
destination_ptr, source_ptr, source ? source : L"<null>"); destination_ptr, source_ptr, source ? source : L"<null>");
xeRtlInitUnicodeString(destination_ptr, source_ptr); // VOID
// _Out_ PUNICODE_STRING DestinationString,
// _In_opt_ PCWSTR SourceString
if (source) {
uint16_t length = (uint16_t)xestrlenw(source);
SHIM_SET_MEM_16(destination_ptr + 0, length * 2);
SHIM_SET_MEM_16(destination_ptr + 2, (length + 1) * 2);
SHIM_SET_MEM_32(destination_ptr + 4, source_ptr);
} else {
SHIM_SET_MEM_16(destination_ptr + 0, 0);
SHIM_SET_MEM_16(destination_ptr + 2, 0);
SHIM_SET_MEM_32(destination_ptr + 4, 0);
}
} }
// http://msdn.microsoft.com/en-us/library/ff561903 // http://msdn.microsoft.com/en-us/library/ff561903
void xeRtlFreeUnicodeString(uint32_t string_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID
// _Inout_ PUNICODE_STRING UnicodeString
uint32_t buffer = IMPL_MEM_32(string_ptr + 4);
if (!buffer) {
return;
}
uint32_t length = IMPL_MEM_16(string_ptr + 2);
state->memory()->HeapFree(buffer, length);
IMPL_SET_MEM_16(string_ptr + 0, 0);
IMPL_SET_MEM_16(string_ptr + 2, 0);
IMPL_SET_MEM_32(string_ptr + 4, 0);
}
SHIM_CALL RtlFreeUnicodeString_shim( SHIM_CALL RtlFreeUnicodeString_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t string_ptr = SHIM_GET_ARG_32(0); uint32_t string_ptr = SHIM_GET_ARG_32(0);
XELOGD("RtlFreeUnicodeString(%.8X)", string_ptr); XELOGD("RtlFreeUnicodeString(%.8X)", string_ptr);
xeRtlFreeUnicodeString(string_ptr); // VOID
// _Inout_ PUNICODE_STRING UnicodeString
uint32_t buffer = SHIM_MEM_32(string_ptr + 4);
if (!buffer) {
return;
}
uint32_t length = SHIM_MEM_16(string_ptr + 2);
state->memory()->HeapFree(buffer, length);
SHIM_SET_MEM_16(string_ptr + 0, 0);
SHIM_SET_MEM_16(string_ptr + 2, 0);
SHIM_SET_MEM_32(string_ptr + 4, 0);
} }
// http://msdn.microsoft.com/en-us/library/ff562969 // http://msdn.microsoft.com/en-us/library/ff562969
X_STATUS xeRtlUnicodeStringToAnsiString(uint32_t destination_ptr,
uint32_t source_ptr,
uint32_t alloc_dest) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// NTSTATUS
// _Inout_ PANSI_STRING DestinationString,
// _In_ PCUNICODE_STRING SourceString,
// _In_ BOOLEAN AllocateDestinationString
std::wstring unicode_str = poly::load_and_swap<std::wstring>(
IMPL_MEM_ADDR(IMPL_MEM_32(source_ptr + 4)));
std::string ansi_str = poly::to_string(unicode_str);
if (ansi_str.size() > 0xFFFF - 1) {
return X_STATUS_INVALID_PARAMETER_2;
}
X_STATUS result = X_STATUS_SUCCESS;
if (alloc_dest) {
auto buffer_ptr = state->memory()->HeapAlloc(0, ansi_str.size() + 1, 0);
memcpy(IMPL_MEM_ADDR(buffer_ptr), ansi_str.data(), ansi_str.size() + 1);
IMPL_SET_MEM_16(destination_ptr + 0,
static_cast<uint16_t>(ansi_str.size()));
IMPL_SET_MEM_16(destination_ptr + 2,
static_cast<uint16_t>(ansi_str.size() + 1));
IMPL_SET_MEM_32(destination_ptr + 4, static_cast<uint32_t>(buffer_ptr));
} else {
uint32_t buffer_capacity = IMPL_MEM_16(destination_ptr + 2);
uint32_t buffer_ptr = IMPL_MEM_32(destination_ptr + 4);
if (buffer_capacity < ansi_str.size() + 1) {
// Too large - we just write what we can.
result = X_STATUS_BUFFER_OVERFLOW;
memcpy(IMPL_MEM_ADDR(buffer_ptr), ansi_str.data(), buffer_capacity - 1);
} else {
memcpy(IMPL_MEM_ADDR(buffer_ptr), ansi_str.data(), ansi_str.size() + 1);
}
IMPL_SET_MEM_8(buffer_ptr + buffer_capacity - 1, 0); // \0
}
return result;
}
SHIM_CALL RtlUnicodeStringToAnsiString_shim( SHIM_CALL RtlUnicodeStringToAnsiString_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t destination_ptr = SHIM_GET_ARG_32(0); uint32_t destination_ptr = SHIM_GET_ARG_32(0);
@@ -392,8 +260,40 @@ SHIM_CALL RtlUnicodeStringToAnsiString_shim(
XELOGD("RtlUnicodeStringToAnsiString(%.8X, %.8X, %d)", XELOGD("RtlUnicodeStringToAnsiString(%.8X, %.8X, %d)",
destination_ptr, source_ptr, alloc_dest); destination_ptr, source_ptr, alloc_dest);
X_STATUS result = xeRtlUnicodeStringToAnsiString( // NTSTATUS
destination_ptr, source_ptr, alloc_dest); // _Inout_ PANSI_STRING DestinationString,
// _In_ PCUNICODE_STRING SourceString,
// _In_ BOOLEAN AllocateDestinationString
std::wstring unicode_str = poly::load_and_swap<std::wstring>(
SHIM_MEM_ADDR(SHIM_MEM_32(source_ptr + 4)));
std::string ansi_str = poly::to_string(unicode_str);
if (ansi_str.size() > 0xFFFF - 1) {
SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER_2);
return;
}
X_STATUS result = X_STATUS_SUCCESS;
if (alloc_dest) {
auto buffer_ptr = state->memory()->HeapAlloc(0, ansi_str.size() + 1, 0);
memcpy(SHIM_MEM_ADDR(buffer_ptr), ansi_str.data(), ansi_str.size() + 1);
SHIM_SET_MEM_16(destination_ptr + 0,
static_cast<uint16_t>(ansi_str.size()));
SHIM_SET_MEM_16(destination_ptr + 2,
static_cast<uint16_t>(ansi_str.size() + 1));
SHIM_SET_MEM_32(destination_ptr + 4, static_cast<uint32_t>(buffer_ptr));
} else {
uint32_t buffer_capacity = SHIM_MEM_16(destination_ptr + 2);
uint32_t buffer_ptr = SHIM_MEM_32(destination_ptr + 4);
if (buffer_capacity < ansi_str.size() + 1) {
// Too large - we just write what we can.
result = X_STATUS_BUFFER_OVERFLOW;
memcpy(SHIM_MEM_ADDR(buffer_ptr), ansi_str.data(), buffer_capacity - 1);
} else {
memcpy(SHIM_MEM_ADDR(buffer_ptr), ansi_str.data(), ansi_str.size() + 1);
}
SHIM_SET_MEM_8(buffer_ptr + buffer_capacity - 1, 0); // \0
}
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
} }
@@ -457,22 +357,6 @@ SHIM_CALL RtlUnicodeToMultiByteN_shim(
} }
uint32_t xeRtlNtStatusToDosError(X_STATUS status) {
if (!status || (status & 0x20000000)) {
// Success.
return status;
} else if ((status & 0xF0000000) == 0xD0000000) {
// High bit doesn't matter.
status &= ~0x10000000;
}
// TODO(benvanik): implement lookup table.
XELOGE("RtlNtStatusToDosError lookup NOT IMPLEMENTED");
return 317; // ERROR_MR_MID_NOT_FOUND
}
SHIM_CALL RtlNtStatusToDosError_shim( SHIM_CALL RtlNtStatusToDosError_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t status = SHIM_GET_ARG_32(0); uint32_t status = SHIM_GET_ARG_32(0);
@@ -481,15 +365,39 @@ SHIM_CALL RtlNtStatusToDosError_shim(
"RtlNtStatusToDosError(%.4X)", "RtlNtStatusToDosError(%.4X)",
status); status);
uint32_t result = xeRtlNtStatusToDosError(status); if (!status || (status & 0x20000000)) {
// Success.
SHIM_SET_RETURN_32(0);
return;
} else if ((status & 0xF0000000) == 0xD0000000) {
// High bit doesn't matter.
status &= ~0x10000000;
}
// TODO(benvanik): implement lookup table.
XELOGE("RtlNtStatusToDosError lookup NOT IMPLEMENTED");
uint32_t result = 317; // ERROR_MR_MID_NOT_FOUND
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
} }
uint32_t xeRtlImageXexHeaderField(uint32_t xex_header_base_ptr, SHIM_CALL RtlImageXexHeaderField_shim(
uint32_t image_field) { PPCContext* ppc_state, KernelState* state) {
KernelState* state = shared_kernel_state_; uint32_t xex_header_base = SHIM_GET_ARG_32(0);
assert_not_null(state); uint32_t image_field = SHIM_GET_ARG_32(1);
// NOTE: this is totally faked!
// We set the XexExecutableModuleHandle pointer to a block that has at offset
// 0x58 a pointer to our XexHeaderBase. If the value passed doesn't match
// then die.
// The only ImageField I've seen in the wild is
// 0x20401 (XEX_HEADER_DEFAULT_HEAP_SIZE), so that's all we'll support.
XELOGD(
"RtlImageXexHeaderField(%.8X, %.8X)",
xex_header_base, image_field);
// PVOID // PVOID
// PVOID XexHeaderBase // PVOID XexHeaderBase
@@ -512,34 +420,15 @@ uint32_t xeRtlImageXexHeaderField(uint32_t xex_header_base_ptr,
const xe_xex2_header_t* xex_header = module->xex_header(); const xe_xex2_header_t* xex_header = module->xex_header();
for (size_t n = 0; n < xex_header->header_count; n++) { for (size_t n = 0; n < xex_header->header_count; n++) {
if (xex_header->headers[n].key == image_field) { if (xex_header->headers[n].key == image_field) {
uint32_t value = xex_header->headers[n].value;
module->Release(); module->Release();
return xex_header->headers[n].value; SHIM_SET_RETURN_64(value);
return;
} }
} }
module->Release(); module->Release();
return 0; SHIM_SET_RETURN_64(0);
}
SHIM_CALL RtlImageXexHeaderField_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t xex_header_base = SHIM_GET_ARG_32(0);
uint32_t image_field = SHIM_GET_ARG_32(1);
// NOTE: this is totally faked!
// We set the XexExecutableModuleHandle pointer to a block that has at offset
// 0x58 a pointer to our XexHeaderBase. If the value passed doesn't match
// then die.
// The only ImageField I've seen in the wild is
// 0x20401 (XEX_HEADER_DEFAULT_HEAP_SIZE), so that's all we'll support.
XELOGD(
"RtlImageXexHeaderField(%.8X, %.8X)",
xex_header_base, image_field);
uint32_t result = xeRtlImageXexHeaderField(xex_header_base, image_field);
SHIM_SET_RETURN_64(result);
} }
@@ -560,9 +449,8 @@ SHIM_CALL RtlImageXexHeaderField_shim(
// This structure tries to match the one on the 360 as best I can figure out. // This structure tries to match the one on the 360 as best I can figure out.
// Unfortunately some games have the critical sections pre-initialized in // Unfortunately some games have the critical sections pre-initialized in
// their embedded data and InitializeCriticalSection will never be called. // their embedded data and InitializeCriticalSection will never be called.
namespace {
#pragma pack(push, 1) #pragma pack(push, 1)
typedef struct { struct X_RTL_CRITICAL_SECTION {
uint8_t unknown00; uint8_t unknown00;
uint8_t spin_count_div_256; // * 256 uint8_t spin_count_div_256; // * 256
uint8_t __padding[6]; uint8_t __padding[6];
@@ -572,20 +460,14 @@ typedef struct {
int32_t lock_count; // -1 -> 0 on first lock 0x10 int32_t lock_count; // -1 -> 0 on first lock 0x10
uint32_t recursion_count; // 0 -> 1 on first lock 0x14 uint32_t recursion_count; // 0 -> 1 on first lock 0x14
uint32_t owning_thread_id; // 0 unless locked 0x18 uint32_t owning_thread_id; // 0 unless locked 0x18
} X_RTL_CRITICAL_SECTION; };
#pragma pack(pop) #pragma pack(pop)
}
static_assert_size(X_RTL_CRITICAL_SECTION, 28); static_assert_size(X_RTL_CRITICAL_SECTION, 28);
void xeRtlInitializeCriticalSection(uint32_t cs_ptr) { void xeRtlInitializeCriticalSection(X_RTL_CRITICAL_SECTION* cs) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID // VOID
// _Out_ LPCRITICAL_SECTION lpCriticalSection // _Out_ LPCRITICAL_SECTION lpCriticalSection
X_RTL_CRITICAL_SECTION* cs = (X_RTL_CRITICAL_SECTION*)IMPL_MEM_ADDR(cs_ptr);
cs->unknown00 = 1; cs->unknown00 = 1;
cs->spin_count_div_256 = 0; cs->spin_count_div_256 = 0;
cs->lock_count = -1; cs->lock_count = -1;
@@ -600,15 +482,13 @@ SHIM_CALL RtlInitializeCriticalSection_shim(
XELOGD("RtlInitializeCriticalSection(%.8X)", cs_ptr); XELOGD("RtlInitializeCriticalSection(%.8X)", cs_ptr);
xeRtlInitializeCriticalSection(cs_ptr); auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
xeRtlInitializeCriticalSection(cs);
} }
X_STATUS xeRtlInitializeCriticalSectionAndSpinCount( X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(
uint32_t cs_ptr, uint32_t spin_count) { X_RTL_CRITICAL_SECTION* cs, uint32_t spin_count) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// NTSTATUS // NTSTATUS
// _Out_ LPCRITICAL_SECTION lpCriticalSection, // _Out_ LPCRITICAL_SECTION lpCriticalSection,
// _In_ DWORD dwSpinCount // _In_ DWORD dwSpinCount
@@ -620,7 +500,6 @@ X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(
spin_count_div_256 = 255; spin_count_div_256 = 255;
} }
X_RTL_CRITICAL_SECTION* cs = (X_RTL_CRITICAL_SECTION*)IMPL_MEM_ADDR(cs_ptr);
cs->unknown00 = 1; cs->unknown00 = 1;
cs->spin_count_div_256 = spin_count_div_256; cs->spin_count_div_256 = spin_count_div_256;
cs->lock_count = -1; cs->lock_count = -1;
@@ -639,22 +518,18 @@ SHIM_CALL RtlInitializeCriticalSectionAndSpinCount_shim(
XELOGD("RtlInitializeCriticalSectionAndSpinCount(%.8X, %d)", XELOGD("RtlInitializeCriticalSectionAndSpinCount(%.8X, %d)",
cs_ptr, spin_count); cs_ptr, spin_count);
auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
X_STATUS result = xeRtlInitializeCriticalSectionAndSpinCount( X_STATUS result = xeRtlInitializeCriticalSectionAndSpinCount(
cs_ptr, spin_count); cs, spin_count);
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
} }
// TODO(benvanik): remove the need for passing in thread_id. // TODO(benvanik): remove the need for passing in thread_id.
void xeRtlEnterCriticalSection(uint32_t cs_ptr, uint32_t thread_id) { void xeRtlEnterCriticalSection(X_RTL_CRITICAL_SECTION* cs, uint32_t thread_id) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID // VOID
// _Inout_ LPCRITICAL_SECTION lpCriticalSection // _Inout_ LPCRITICAL_SECTION lpCriticalSection
X_RTL_CRITICAL_SECTION* cs = (X_RTL_CRITICAL_SECTION*)IMPL_MEM_ADDR(cs_ptr);
uint32_t spin_wait_remaining = cs->spin_count_div_256 * 256; uint32_t spin_wait_remaining = cs->spin_count_div_256 * 256;
spin: spin:
if (poly::atomic_inc(&cs->lock_count) != 0) { if (poly::atomic_inc(&cs->lock_count) != 0) {
@@ -693,20 +568,16 @@ SHIM_CALL RtlEnterCriticalSection_shim(
const uint8_t* thread_state_block = ppc_state->membase + ppc_state->r[13]; const uint8_t* thread_state_block = ppc_state->membase + ppc_state->r[13];
uint32_t thread_id = XThread::GetCurrentThreadId(thread_state_block); uint32_t thread_id = XThread::GetCurrentThreadId(thread_state_block);
xeRtlEnterCriticalSection(cs_ptr, thread_id); auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
xeRtlEnterCriticalSection(cs, thread_id);
} }
// TODO(benvanik): remove the need for passing in thread_id. // TODO(benvanik): remove the need for passing in thread_id.
uint32_t xeRtlTryEnterCriticalSection(uint32_t cs_ptr, uint32_t thread_id) { uint32_t xeRtlTryEnterCriticalSection(X_RTL_CRITICAL_SECTION* cs, uint32_t thread_id) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// DWORD // DWORD
// _Inout_ LPCRITICAL_SECTION lpCriticalSection // _Inout_ LPCRITICAL_SECTION lpCriticalSection
X_RTL_CRITICAL_SECTION* cs = (X_RTL_CRITICAL_SECTION*)IMPL_MEM_ADDR(cs_ptr);
if (poly::atomic_cas(-1, 0, &cs->lock_count)) { if (poly::atomic_cas(-1, 0, &cs->lock_count)) {
// Able to steal the lock right away. // Able to steal the lock right away.
cs->owning_thread_id = thread_id; cs->owning_thread_id = thread_id;
@@ -731,20 +602,16 @@ SHIM_CALL RtlTryEnterCriticalSection_shim(
const uint8_t* thread_state_block = ppc_state->membase + ppc_state->r[13]; const uint8_t* thread_state_block = ppc_state->membase + ppc_state->r[13];
uint32_t thread_id = XThread::GetCurrentThreadId(thread_state_block); uint32_t thread_id = XThread::GetCurrentThreadId(thread_state_block);
uint32_t result = xeRtlTryEnterCriticalSection(cs_ptr, thread_id); auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
uint32_t result = xeRtlTryEnterCriticalSection(cs, thread_id);
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
void xeRtlLeaveCriticalSection(uint32_t cs_ptr) { void xeRtlLeaveCriticalSection(X_RTL_CRITICAL_SECTION* cs) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// VOID // VOID
// _Inout_ LPCRITICAL_SECTION lpCriticalSection // _Inout_ LPCRITICAL_SECTION lpCriticalSection
X_RTL_CRITICAL_SECTION* cs = (X_RTL_CRITICAL_SECTION*)IMPL_MEM_ADDR(cs_ptr);
// Drop recursion count - if we are still not zero'ed return. // Drop recursion count - if we are still not zero'ed return.
uint32_t recursion_count = --cs->recursion_count; uint32_t recursion_count = --cs->recursion_count;
if (recursion_count) { if (recursion_count) {
@@ -768,7 +635,8 @@ SHIM_CALL RtlLeaveCriticalSection_shim(
// XELOGD("RtlLeaveCriticalSection(%.8X)", cs_ptr); // XELOGD("RtlLeaveCriticalSection(%.8X)", cs_ptr);
xeRtlLeaveCriticalSection(cs_ptr); auto cs = (X_RTL_CRITICAL_SECTION*)SHIM_MEM_ADDR(cs_ptr);
xeRtlLeaveCriticalSection(cs);
} }


@@ -14,21 +14,20 @@
 #include <xenia/core.h>
 #include <xenia/xbox.h>
 
 namespace xe {
 namespace kernel {
 
-void xeRtlInitializeCriticalSection(uint32_t cs_ptr);
-X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(
-    uint32_t cs_ptr, uint32_t spin_count);
-void xeRtlEnterCriticalSection(uint32_t cs_ptr, uint32_t thread_id);
-uint32_t xeRtlTryEnterCriticalSection(uint32_t cs_ptr, uint32_t thread_id);
-void xeRtlLeaveCriticalSection(uint32_t cs_ptr);
+struct X_RTL_CRITICAL_SECTION;
+
+void xeRtlInitializeCriticalSection(X_RTL_CRITICAL_SECTION* cs);
+X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(X_RTL_CRITICAL_SECTION* cs,
+                                                    uint32_t spin_count);
+void xeRtlEnterCriticalSection(X_RTL_CRITICAL_SECTION* cs, uint32_t thread_id);
+uint32_t xeRtlTryEnterCriticalSection(X_RTL_CRITICAL_SECTION* cs,
+                                      uint32_t thread_id);
+void xeRtlLeaveCriticalSection(X_RTL_CRITICAL_SECTION* cs);
 
 }  // namespace kernel
 }  // namespace xe
 
 #endif  // XENIA_KERNEL_XBOXKRNL_RTL_H_


@@ -66,44 +66,6 @@ namespace kernel {
// } // }
X_STATUS xeExCreateThread(
uint32_t* handle_ptr, uint32_t stack_size, uint32_t* thread_id_ptr,
uint32_t xapi_thread_startup,
uint32_t start_address, uint32_t start_context, uint32_t creation_flags) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// DWORD
// LPHANDLE Handle,
// DWORD StackSize,
// LPDWORD ThreadId,
// LPVOID XapiThreadStartup, ?? often 0
// LPVOID StartAddress,
// LPVOID StartContext,
// DWORD CreationFlags // 0x80?
XThread* thread = new XThread(
state, stack_size, xapi_thread_startup, start_address, start_context,
creation_flags);
X_STATUS result_code = thread->Create();
if (XFAILED(result_code)) {
// Failed!
thread->Release();
XELOGE("Thread creation failed: %.8X", result_code);
return result_code;
}
if (handle_ptr) {
*handle_ptr = thread->handle();
}
if (thread_id_ptr) {
*thread_id_ptr = thread->thread_id();
}
return result_code;
}
SHIM_CALL ExCreateThread_shim( SHIM_CALL ExCreateThread_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t handle_ptr = SHIM_GET_ARG_32(0); uint32_t handle_ptr = SHIM_GET_ARG_32(0);
@@ -124,18 +86,34 @@ SHIM_CALL ExCreateThread_shim(
start_context, start_context,
creation_flags); creation_flags);
uint32_t handle; // DWORD
uint32_t thread_id; // LPHANDLE Handle,
X_STATUS result = xeExCreateThread( // DWORD StackSize,
&handle, stack_size, &thread_id, xapi_thread_startup, // LPDWORD ThreadId,
start_address, start_context, creation_flags); // LPVOID XapiThreadStartup, ?? often 0
// LPVOID StartAddress,
// LPVOID StartContext,
// DWORD CreationFlags // 0x80?
XThread* thread = new XThread(
state, stack_size, xapi_thread_startup, start_address, start_context,
creation_flags);
X_STATUS result = thread->Create();
if (XFAILED(result)) {
// Failed!
thread->Release();
XELOGE("Thread creation failed: %.8X", result);
SHIM_SET_RETURN_32(result);
return;
}
if (XSUCCEEDED(result)) { if (XSUCCEEDED(result)) {
if (handle_ptr) { if (handle_ptr) {
SHIM_SET_MEM_32(handle_ptr, handle); SHIM_SET_MEM_32(handle_ptr, thread->handle());
} }
if (thread_id_ptr) { if (thread_id_ptr) {
SHIM_SET_MEM_32(thread_id_ptr, thread_id); SHIM_SET_MEM_32(thread_id_ptr, thread->thread_id());
} }
} }
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
@@ -158,24 +136,6 @@ SHIM_CALL ExTerminateThread_shim(
} }
X_STATUS xeNtResumeThread(uint32_t handle, uint32_t* out_suspend_count) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
X_STATUS result = X_STATUS_SUCCESS;
XThread* thread = NULL;
result = state->object_table()->GetObject(
handle, (XObject**)&thread);
if (XSUCCEEDED(result)) {
result = thread->Resume(out_suspend_count);
thread->Release();
}
return result;
}
SHIM_CALL NtResumeThread_shim( SHIM_CALL NtResumeThread_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t handle = SHIM_GET_ARG_32(0); uint32_t handle = SHIM_GET_ARG_32(0);
@@ -186,8 +146,14 @@ SHIM_CALL NtResumeThread_shim(
handle, handle,
suspend_count_ptr); suspend_count_ptr);
XThread* thread = NULL;
X_STATUS result = state->object_table()->GetObject(
handle, (XObject**)&thread);
uint32_t suspend_count; uint32_t suspend_count;
X_STATUS result = xeNtResumeThread(handle, &suspend_count); if (XSUCCEEDED(result)) {
result = thread->Resume(&suspend_count);
thread->Release();
}
if (XSUCCEEDED(result)) { if (XSUCCEEDED(result)) {
if (suspend_count_ptr) { if (suspend_count_ptr) {
SHIM_SET_MEM_32(suspend_count_ptr, suspend_count); SHIM_SET_MEM_32(suspend_count_ptr, suspend_count);
@@ -198,34 +164,22 @@ SHIM_CALL NtResumeThread_shim(
} }
X_STATUS xeKeResumeThread(void* thread_ptr, uint32_t* out_suspend_count) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
X_STATUS result = X_STATUS_SUCCESS;
XThread* thread = (XThread*)XObject::GetObject(state, thread_ptr);
if (thread) {
result = thread->Resume(out_suspend_count);
}
return result;
}
SHIM_CALL KeResumeThread_shim( SHIM_CALL KeResumeThread_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t thread = SHIM_GET_ARG_32(0); uint32_t thread_ptr = SHIM_GET_ARG_32(0);
uint32_t suspend_count_ptr = SHIM_GET_ARG_32(1); uint32_t suspend_count_ptr = SHIM_GET_ARG_32(1);
XELOGD( XELOGD(
"KeResumeThread(%.8X, %.8X)", "KeResumeThread(%.8X, %.8X)",
thread, thread_ptr,
suspend_count_ptr); suspend_count_ptr);
void* thread_ptr = SHIM_MEM_ADDR(thread); X_STATUS result;
XThread* thread = (XThread*)XObject::GetObject(state, SHIM_MEM_ADDR(thread_ptr));
uint32_t suspend_count; uint32_t suspend_count;
X_STATUS result = xeKeResumeThread(thread_ptr, &suspend_count); if (thread) {
result = thread->Resume(&suspend_count);
}
if (XSUCCEEDED(result)) { if (XSUCCEEDED(result)) {
if (suspend_count_ptr) { if (suspend_count_ptr) {
SHIM_SET_MEM_32(suspend_count_ptr, suspend_count); SHIM_SET_MEM_32(suspend_count_ptr, suspend_count);
@@ -236,24 +190,6 @@ SHIM_CALL KeResumeThread_shim(
} }
X_STATUS xeNtSuspendThread(uint32_t handle, uint32_t* out_suspend_count) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
X_STATUS result = X_STATUS_SUCCESS;
XThread* thread = NULL;
result = state->object_table()->GetObject(
handle, (XObject**)&thread);
if (XSUCCEEDED(result)) {
result = thread->Suspend(out_suspend_count);
thread->Release();
}
return result;
}
SHIM_CALL NtSuspendThread_shim( SHIM_CALL NtSuspendThread_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t handle = SHIM_GET_ARG_32(0); uint32_t handle = SHIM_GET_ARG_32(0);
@@ -264,8 +200,15 @@ SHIM_CALL NtSuspendThread_shim(
handle, handle,
suspend_count_ptr); suspend_count_ptr);
XThread* thread = NULL;
X_STATUS result = state->object_table()->GetObject(
handle, (XObject**)&thread);
uint32_t suspend_count; uint32_t suspend_count;
X_STATUS result = xeNtSuspendThread(handle, &suspend_count); if (XSUCCEEDED(result)) {
result = thread->Suspend(&suspend_count);
thread->Release();
}
if (XSUCCEEDED(result)) { if (XSUCCEEDED(result)) {
if (suspend_count_ptr) { if (suspend_count_ptr) {
SHIM_SET_MEM_32(suspend_count_ptr, suspend_count); SHIM_SET_MEM_32(suspend_count_ptr, suspend_count);
@@ -276,32 +219,22 @@ SHIM_CALL NtSuspendThread_shim(
} }
uint32_t xeKeSetAffinityThread(void* thread_ptr, uint32_t affinity) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XThread* thread = (XThread*)XObject::GetObject(state, thread_ptr);
if (thread) {
thread->SetAffinity(affinity);
}
return affinity;
}
SHIM_CALL KeSetAffinityThread_shim( SHIM_CALL KeSetAffinityThread_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t thread = SHIM_GET_ARG_32(0); uint32_t thread_ptr = SHIM_GET_ARG_32(0);
uint32_t affinity = SHIM_GET_ARG_32(1); uint32_t affinity = SHIM_GET_ARG_32(1);
XELOGD( XELOGD(
"KeSetAffinityThread(%.8X, %.8X)", "KeSetAffinityThread(%.8X, %.8X)",
thread, thread_ptr,
affinity); affinity);
void* thread_ptr = SHIM_MEM_ADDR(thread); XThread* thread = (XThread*)XObject::GetObject(state, SHIM_MEM_ADDR(thread_ptr));
uint32_t result = xeKeSetAffinityThread(thread_ptr, affinity); if (thread) {
SHIM_SET_RETURN_32(result); thread->SetAffinity(affinity);
}
SHIM_SET_RETURN_32(affinity);
} }
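By contrast, the Ke* variants receive a guest pointer to the dispatcher object rather than a handle, so the lookup goes through XObject::GetObject() on the translated address and no Release() is issued on that path, as in KeResumeThread_shim and KeSetAffinityThread_shim above. A hedged sketch, assuming SHIM_MEM_ADDR resolves against the ppc_state in scope exactly as in those shims (the helper name is invented):

// Sketch: pointer-based lookup used by the Ke* shims; returns nullptr if the
// guest structure has no backing XThread.
XThread* LookupThreadByGuestPointer(PPCContext* ppc_state, KernelState* state,
                                    uint32_t guest_ptr) {
  return (XThread*)XObject::GetObject(state, SHIM_MEM_ADDR(guest_ptr));
}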
@ -325,45 +258,26 @@ SHIM_CALL KeQueryBasePriorityThread_shim(
} }
uint32_t xeKeSetBasePriorityThread(void* thread_ptr, int32_t increment) { SHIM_CALL KeSetBasePriorityThread_shim(
KernelState* state = shared_kernel_state_; PPCContext* ppc_state, KernelState* state) {
assert_not_null(state); uint32_t thread_ptr = SHIM_GET_ARG_32(0);
uint32_t increment = SHIM_GET_ARG_32(1);
XELOGD(
"KeSetBasePriorityThread(%.8X, %.8X)",
thread_ptr,
increment);
int32_t prev_priority = 0; int32_t prev_priority = 0;
XThread* thread = (XThread*)XObject::GetObject(state, thread_ptr); XThread* thread =
(XThread*)XObject::GetObject(state, SHIM_MEM_ADDR(thread_ptr));
if (thread) { if (thread) {
prev_priority = thread->QueryPriority(); prev_priority = thread->QueryPriority();
thread->SetPriority(increment); thread->SetPriority(increment);
} }
return prev_priority; SHIM_SET_RETURN_32(prev_priority);
}
SHIM_CALL KeSetBasePriorityThread_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t thread = SHIM_GET_ARG_32(0);
uint32_t increment = SHIM_GET_ARG_32(1);
XELOGD(
"KeSetBasePriorityThread(%.8X, %.8X)",
thread,
increment);
void* thread_ptr = SHIM_MEM_ADDR(thread);
uint32_t result = xeKeSetBasePriorityThread(thread_ptr, increment);
SHIM_SET_RETURN_32(result);
}
uint32_t xeKeGetCurrentProcessType() {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
// DWORD
return X_PROCTYPE_USER;
} }
@ -372,38 +286,27 @@ SHIM_CALL KeGetCurrentProcessType_shim(
// XELOGD( // XELOGD(
// "KeGetCurrentProcessType()"); // "KeGetCurrentProcessType()");
int result = xeKeGetCurrentProcessType(); // DWORD
int result = X_PROCTYPE_USER;
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
uint64_t xeKeQueryPerformanceFrequency() {
LARGE_INTEGER frequency;
if (QueryPerformanceFrequency(&frequency)) {
return frequency.QuadPart;
} else {
return 0;
}
}
SHIM_CALL KeQueryPerformanceFrequency_shim( SHIM_CALL KeQueryPerformanceFrequency_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
// XELOGD( // XELOGD(
// "KeQueryPerformanceFrequency()"); // "KeQueryPerformanceFrequency()");
uint64_t result = xeKeQueryPerformanceFrequency(); uint64_t result = 0;
LARGE_INTEGER frequency;
if (QueryPerformanceFrequency(&frequency)) {
result = frequency.QuadPart;
}
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
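QueryPerformanceFrequency is Win32-only; purely as a hedged sketch (not something this commit does), a portable fallback could derive a tick rate from std::chrono's compile-time clock period:

#include <chrono>
#include <cstdint>

// Sketch: ticks-per-second of std::chrono::steady_clock; on most platforms
// the period is 1ns, so this evaluates to 1000000000.
constexpr uint64_t SteadyClockFrequency() {
  using period = std::chrono::steady_clock::period;
  return static_cast<uint64_t>(period::den) / period::num;
}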
X_STATUS xeKeDelayExecutionThread(
uint32_t processor_mode, uint32_t alertable, uint64_t interval) {
XThread* thread = XThread::GetCurrentThread();
return thread->Delay(processor_mode, alertable, interval);
}
SHIM_CALL KeDelayExecutionThread_shim( SHIM_CALL KeDelayExecutionThread_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t processor_mode = SHIM_GET_ARG_32(0); uint32_t processor_mode = SHIM_GET_ARG_32(0);
@ -415,8 +318,8 @@ SHIM_CALL KeDelayExecutionThread_shim(
// "KeDelayExecutionThread(%.8X, %d, %.8X(%.16llX)", // "KeDelayExecutionThread(%.8X, %d, %.8X(%.16llX)",
// processor_mode, alertable, interval_ptr, interval); // processor_mode, alertable, interval_ptr, interval);
X_STATUS result = xeKeDelayExecutionThread( XThread* thread = XThread::GetCurrentThread();
processor_mode, alertable, interval); X_STATUS result = thread->Delay(processor_mode, alertable, interval);
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
} }
@ -425,18 +328,12 @@ SHIM_CALL KeDelayExecutionThread_shim(
SHIM_CALL NtYieldExecution_shim( SHIM_CALL NtYieldExecution_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
XELOGD("NtYieldExecution()"); XELOGD("NtYieldExecution()");
xeKeDelayExecutionThread(0, 0, 0); XThread* thread = XThread::GetCurrentThread();
X_STATUS result = thread->Delay(0, 0, 0);
SHIM_SET_RETURN_64(0); SHIM_SET_RETURN_64(0);
} }
void xeKeQuerySystemTime(uint64_t* time_ptr) {
FILETIME t;
GetSystemTimeAsFileTime(&t);
*time_ptr = ((uint64_t)t.dwHighDateTime << 32) | t.dwLowDateTime;
}
SHIM_CALL KeQuerySystemTime_shim( SHIM_CALL KeQuerySystemTime_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t time_ptr = SHIM_GET_ARG_32(0); uint32_t time_ptr = SHIM_GET_ARG_32(0);
@ -445,8 +342,9 @@ SHIM_CALL KeQuerySystemTime_shim(
"KeQuerySystemTime(%.8X)", "KeQuerySystemTime(%.8X)",
time_ptr); time_ptr);
uint64_t time; FILETIME t;
xeKeQuerySystemTime(&time); GetSystemTimeAsFileTime(&t);
uint64_t time = ((uint64_t)t.dwHighDateTime << 32) | t.dwLowDateTime;
if (time_ptr) { if (time_ptr) {
SHIM_SET_MEM_64(time_ptr, time); SHIM_SET_MEM_64(time_ptr, time);
@ -461,8 +359,10 @@ SHIM_CALL KeQuerySystemTime_shim(
// http://msdn.microsoft.com/en-us/library/ms686801 // http://msdn.microsoft.com/en-us/library/ms686801
uint32_t xeKeTlsAlloc() { SHIM_CALL KeTlsAlloc_shim(
// DWORD PPCContext* ppc_state, KernelState* state) {
XELOGD(
"KeTlsAlloc()");
uint32_t tls_index; uint32_t tls_index;
@ -477,41 +377,11 @@ uint32_t xeKeTlsAlloc() {
} }
#endif // WIN32 #endif // WIN32
return tls_index; SHIM_SET_RETURN_64(tls_index);
}
SHIM_CALL KeTlsAlloc_shim(
PPCContext* ppc_state, KernelState* state) {
XELOGD(
"KeTlsAlloc()");
uint32_t result = xeKeTlsAlloc();
SHIM_SET_RETURN_64(result);
} }
// http://msdn.microsoft.com/en-us/library/ms686804 // http://msdn.microsoft.com/en-us/library/ms686804
int KeTlsFree(uint32_t tls_index) {
// BOOL
// _In_ DWORD dwTlsIndex
if (tls_index == X_TLS_OUT_OF_INDEXES) {
return 0;
}
int result_code = 0;
#if XE_PLATFORM_WIN32
result_code = TlsFree(tls_index);
#else
result_code = pthread_key_delete(tls_index) == 0;
#endif // WIN32
return result_code;
}
SHIM_CALL KeTlsFree_shim( SHIM_CALL KeTlsFree_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t tls_index = SHIM_GET_ARG_32(0); uint32_t tls_index = SHIM_GET_ARG_32(0);
@ -520,15 +390,32 @@ SHIM_CALL KeTlsFree_shim(
"KeTlsFree(%.8X)", "KeTlsFree(%.8X)",
tls_index); tls_index);
int result = xeKeTlsAlloc(); if (tls_index == X_TLS_OUT_OF_INDEXES) {
SHIM_SET_RETURN_64(0);
return;
}
int result = 0;
#if XE_PLATFORM_WIN32
result = TlsFree(tls_index);
#else
result = pthread_key_delete(tls_index) == 0;
#endif // WIN32
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
// http://msdn.microsoft.com/en-us/library/ms686812 // http://msdn.microsoft.com/en-us/library/ms686812
uint64_t xeKeTlsGetValue(uint32_t tls_index) { SHIM_CALL KeTlsGetValue_shim(
// LPVOID PPCContext* ppc_state, KernelState* state) {
// _In_ DWORD dwTlsIndex uint32_t tls_index = SHIM_GET_ARG_32(0);
// Logging disabled, as some games spam this.
//XELOGD(
// "KeTlsGetValue(%.8X)",
// tls_index);
uint64_t value = 0; uint64_t value = 0;
@ -539,46 +426,15 @@ uint64_t xeKeTlsGetValue(uint32_t tls_index) {
#endif // WIN32 #endif // WIN32
if (!value) { if (!value) {
XELOGW("KeTlsGetValue should SetLastError if result is NULL"); // XELOGW("KeTlsGetValue should SetLastError if result is NULL");
// TODO(benvanik): SetLastError // TODO(benvanik): SetLastError
} }
return value; SHIM_SET_RETURN_64(value);
}
SHIM_CALL KeTlsGetValue_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t tls_index = SHIM_GET_ARG_32(0);
// Logging disabled, as some games spam this.
//XELOGD(
// "KeTlsGetValue(%.8X)",
// tls_index);
uint64_t result = xeKeTlsGetValue(tls_index);
SHIM_SET_RETURN_64(result);
} }
// http://msdn.microsoft.com/en-us/library/ms686818 // http://msdn.microsoft.com/en-us/library/ms686818
int xeKeTlsSetValue(uint32_t tls_index, uint64_t tls_value) {
// BOOL
// _In_ DWORD dwTlsIndex,
// _In_opt_ LPVOID lpTlsValue
int result_code = 0;
#if XE_PLATFORM_WIN32
result_code = TlsSetValue(tls_index, (LPVOID)tls_value);
#else
result_code = pthread_setspecific(tls_index, (void*)tls_value) == 0;
#endif // WIN32
return result_code;
}
SHIM_CALL KeTlsSetValue_shim( SHIM_CALL KeTlsSetValue_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t tls_index = SHIM_GET_ARG_32(0); uint32_t tls_index = SHIM_GET_ARG_32(0);
@ -588,30 +444,18 @@ SHIM_CALL KeTlsSetValue_shim(
"KeTlsSetValue(%.8X, %.8X)", "KeTlsSetValue(%.8X, %.8X)",
tls_index, tls_value); tls_index, tls_value);
int result = xeKeTlsSetValue(tls_index, tls_value); int result = 0;
#if XE_PLATFORM_WIN32
result = TlsSetValue(tls_index, (LPVOID)tls_value);
#else
result = pthread_setspecific(tls_index, (void*)tls_value) == 0;
#endif // WIN32
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
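Each guest TLS index is now backed one-for-one by a host slot: a TlsAlloc index on Win32, a pthread key elsewhere. A hedged host-side sketch of the round trip the four KeTls* shims implement (no error handling, names invented for the sketch):

#if XE_PLATFORM_WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <cstdint>

// Sketch: allocate a slot, stash a value, read it back, free the slot.
uint64_t TlsRoundTrip(uint64_t value_in) {
#if XE_PLATFORM_WIN32
  uint32_t slot = TlsAlloc();
  TlsSetValue(slot, (LPVOID)value_in);
  uint64_t value_out = (uint64_t)TlsGetValue(slot);
  TlsFree(slot);
#else
  pthread_key_t slot;
  pthread_key_create(&slot, nullptr);
  pthread_setspecific(slot, (void*)value_in);
  uint64_t value_out = (uint64_t)pthread_getspecific(slot);
  pthread_key_delete(slot);
#endif
  return value_out;
}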
X_STATUS xeNtCreateEvent(uint32_t* handle_ptr, void* obj_attributes,
uint32_t event_type, uint32_t initial_state) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XEvent* ev = new XEvent(state);
ev->Initialize(!event_type, !!initial_state);
// obj_attributes may have a name inside of it, if != NULL.
if (obj_attributes) {
//ev->SetName(...);
}
*handle_ptr = ev->handle();
return X_STATUS_SUCCESS;
}
SHIM_CALL NtCreateEvent_shim( SHIM_CALL NtCreateEvent_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t handle_ptr = SHIM_GET_ARG_32(0); uint32_t handle_ptr = SHIM_GET_ARG_32(0);
@ -623,31 +467,19 @@ SHIM_CALL NtCreateEvent_shim(
"NtCreateEvent(%.8X, %.8X, %d, %d)", "NtCreateEvent(%.8X, %.8X, %d, %d)",
handle_ptr, obj_attributes_ptr, event_type, initial_state); handle_ptr, obj_attributes_ptr, event_type, initial_state);
uint32_t handle; XEvent* ev = new XEvent(state);
X_STATUS result = xeNtCreateEvent( ev->Initialize(!event_type, !!initial_state);
&handle, SHIM_MEM_ADDR(obj_attributes_ptr),
event_type, initial_state);
if (XSUCCEEDED(result)) { // obj_attributes may have a name inside of it, if != NULL.
if (handle_ptr) { auto obj_attributes = SHIM_MEM_ADDR(obj_attributes_ptr);
SHIM_SET_MEM_32(handle_ptr, handle); if (obj_attributes) {
} //ev->SetName(...);
}
SHIM_SET_RETURN_32(result);
}
int32_t xeKeSetEvent(void* event_ptr, uint32_t increment, uint32_t wait) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XEvent* ev = (XEvent*)XObject::GetObject(state, event_ptr);
assert_not_null(ev);
if (!ev) {
return 0;
} }
return ev->Set(increment, !!wait); if (handle_ptr) {
SHIM_SET_MEM_32(handle_ptr, ev->handle());
}
SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
} }
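One subtlety worth spelling out: NT's EVENT_TYPE uses 0 for NotificationEvent (manual reset) and 1 for SynchronizationEvent (auto reset), which is why the shim passes !event_type as the first Initialize() argument. A hedged restatement of that mapping, assuming Initialize(manual_reset, initial_state) as the call above implies:

// Sketch: NT event type -> XEvent::Initialize(manual_reset, initial_state).
//   NotificationEvent    (0) -> manual reset -> Initialize(true,  ...)
//   SynchronizationEvent (1) -> auto reset   -> Initialize(false, ...)
void InitializeFromNtEventType(XEvent* ev, uint32_t event_type,
                               uint32_t initial_state) {
  ev->Initialize(!event_type, !!initial_state);
}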
@ -662,8 +494,15 @@ SHIM_CALL KeSetEvent_shim(
event_ref, increment, wait); event_ref, increment, wait);
void* event_ptr = SHIM_MEM_ADDR(event_ref); void* event_ptr = SHIM_MEM_ADDR(event_ref);
int32_t result = xeKeSetEvent(event_ptr, increment, wait);
XEvent* ev = (XEvent*)XObject::GetObject(state, event_ptr);
assert_not_null(ev);
if (!ev) {
SHIM_SET_RETURN_64(0);
return;
}
auto result = ev->Set(increment, !!wait);
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
@ -745,20 +584,6 @@ SHIM_CALL NtPulseEvent_shim(
} }
int32_t xeKeResetEvent(void* event_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XEvent* ev = (XEvent*)XEvent::GetObject(state, event_ptr);
assert_not_null(ev);
if (!ev) {
return 0;
}
return ev->Reset();
}
SHIM_CALL KeResetEvent_shim( SHIM_CALL KeResetEvent_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t event_ref = SHIM_GET_ARG_32(0); uint32_t event_ref = SHIM_GET_ARG_32(0);
@ -768,8 +593,14 @@ SHIM_CALL KeResetEvent_shim(
event_ref); event_ref);
void* event_ptr = SHIM_MEM_ADDR(event_ref); void* event_ptr = SHIM_MEM_ADDR(event_ref);
int32_t result = xeKeResetEvent(event_ptr); XEvent* ev = (XEvent*)XEvent::GetObject(state, event_ptr);
assert_not_null(ev);
if (!ev) {
SHIM_SET_RETURN_64(0);
return;
}
auto result = ev->Reset();
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
@ -823,22 +654,6 @@ SHIM_CALL NtCreateSemaphore_shim(
} }
void xeKeInitializeSemaphore(
void* semaphore_ptr, int32_t count, int32_t limit) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XSemaphore* sem = (XSemaphore*)XSemaphore::GetObject(
state, semaphore_ptr, 5 /* SemaphoreObject */);
assert_not_null(sem);
if (!sem) {
return;
}
sem->Initialize(count, limit);
}
SHIM_CALL KeInitializeSemaphore_shim( SHIM_CALL KeInitializeSemaphore_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t semaphore_ref = SHIM_GET_ARG_32(0); uint32_t semaphore_ref = SHIM_GET_ARG_32(0);
@ -850,25 +665,14 @@ SHIM_CALL KeInitializeSemaphore_shim(
semaphore_ref, count, limit); semaphore_ref, count, limit);
void* semaphore_ptr = SHIM_MEM_ADDR(semaphore_ref); void* semaphore_ptr = SHIM_MEM_ADDR(semaphore_ref);
xeKeInitializeSemaphore(semaphore_ptr, count, limit); XSemaphore* sem = (XSemaphore*)XSemaphore::GetObject(
} state, semaphore_ptr, 5 /* SemaphoreObject */);
int32_t xeKeReleaseSemaphore(
void* semaphore_ptr, int32_t increment, int32_t adjustment, bool wait) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XSemaphore* sem = (XSemaphore*)XSemaphore::GetObject(state, semaphore_ptr);
assert_not_null(sem); assert_not_null(sem);
if (!sem) { if (!sem) {
return 0; return;
} }
// TODO(benvanik): increment thread priority? sem->Initialize(count, limit);
// TODO(benvanik): wait?
return sem->ReleaseSemaphore(adjustment);
} }
@ -884,9 +688,17 @@ SHIM_CALL KeReleaseSemaphore_shim(
semaphore_ref, increment, adjustment, wait); semaphore_ref, increment, adjustment, wait);
void* semaphore_ptr = SHIM_MEM_ADDR(semaphore_ref); void* semaphore_ptr = SHIM_MEM_ADDR(semaphore_ref);
int32_t result = xeKeReleaseSemaphore( XSemaphore* sem = (XSemaphore*)XSemaphore::GetObject(state, semaphore_ptr);
semaphore_ptr, increment, adjustment, wait == 1); assert_not_null(sem);
if (!sem) {
SHIM_SET_RETURN_64(0);
return;
}
// TODO(benvanik): increment thread priority?
// TODO(benvanik): wait?
int32_t result = sem->ReleaseSemaphore(adjustment);
SHIM_SET_RETURN_64(result); SHIM_SET_RETURN_64(result);
} }
@ -1072,25 +884,9 @@ SHIM_CALL NtCancelTimer_shim(
} }
X_STATUS xeKeWaitForSingleObject(
void* object_ptr, uint32_t wait_reason, uint32_t processor_mode,
uint32_t alertable, uint64_t* opt_timeout) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
XObject* object = XObject::GetObject(state, object_ptr);
if (!object) {
// The only kind-of failure code.
return X_STATUS_ABANDONED_WAIT_0;
}
return object->Wait(wait_reason, processor_mode, alertable, opt_timeout);
}
SHIM_CALL KeWaitForSingleObject_shim( SHIM_CALL KeWaitForSingleObject_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t object = SHIM_GET_ARG_32(0); uint32_t object_ptr = SHIM_GET_ARG_32(0);
uint32_t wait_reason = SHIM_GET_ARG_32(1); uint32_t wait_reason = SHIM_GET_ARG_32(1);
uint32_t processor_mode = SHIM_GET_ARG_32(2); uint32_t processor_mode = SHIM_GET_ARG_32(2);
uint32_t alertable = SHIM_GET_ARG_32(3); uint32_t alertable = SHIM_GET_ARG_32(3);
@ -1098,13 +894,18 @@ SHIM_CALL KeWaitForSingleObject_shim(
XELOGD( XELOGD(
"KeWaitForSingleObject(%.8X, %.8X, %.8X, %.1X, %.8X)", "KeWaitForSingleObject(%.8X, %.8X, %.8X, %.1X, %.8X)",
object, wait_reason, processor_mode, alertable, timeout_ptr); object_ptr, wait_reason, processor_mode, alertable, timeout_ptr);
XObject* object = XObject::GetObject(state, SHIM_MEM_ADDR(object_ptr));
if (!object) {
// The only kind-of failure code.
SHIM_SET_RETURN_32(X_STATUS_ABANDONED_WAIT_0);
return;
}
void* object_ptr = SHIM_MEM_ADDR(object);
uint64_t timeout = timeout_ptr ? SHIM_MEM_64(timeout_ptr) : 0; uint64_t timeout = timeout_ptr ? SHIM_MEM_64(timeout_ptr) : 0;
X_STATUS result = xeKeWaitForSingleObject( X_STATUS result = object->Wait(wait_reason, processor_mode, alertable,
object_ptr, wait_reason, processor_mode, alertable, timeout_ptr ? &timeout : nullptr);
timeout_ptr ? &timeout : NULL);
SHIM_SET_RETURN_32(result); SHIM_SET_RETURN_32(result);
} }
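The optional timeout read through SHIM_MEM_64 follows the NT convention: a negative value is a relative interval in 100ns units, a positive value is an absolute system time. A hedged sketch of the conversion such waits rely on (compare XObject::TimeoutTicksToMs further down; absolute timeouts are left out of this sketch):

// Sketch: relative 100ns ticks -> milliseconds; zero or positive yields 0.
uint32_t TimeoutTicksToMsSketch(int64_t timeout_ticks) {
  if (timeout_ticks < 0) {
    return static_cast<uint32_t>(-timeout_ticks / 10000);  // 100ns -> 1ms
  }
  return 0;
}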
@ -1259,19 +1060,6 @@ SHIM_CALL NtSignalAndWaitForSingleObjectEx_shim(
} }
uint32_t xeKfAcquireSpinLock(uint32_t* lock_ptr) {
// Lock.
while (!poly::atomic_cas(0, 1, lock_ptr)) {
// Spin!
// TODO(benvanik): error on deadlock?
}
// Raise IRQL to DISPATCH.
XThread* thread = XThread::GetCurrentThread();
return thread->RaiseIrql(2);
}
SHIM_CALL KfAcquireSpinLock_shim( SHIM_CALL KfAcquireSpinLock_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t lock_ptr = SHIM_GET_ARG_32(0); uint32_t lock_ptr = SHIM_GET_ARG_32(0);
@ -1280,23 +1068,21 @@ SHIM_CALL KfAcquireSpinLock_shim(
// "KfAcquireSpinLock(%.8X)", // "KfAcquireSpinLock(%.8X)",
// lock_ptr); // lock_ptr);
// Lock.
auto lock = reinterpret_cast<uint32_t*>(SHIM_MEM_ADDR(lock_ptr)); auto lock = reinterpret_cast<uint32_t*>(SHIM_MEM_ADDR(lock_ptr));
uint32_t old_irql = xeKfAcquireSpinLock(lock); while (!poly::atomic_cas(0, 1, lock)) {
// Spin!
// TODO(benvanik): error on deadlock?
}
// Raise IRQL to DISPATCH.
XThread* thread = XThread::GetCurrentThread();
auto old_irql = thread->RaiseIrql(2);
SHIM_SET_RETURN_64(old_irql); SHIM_SET_RETURN_64(old_irql);
} }
void xeKfReleaseSpinLock(uint32_t* lock_ptr, uint32_t old_irql) {
// Restore IRQL.
XThread* thread = XThread::GetCurrentThread();
thread->LowerIrql(old_irql);
// Unlock.
poly::atomic_dec(lock_ptr);
}
SHIM_CALL KfReleaseSpinLock_shim( SHIM_CALL KfReleaseSpinLock_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t lock_ptr = SHIM_GET_ARG_32(0); uint32_t lock_ptr = SHIM_GET_ARG_32(0);
@ -1307,8 +1093,13 @@ SHIM_CALL KfReleaseSpinLock_shim(
// lock_ptr, // lock_ptr,
// old_irql); // old_irql);
xeKfReleaseSpinLock(reinterpret_cast<uint32_t*>(SHIM_MEM_ADDR(lock_ptr)), // Restore IRQL.
old_irql); XThread* thread = XThread::GetCurrentThread();
thread->LowerIrql(old_irql);
// Unlock.
auto lock = reinterpret_cast<uint32_t*>(SHIM_MEM_ADDR(lock_ptr));
poly::atomic_dec(lock);
} }
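With the helpers merged away, the spin-lock contract lives entirely in the two shims above: KfAcquireSpinLock returns the IRQL it displaced, and KfReleaseSpinLock must be handed that same value back. A minimal sketch of the paired operations using the same poly atomics and XThread IRQL calls as the shims (function names invented):

// Sketch: acquire spins on a 0 -> 1 CAS, then raises IRQL to DISPATCH (2);
// release restores the caller's IRQL before dropping the lock word.
uint32_t AcquireSpinLockSketch(uint32_t* lock) {
  while (!poly::atomic_cas(0, 1, lock)) {
    // Spin until the previous holder releases.
  }
  return XThread::GetCurrentThread()->RaiseIrql(2);
}

void ReleaseSpinLockSketch(uint32_t* lock, uint32_t old_irql) {
  XThread::GetCurrentThread()->LowerIrql(old_irql);
  poly::atomic_dec(lock);
}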
@ -1343,21 +1134,11 @@ SHIM_CALL KeReleaseSpinLockFromRaisedIrql_shim(
} }
void xeKeEnterCriticalRegion() {
XThread::EnterCriticalRegion();
}
SHIM_CALL KeEnterCriticalRegion_shim( SHIM_CALL KeEnterCriticalRegion_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
// XELOGD( // XELOGD(
// "KeEnterCriticalRegion()"); // "KeEnterCriticalRegion()");
xeKeEnterCriticalRegion(); XThread::EnterCriticalRegion();
}
void xeKeLeaveCriticalRegion() {
XThread::LeaveCriticalRegion();
} }
@ -1365,7 +1146,7 @@ SHIM_CALL KeLeaveCriticalRegion_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
// XELOGD( // XELOGD(
// "KeLeaveCriticalRegion()"); // "KeLeaveCriticalRegion()");
xeKeLeaveCriticalRegion(); XThread::LeaveCriticalRegion();
} }
@ -43,12 +43,6 @@ namespace kernel {
// http://www.microsoft.com/en-za/download/details.aspx?id=5313 -- "Stripped Down Direct3D: Xbox 360 Command Buffer and Resource Management" // http://www.microsoft.com/en-za/download/details.aspx?id=5313 -- "Stripped Down Direct3D: Xbox 360 Command Buffer and Resource Management"
void xeVdGetCurrentDisplayGamma(uint32_t* arg0, float* arg1) {
*arg0 = 2;
*arg1 = 2.22222233f;
}
SHIM_CALL VdGetCurrentDisplayGamma_shim( SHIM_CALL VdGetCurrentDisplayGamma_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t arg0_ptr = SHIM_GET_ARG_32(0); uint32_t arg0_ptr = SHIM_GET_ARG_32(0);
@ -58,14 +52,8 @@ SHIM_CALL VdGetCurrentDisplayGamma_shim(
"VdGetCurrentDisplayGamma(%.8X, %.8X)", "VdGetCurrentDisplayGamma(%.8X, %.8X)",
arg0_ptr, arg1_ptr); arg0_ptr, arg1_ptr);
uint32_t arg0 = 0; SHIM_SET_MEM_32(arg0_ptr, 2);
union { SHIM_SET_MEM_F32(arg1_ptr, 2.22222233f);
float float_value;
uint32_t uint_value;
} arg1;
xeVdGetCurrentDisplayGamma(&arg0, &arg1.float_value);
SHIM_SET_MEM_32(arg0_ptr, arg0);
SHIM_SET_MEM_32(arg1_ptr, arg1.uint_value);
} }
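The gamma value has to land in guest memory byte-swapped like every other field; SHIM_SET_MEM_F32 is the float-typed variant of the stores used elsewhere. As a hedged sketch of what such a store amounts to (not the macro's actual definition):

#include <cstdint>
#include <cstring>

// Sketch: reinterpret the float's bits, swap to big-endian, store 4 bytes.
void StoreSwappedF32(uint8_t* dest, float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits = ((bits & 0x000000FFu) << 24) | ((bits & 0x0000FF00u) << 8) |
         ((bits & 0x00FF0000u) >> 8) | ((bits & 0xFF000000u) >> 24);
  std::memcpy(dest, &bits, sizeof(bits));
}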
@ -103,22 +91,16 @@ SHIM_CALL VdGetCurrentDisplayInformation_shim(
} }
uint32_t xeVdQueryVideoFlags() {
// ?
return 0x00000006;
}
SHIM_CALL VdQueryVideoFlags_shim( SHIM_CALL VdQueryVideoFlags_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
XELOGD( XELOGD(
"VdQueryVideoFlags()"); "VdQueryVideoFlags()");
SHIM_SET_RETURN_64(xeVdQueryVideoFlags()); SHIM_SET_RETURN_64(0x00000006);
} }
void xeVdQueryVideoMode(X_VIDEO_MODE *video_mode, bool swap) { void xeVdQueryVideoMode(X_VIDEO_MODE* video_mode) {
if (video_mode == NULL) { if (video_mode == NULL) {
return; return;
} }
@ -134,17 +116,16 @@ void xeVdQueryVideoMode(X_VIDEO_MODE *video_mode, bool swap) {
video_mode->unknown_0x8a = 0x8A; video_mode->unknown_0x8a = 0x8A;
video_mode->unknown_0x01 = 0x01; video_mode->unknown_0x01 = 0x01;
if (swap) { // TODO(benvanik): auto swap structure.
video_mode->display_width = poly::byte_swap(video_mode->display_width); video_mode->display_width = poly::byte_swap(video_mode->display_width);
video_mode->display_height = poly::byte_swap(video_mode->display_height); video_mode->display_height = poly::byte_swap(video_mode->display_height);
video_mode->is_interlaced = poly::byte_swap(video_mode->is_interlaced); video_mode->is_interlaced = poly::byte_swap(video_mode->is_interlaced);
video_mode->is_widescreen = poly::byte_swap(video_mode->is_widescreen); video_mode->is_widescreen = poly::byte_swap(video_mode->is_widescreen);
video_mode->is_hi_def = poly::byte_swap(video_mode->is_hi_def); video_mode->is_hi_def = poly::byte_swap(video_mode->is_hi_def);
video_mode->refresh_rate = poly::byte_swap(video_mode->refresh_rate); video_mode->refresh_rate = poly::byte_swap(video_mode->refresh_rate);
video_mode->video_standard = poly::byte_swap(video_mode->video_standard); video_mode->video_standard = poly::byte_swap(video_mode->video_standard);
video_mode->unknown_0x8a = poly::byte_swap(video_mode->unknown_0x8a); video_mode->unknown_0x8a = poly::byte_swap(video_mode->unknown_0x8a);
video_mode->unknown_0x01 = poly::byte_swap(video_mode->unknown_0x01); video_mode->unknown_0x01 = poly::byte_swap(video_mode->unknown_0x01);
}
} }
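One possible shape for the "auto swap structure" TODO above, purely illustrative and not part of this commit, is a wrapper type that swaps on assignment so the query code could write natural host values and skip the per-field byte_swap pass:

// Illustrative only: a big-endian field wrapper; names invented for the sketch.
template <typename T>
struct be_sketch {
  be_sketch& operator=(T value) {
    storage_ = poly::byte_swap(value);
    return *this;
  }
  operator T() const { return poly::byte_swap(storage_); }
  T storage_;
};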
@ -157,23 +138,7 @@ SHIM_CALL VdQueryVideoMode_shim(
"VdQueryVideoMode(%.8X)", "VdQueryVideoMode(%.8X)",
video_mode_ptr); video_mode_ptr);
xeVdQueryVideoMode(video_mode, true); xeVdQueryVideoMode(video_mode);
}
void xeVdInitializeEngines(uint32_t unk0, uint32_t callback, uint32_t unk1,
uint32_t unk2_ptr, uint32_t unk3_ptr) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
}
// r3 = 0x4F810000
// r4 = function ptr (cleanup callback?)
// r5 = 0
// r6/r7 = some binary data in .data
} }
@ -188,8 +153,11 @@ SHIM_CALL VdInitializeEngines_shim(
XELOGD( XELOGD(
"VdInitializeEngines(%.8X, %.8X, %.8X, %.8X, %.8X)", "VdInitializeEngines(%.8X, %.8X, %.8X, %.8X, %.8X)",
unk0, callback, unk1, unk2_ptr, unk3_ptr); unk0, callback, unk1, unk2_ptr, unk3_ptr);
xeVdInitializeEngines(unk0, callback, unk1, unk2_ptr, unk3_ptr); // r3 = 0x4F810000
// r4 = function ptr (cleanup callback?)
// r5 = 0
// r6/r7 = some binary data in .data
} }
@ -204,9 +172,15 @@ SHIM_CALL VdShutdownEngines_shim(
} }
void xeVdSetGraphicsInterruptCallback(uint32_t callback, uint32_t user_data) { SHIM_CALL VdSetGraphicsInterruptCallback_shim(
KernelState* state = shared_kernel_state_; PPCContext* ppc_state, KernelState* state) {
assert_not_null(state); uint32_t callback = SHIM_GET_ARG_32(0);
uint32_t user_data = SHIM_GET_ARG_32(1);
XELOGD(
"VdSetGraphicsInterruptCallback(%.8X, %.8X)",
callback, user_data);
GraphicsSystem* gs = state->emulator()->graphics_system(); GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) { if (!gs) {
return; return;
@ -220,22 +194,15 @@ void xeVdSetGraphicsInterruptCallback(uint32_t callback, uint32_t user_data) {
} }
SHIM_CALL VdSetGraphicsInterruptCallback_shim( SHIM_CALL VdInitializeRingBuffer_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t callback = SHIM_GET_ARG_32(0); uint32_t ptr = SHIM_GET_ARG_32(0);
uint32_t user_data = SHIM_GET_ARG_32(1); uint32_t page_count = SHIM_GET_ARG_32(1);
XELOGD( XELOGD(
"VdSetGraphicsInterruptCallback(%.8X, %.8X)", "VdInitializeRingBuffer(%.8X, %.8X)",
callback, user_data); ptr, page_count);
xeVdSetGraphicsInterruptCallback(callback, user_data);
}
void xeVdInitializeRingBuffer(uint32_t ptr, uint32_t page_count) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system(); GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) { if (!gs) {
return; return;
@ -252,22 +219,15 @@ void xeVdInitializeRingBuffer(uint32_t ptr, uint32_t page_count) {
} }
SHIM_CALL VdInitializeRingBuffer_shim( SHIM_CALL VdEnableRingBufferRPtrWriteBack_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t ptr = SHIM_GET_ARG_32(0); uint32_t ptr = SHIM_GET_ARG_32(0);
uint32_t page_count = SHIM_GET_ARG_32(1); uint32_t block_size = SHIM_GET_ARG_32(1);
XELOGD( XELOGD(
"VdInitializeRingBuffer(%.8X, %.8X)", "VdEnableRingBufferRPtrWriteBack(%.8X, %.8X)",
ptr, page_count); ptr, block_size);
xeVdInitializeRingBuffer(ptr, page_count);
}
void xeVdEnableRingBufferRPtrWriteBack(uint32_t ptr, uint32_t block_size) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system(); GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) { if (!gs) {
return; return;
@ -284,37 +244,18 @@ void xeVdEnableRingBufferRPtrWriteBack(uint32_t ptr, uint32_t block_size) {
//((p + 0x3C) & 0x1FFFFFFF) + ((((p + 0x3C) >> 20) + 0x200) & 0x1000) //((p + 0x3C) & 0x1FFFFFFF) + ((((p + 0x3C) >> 20) + 0x200) & 0x1000)
//also 0x3C offset into WriteBacks is PrimaryRingBufferReadIndex //also 0x3C offset into WriteBacks is PrimaryRingBufferReadIndex
//(1:17:38 AM) Rick: .text:8201B348 lwz r11, 0x2B10(r31) //(1:17:38 AM) Rick: .text:8201B348 lwz r11, 0x2B10(r31)
//(1:17:38 AM) Rick: .text:8201B34C addi r11, r11, 0x3C //(1:17:38 AM) Rick: .text:8201B34C addi r11, r11, 0x3C
//(1:17:38 AM) Rick: .text:8201B350 srwi r10, r11, 20 # r10 = r11 >> 20 //(1:17:38 AM) Rick: .text:8201B350 srwi r10, r11, 20 # r10 = r11 >> 20
//(1:17:38 AM) Rick: .text:8201B354 clrlwi r11, r11, 3 # r11 = r11 & 0x1FFFFFFF //(1:17:38 AM) Rick: .text:8201B354 clrlwi r11, r11, 3 # r11 = r11 & 0x1FFFFFFF
//(1:17:38 AM) Rick: .text:8201B358 addi r10, r10, 0x200 //(1:17:38 AM) Rick: .text:8201B358 addi r10, r10, 0x200
//(1:17:39 AM) Rick: .text:8201B35C rlwinm r10, r10, 0,19,19 # r10 = r10 & 0x1000 //(1:17:39 AM) Rick: .text:8201B35C rlwinm r10, r10, 0,19,19 # r10 = r10 & 0x1000
//(1:17:39 AM) Rick: .text:8201B360 add r3, r10, r11 //(1:17:39 AM) Rick: .text:8201B360 add r3, r10, r11
//(1:17:39 AM) Rick: .text:8201B364 bl VdEnableRingBufferRPtrWriteBack //(1:17:39 AM) Rick: .text:8201B364 bl VdEnableRingBufferRPtrWriteBack
// TODO(benvanik): something? // TODO(benvanik): something?
} }
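The disassembly quoted above reduces to a single expression; a small worked sketch of the address the title computes before calling VdEnableRingBufferRPtrWriteBack (helper name invented):

// From the listing: r11 = base + 0x3C (PrimaryRingBufferReadIndex), then
// r3 = (r11 & 0x1FFFFFFF) + (((r11 >> 20) + 0x200) & 0x1000).
uint32_t ComputeRPtrWriteBackAddress(uint32_t writebacks_base) {
  uint32_t p = writebacks_base + 0x3C;
  return (p & 0x1FFFFFFF) + (((p >> 20) + 0x200) & 0x1000);
}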
SHIM_CALL VdEnableRingBufferRPtrWriteBack_shim(
PPCContext* ppc_state, KernelState* state) {
uint32_t ptr = SHIM_GET_ARG_32(0);
uint32_t block_size = SHIM_GET_ARG_32(1);
XELOGD(
"VdEnableRingBufferRPtrWriteBack(%.8X, %.8X)",
ptr, block_size);
xeVdEnableRingBufferRPtrWriteBack(ptr, block_size);
}
void xeVdGetSystemCommandBuffer(uint32_t* p0, uint32_t* p1) {
*p0 = 0xBEEF0000;
*p1 = 0xBEEF0001;
}
SHIM_CALL VdGetSystemCommandBuffer_shim( SHIM_CALL VdGetSystemCommandBuffer_shim(
PPCContext* ppc_state, KernelState* state) { PPCContext* ppc_state, KernelState* state) {
uint32_t p0_ptr = SHIM_GET_ARG_32(0); uint32_t p0_ptr = SHIM_GET_ARG_32(0);
@ -325,23 +266,8 @@ SHIM_CALL VdGetSystemCommandBuffer_shim(
p0_ptr, p0_ptr,
p1_ptr); p1_ptr);
uint32_t p0 = 0; SHIM_SET_MEM_32(p0_ptr, 0xBEEF0000);
uint32_t p1 = 0; SHIM_SET_MEM_32(p1_ptr, 0xBEEF0001);
xeVdGetSystemCommandBuffer(&p0, &p1);
SHIM_SET_MEM_32(p0_ptr, p0);
SHIM_SET_MEM_32(p1_ptr, p1);
}
void xeVdSetSystemCommandBufferGpuIdentifierAddress(uint32_t unk) {
KernelState* state = shared_kernel_state_;
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
}
// r3 = 0x2B10(d3d?) + 8
} }
@ -352,8 +278,8 @@ SHIM_CALL VdSetSystemCommandBufferGpuIdentifierAddress_shim(
XELOGD( XELOGD(
"VdSetSystemCommandBufferGpuIdentifierAddress(%.8X)", "VdSetSystemCommandBufferGpuIdentifierAddress(%.8X)",
unk); unk);
xeVdSetSystemCommandBufferGpuIdentifierAddress(unk); // r3 = 0x2B10(d3d?) + 8
} }
@ -487,32 +413,30 @@ void xe::kernel::xboxkrnl::RegisterVideoExports(
// Pointer to a global D3D device. Games only seem to set this, so we don't // Pointer to a global D3D device. Games only seem to set this, so we don't
// have to do anything. We may want to read it back later, though. // have to do anything. We may want to read it back later, though.
uint32_t pVdGlobalDevice = (uint32_t)memory->HeapAlloc(0, 4, 0); uint32_t pVdGlobalDevice = (uint32_t)memory->HeapAlloc(0, 4, 0);
export_resolver->SetVariableMapping( export_resolver->SetVariableMapping("xboxkrnl.exe", ordinals::VdGlobalDevice,
"xboxkrnl.exe", ordinals::VdGlobalDevice, pVdGlobalDevice);
pVdGlobalDevice);
poly::store_and_swap<uint32_t>(mem + pVdGlobalDevice, 0); poly::store_and_swap<uint32_t>(mem + pVdGlobalDevice, 0);
// VdGlobalXamDevice (4b) // VdGlobalXamDevice (4b)
// Pointer to the XAM D3D device, which we don't have. // Pointer to the XAM D3D device, which we don't have.
uint32_t pVdGlobalXamDevice = (uint32_t)memory->HeapAlloc(0, 4, 0); uint32_t pVdGlobalXamDevice = (uint32_t)memory->HeapAlloc(0, 4, 0);
export_resolver->SetVariableMapping( export_resolver->SetVariableMapping(
"xboxkrnl.exe", ordinals::VdGlobalXamDevice, "xboxkrnl.exe", ordinals::VdGlobalXamDevice, pVdGlobalXamDevice);
pVdGlobalXamDevice);
poly::store_and_swap<uint32_t>(mem + pVdGlobalXamDevice, 0); poly::store_and_swap<uint32_t>(mem + pVdGlobalXamDevice, 0);
// VdGpuClockInMHz (4b) // VdGpuClockInMHz (4b)
// GPU clock. Xenos is 500MHz. Hope nothing is relying on this timing... // GPU clock. Xenos is 500MHz. Hope nothing is relying on this timing...
uint32_t pVdGpuClockInMHz = (uint32_t)memory->HeapAlloc(0, 4, 0); uint32_t pVdGpuClockInMHz = (uint32_t)memory->HeapAlloc(0, 4, 0);
export_resolver->SetVariableMapping( export_resolver->SetVariableMapping("xboxkrnl.exe", ordinals::VdGpuClockInMHz,
"xboxkrnl.exe", ordinals::VdGpuClockInMHz, pVdGpuClockInMHz);
pVdGpuClockInMHz);
poly::store_and_swap<uint32_t>(mem + pVdGpuClockInMHz, 500); poly::store_and_swap<uint32_t>(mem + pVdGpuClockInMHz, 500);
// VdHSIOCalibrationLock (28b) // VdHSIOCalibrationLock (28b)
// CriticalSection. // CriticalSection.
uint32_t pVdHSIOCalibrationLock = (uint32_t)memory->HeapAlloc(0, 28, 0); uint32_t pVdHSIOCalibrationLock = (uint32_t)memory->HeapAlloc(0, 28, 0);
export_resolver->SetVariableMapping( export_resolver->SetVariableMapping(
"xboxkrnl.exe", ordinals::VdHSIOCalibrationLock, "xboxkrnl.exe", ordinals::VdHSIOCalibrationLock, pVdHSIOCalibrationLock);
pVdHSIOCalibrationLock); auto hsio_lock =
xeRtlInitializeCriticalSectionAndSpinCount(pVdHSIOCalibrationLock, 10000); reinterpret_cast<X_RTL_CRITICAL_SECTION*>(mem + pVdHSIOCalibrationLock);
xeRtlInitializeCriticalSectionAndSpinCount(hsio_lock, 10000);
} }
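Each exported Vd variable above follows the same three steps: allocate a few bytes of guest heap, bind the ordinal to that address, then seed the value big-endian. A sketch of the pattern with the pieces this diff already uses (the helper name and parameter types are assumptions):

// Sketch: allocate guest-visible storage for a 32-bit export, map the
// ordinal to it, and write the initial value byte-swapped.
uint32_t ExportU32Variable(Memory* memory, ExportResolver* export_resolver,
                           uint8_t* mem, uint32_t ordinal, uint32_t value) {
  uint32_t guest_addr = (uint32_t)memory->HeapAlloc(0, 4, 0);
  export_resolver->SetVariableMapping("xboxkrnl.exe", ordinal, guest_addr);
  poly::store_and_swap<uint32_t>(mem + guest_addr, value);
  return guest_addr;
}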
@ -68,7 +68,7 @@ void XObject::Release() {
} }
X_STATUS XObject::Delete() { X_STATUS XObject::Delete() {
return shared_kernel_state_->object_table()->RemoveHandle(handle_); return kernel_state_->object_table()->RemoveHandle(handle_);
} }
uint32_t XObject::TimeoutTicksToMs(int64_t timeout_ticks) { uint32_t XObject::TimeoutTicksToMs(int64_t timeout_ticks) {
@ -149,11 +149,11 @@ X_STATUS XObject::WaitMultiple(
} }
void XObject::LockType() { void XObject::LockType() {
xe_mutex_lock(shared_kernel_state_->object_mutex_); xe_mutex_lock(KernelState::shared()->object_mutex_);
} }
void XObject::UnlockType() { void XObject::UnlockType() {
xe_mutex_unlock(shared_kernel_state_->object_mutex_); xe_mutex_unlock(KernelState::shared()->object_mutex_);
} }
void XObject::SetNativePointer(uint32_t native_ptr) { void XObject::SetNativePointer(uint32_t native_ptr) {