kernel: separate host objects from regular handle range

Burnout Paradise statically expects specific thread handle values, based on how many objects it knows it will be allocating ahead of time.
From this, it calculates an ID from the offset of each thread handle from a base handle: the handle it expects the first such thread to be assigned.
That base handle is hard-coded in the executable rather than determined at runtime.
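
A hedged sketch of that calculation (illustrative only: the constant, the helper name, and whether the game also divides by the 4-byte handle stride are assumptions, not values taken from the game binary):

#include <cstdint>

// Illustrative only: mirrors the ID derivation described above.
constexpr uint32_t kExpectedFirstThreadHandle = 0xF8000010;  // assumed hard-coded base

uint32_t ThreadIdFromHandle(uint32_t thread_handle) {
  // Offset of the actual handle from the statically expected first handle.
  return thread_handle - kExpectedFirstThreadHandle;
}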

Host objects sharing this handle range pushed these thread handles higher than what the game expects.
Moving the host objects out of the range, and allowing handle 0xF8000000 itself to be assigned, lets the thread handles fall exactly within the range the game expects.

It is not clear which handle range the host objects should occupy; for now, they are 0-based rather than 0xF8000000-based.
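
The resulting handle layout can be summarised with a small sketch based on the ObjectTable changes below (kHandleBase mirrors XObject::kHandleBase, 0xF8000000; the free-standing helpers are illustrative, not functions in the codebase):

#include <cstdint>

// Guest handles remain 0xF8000000-based; host handles are 0-based, spaced 4
// apart, and host slot 0 is never handed out so a host handle is never 0.
constexpr uint32_t kHandleBase = 0xF8000000;

uint32_t EncodeHandle(uint32_t slot, bool host) {
  uint32_t handle = slot << 2;       // handles are spaced 4 apart
  if (!host) handle += kHandleBase;  // guest handles start at the base
  return handle;
}

uint32_t HandleSlot(uint32_t handle, bool* out_host) {
  bool host = handle < kHandleBase;  // anything below the base is a host handle
  if (out_host) *out_host = host;
  if (!host) handle -= kHandleBase;
  return handle >> 2;
}
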
Bo Anderson authored 2022-12-06 15:10:09 +00:00, committed by JeBobs
parent da2710f18c
commit 9489161d0b
8 changed files with 116 additions and 51 deletions


@@ -21,7 +21,7 @@ namespace kernel {
KernelModule::KernelModule(KernelState* kernel_state,
const std::string_view path)
: XModule(kernel_state, ModuleType::kKernelModule) {
: XModule(kernel_state, ModuleType::kKernelModule, true) {
emulator_ = kernel_state->emulator();
memory_ = emulator_->memory();
export_resolver_ = kernel_state->emulator()->export_resolver();
@@ -29,9 +29,6 @@ KernelModule::KernelModule(KernelState* kernel_state,
path_ = path;
name_ = utf8::find_base_name_from_guest_path(path);
// Persist this object through reloads.
host_object_ = true;
// HACK: Allocates memory where xboxkrnl.exe would be!
// TODO: Need to free this memory when necessary.
auto heap = memory()->LookupHeap(0x80040000);


@@ -35,26 +35,37 @@ void ObjectTable::Reset() {
entry.object->Release();
}
}
for (uint32_t n = 0; n < host_table_capacity_; n++) {
ObjectTableEntry& entry = host_table_[n];
if (entry.object) {
entry.object->Release();
}
}
table_capacity_ = 0;
host_table_capacity_ = 0;
last_free_entry_ = 0;
last_free_host_entry_ = 0;
free(table_);
table_ = nullptr;
free(host_table_);
host_table_ = nullptr;
}
X_STATUS ObjectTable::FindFreeSlot(uint32_t* out_slot) {
X_STATUS ObjectTable::FindFreeSlot(uint32_t* out_slot, bool host) {
// Find a free slot.
uint32_t slot = last_free_entry_;
uint32_t slot = host ? last_free_host_entry_ : last_free_entry_;
uint32_t capacity = host ? host_table_capacity_ : table_capacity_;
uint32_t scan_count = 0;
while (scan_count < table_capacity_) {
ObjectTableEntry& entry = table_[slot];
while (scan_count < capacity) {
ObjectTableEntry& entry = host ? host_table_[slot] : table_[slot];
if (!entry.object) {
*out_slot = slot;
return X_STATUS_SUCCESS;
}
scan_count++;
slot = (slot + 1) % table_capacity_;
if (slot == 0) {
slot = (slot + 1) % capacity;
if (slot == 0 && host) {
// Never allow 0 handles.
scan_count++;
slot++;
@@ -62,23 +73,24 @@ X_STATUS ObjectTable::FindFreeSlot(uint32_t* out_slot) {
}
// Table out of slots, expand.
uint32_t new_table_capacity = std::max(16 * 1024u, table_capacity_ * 2);
if (!Resize(new_table_capacity)) {
uint32_t new_table_capacity = std::max(16 * 1024u, capacity * 2);
if (!Resize(new_table_capacity, host)) {
return X_STATUS_NO_MEMORY;
}
// Never allow 0 handles.
slot = ++last_free_entry_;
// Never allow 0 handles on host.
slot = host ? ++last_free_host_entry_ : last_free_entry_++;
*out_slot = slot;
return X_STATUS_SUCCESS;
}
bool ObjectTable::Resize(uint32_t new_capacity) {
bool ObjectTable::Resize(uint32_t new_capacity, bool host) {
uint32_t capacity = host ? host_table_capacity_ : table_capacity_;
uint32_t new_size = new_capacity * sizeof(ObjectTableEntry);
uint32_t old_size = table_capacity_ * sizeof(ObjectTableEntry);
auto new_table =
reinterpret_cast<ObjectTableEntry*>(realloc(table_, new_size));
uint32_t old_size = capacity * sizeof(ObjectTableEntry);
auto new_table = reinterpret_cast<ObjectTableEntry*>(
realloc(host ? host_table_ : table_, new_size));
if (!new_table) {
return false;
}
@@ -89,9 +101,15 @@ bool ObjectTable::Resize(uint32_t new_capacity) {
new_size - old_size);
}
last_free_entry_ = table_capacity_;
table_capacity_ = new_capacity;
table_ = new_table;
if (host) {
last_free_host_entry_ = capacity;
host_table_capacity_ = new_capacity;
host_table_ = new_table;
} else {
last_free_entry_ = capacity;
table_capacity_ = new_capacity;
table_ = new_table;
}
return true;
}
@@ -105,14 +123,16 @@ X_STATUS ObjectTable::AddHandle(XObject* object, X_HANDLE* out_handle) {
// Find a free slot.
uint32_t slot = 0;
result = FindFreeSlot(&slot);
bool host_object = object->is_host_object();
result = FindFreeSlot(&slot, host_object);
// Stash.
if (XSUCCEEDED(result)) {
ObjectTableEntry& entry = table_[slot];
ObjectTableEntry& entry = host_object ? host_table_[slot] : table_[slot];
entry.object = object;
entry.handle_ref_count = 1;
handle = XObject::kHandleBase + (slot << 2);
handle = slot << 2;
if (!host_object) handle += XObject::kHandleBase;
object->handles().push_back(handle);
// Retain so long as the object is in the table.
@@ -222,6 +242,14 @@ std::vector<object_ref<XObject>> ObjectTable::GetAllObjects() {
auto lock = global_critical_region_.Acquire();
std::vector<object_ref<XObject>> results;
for (uint32_t slot = 0; slot < host_table_capacity_; slot++) {
auto& entry = host_table_[slot];
if (entry.object && std::find(results.begin(), results.end(),
entry.object) == results.end()) {
entry.object->Retain();
results.push_back(object_ref<XObject>(entry.object));
}
}
for (uint32_t slot = 0; slot < table_capacity_; slot++) {
auto& entry = table_[slot];
if (entry.object && std::find(results.begin(), results.end(),
@@ -238,7 +266,7 @@ void ObjectTable::PurgeAllObjects() {
auto lock = global_critical_region_.Acquire();
for (uint32_t slot = 0; slot < table_capacity_; slot++) {
auto& entry = table_[slot];
if (entry.object && !entry.object->is_host_object()) {
if (entry.object) {
entry.handle_ref_count = 0;
entry.object->Release();
@@ -259,8 +287,13 @@ ObjectTable::ObjectTableEntry* ObjectTable::LookupTableInLock(X_HANDLE handle) {
}
// Lower 2 bits are ignored.
uint32_t slot = GetHandleSlot(handle);
if (slot <= table_capacity_) {
bool host = (handle < XObject::kHandleBase);
uint32_t slot = GetHandleSlot(handle, host);
if (host) {
if (slot <= host_table_capacity_) {
return &host_table_[slot];
}
} else if (slot <= table_capacity_) {
return &table_[slot];
}
@@ -288,10 +321,18 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) {
}
// Lower 2 bits are ignored.
uint32_t slot = GetHandleSlot(handle);
bool host = (handle < XObject::kHandleBase);
uint32_t slot = GetHandleSlot(handle, host);
// Verify slot.
if (slot < table_capacity_) {
if (host) {
if (slot < host_table_capacity_) {
ObjectTableEntry& entry = host_table_[slot];
if (entry.object) {
object = entry.object;
}
}
} else if (slot < table_capacity_) {
ObjectTableEntry& entry = table_[slot];
if (entry.object) {
object = entry.object;
@@ -313,6 +354,15 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) {
void ObjectTable::GetObjectsByType(XObject::Type type,
std::vector<object_ref<XObject>>* results) {
auto global_lock = global_critical_region_.Acquire();
for (uint32_t slot = 0; slot < host_table_capacity_; ++slot) {
auto& entry = host_table_[slot];
if (entry.object) {
if (entry.object->type() == type) {
entry.object->Retain();
results->push_back(object_ref<XObject>(entry.object));
}
}
}
for (uint32_t slot = 0; slot < table_capacity_; ++slot) {
auto& entry = table_[slot];
if (entry.object) {
@@ -377,6 +427,12 @@ X_STATUS ObjectTable::GetObjectByName(const std::string_view name,
}
bool ObjectTable::Save(ByteStream* stream) {
stream->Write<uint32_t>(host_table_capacity_);
for (uint32_t i = 0; i < host_table_capacity_; i++) {
auto& entry = host_table_[i];
stream->Write<int32_t>(entry.handle_ref_count);
}
stream->Write<uint32_t>(table_capacity_);
for (uint32_t i = 0; i < table_capacity_; i++) {
auto& entry = table_[i];
@@ -387,7 +443,14 @@ bool ObjectTable::Save(ByteStream* stream) {
}
bool ObjectTable::Restore(ByteStream* stream) {
Resize(stream->Read<uint32_t>());
Resize(stream->Read<uint32_t>(), true);
for (uint32_t i = 0; i < host_table_capacity_; i++) {
auto& entry = host_table_[i];
// entry.object = nullptr;
entry.handle_ref_count = stream->Read<int32_t>();
}
Resize(stream->Read<uint32_t>(), false);
for (uint32_t i = 0; i < table_capacity_; i++) {
auto& entry = table_[i];
// entry.object = nullptr;
@@ -398,11 +461,13 @@ bool ObjectTable::Restore(ByteStream* stream) {
}
X_STATUS ObjectTable::RestoreHandle(X_HANDLE handle, XObject* object) {
uint32_t slot = GetHandleSlot(handle);
assert_true(table_capacity_ >= slot);
bool host = (handle < XObject::kHandleBase);
uint32_t slot = GetHandleSlot(handle, host);
uint32_t capacity = host ? host_table_capacity_ : table_capacity_;
assert_true(capacity >= slot);
if (table_capacity_ >= slot) {
auto& entry = table_[slot];
if (capacity >= slot) {
auto& entry = host ? host_table_[slot] : table_[slot];
entry.object = object;
object->Retain();
}


@@ -49,10 +49,10 @@ class ObjectTable {
X_STATUS RestoreHandle(X_HANDLE handle, XObject* object);
template <typename T>
object_ref<T> LookupObject(X_HANDLE handle, bool already_locked = false) {
auto object = LookupObject(handle, already_locked);
if (T::kObjectType == XObject::Type::Socket) {
object = LookupObject((handle | 0xF8000000), false);
handle |= XObject::kHandleBase;
}
auto object = LookupObject(handle, already_locked);
if (object) {
assert_true(object->type() == T::kObjectType);
}
@@ -95,16 +95,20 @@ class ObjectTable {
std::vector<object_ref<XObject>>* results);
X_HANDLE TranslateHandle(X_HANDLE handle);
static constexpr uint32_t GetHandleSlot(X_HANDLE handle) {
return (handle - XObject::kHandleBase) >> 2;
static constexpr uint32_t GetHandleSlot(X_HANDLE handle, bool host) {
if (!host) handle -= XObject::kHandleBase;
return handle >> 2;
}
X_STATUS FindFreeSlot(uint32_t* out_slot);
bool Resize(uint32_t new_capacity);
X_STATUS FindFreeSlot(uint32_t* out_slot, bool host);
bool Resize(uint32_t new_capacity, bool host);
xe::global_critical_region global_critical_region_;
uint32_t table_capacity_ = 0;
uint32_t host_table_capacity_ = 0;
ObjectTableEntry* table_ = nullptr;
ObjectTableEntry* host_table_ = nullptr;
uint32_t last_free_entry_ = 0;
uint32_t last_free_host_entry_ = 0;
std::unordered_map<string_key_case, X_HANDLE> name_table_;
};


@@ -19,8 +19,9 @@
namespace xe {
namespace kernel {
XModule::XModule(KernelState* kernel_state, ModuleType module_type)
: XObject(kernel_state, kObjectType),
XModule::XModule(KernelState* kernel_state, ModuleType module_type,
bool host_object)
: XObject(kernel_state, kObjectType, host_object),
module_type_(module_type),
processor_module_(nullptr),
hmodule_ptr_(0) {


@@ -61,7 +61,8 @@ class XModule : public XObject {
static const XObject::Type kObjectType = XObject::Type::Module;
XModule(KernelState* kernel_state, ModuleType module_type);
XModule(KernelState* kernel_state, ModuleType module_type,
bool host_object = false);
virtual ~XModule();
ModuleType module_type() const { return module_type_; }


@@ -34,12 +34,13 @@ XObject::XObject(Type type)
handles_.reserve(10);
}
XObject::XObject(KernelState* kernel_state, Type type)
XObject::XObject(KernelState* kernel_state, Type type, bool host_object)
: kernel_state_(kernel_state),
type_(type),
pointer_ref_count_(1),
guest_object_ptr_(0),
allocated_guest_object_(false) {
allocated_guest_object_(false),
host_object_(host_object) {
handles_.reserve(10);
// TODO: Assert kernel_state != nullptr in this constructor.


@@ -136,7 +136,7 @@ class XObject {
};
XObject(Type type);
XObject(KernelState* kernel_state, Type type);
XObject(KernelState* kernel_state, Type type, bool host_object = false);
virtual ~XObject();
Emulator* emulator() const;


@@ -60,7 +60,7 @@ XThread::XThread(KernelState* kernel_state, uint32_t stack_size,
uint32_t xapi_thread_startup, uint32_t start_address,
uint32_t start_context, uint32_t creation_flags,
bool guest_thread, bool main_thread)
: XObject(kernel_state, kObjectType),
: XObject(kernel_state, kObjectType, !guest_thread),
thread_id_(++next_xthread_id_),
guest_thread_(guest_thread),
main_thread_(main_thread),
@@ -79,10 +79,6 @@ XThread::XThread(KernelState* kernel_state, uint32_t stack_size,
creation_params_.stack_size = 16 * 1024;
}
if (!guest_thread_) {
host_object_ = true;
}
// The kernel does not take a reference. We must unregister in the dtor.
kernel_state_->RegisterThread(this);
}