[Kernel] Trigger memory callbacks after file read
parent 028c784c5c
commit f858631245

src/xenia/kernel/xboxkrnl/xboxkrnl_io.cc
@@ -170,23 +170,15 @@ dword_result_t NtReadFile(dword_t file_handle, dword_t event_handle,
   if (XSUCCEEDED(result)) {
     if (true || file->is_synchronous()) {
-      // some games NtReadFile() directly into texture memory
-      auto heap = kernel_memory()->LookupHeap(buffer.guest_address());
-      if (heap && heap->IsGuestPhysicalHeap()) {
-        kernel_memory()->TriggerPhysicalMemoryCallbacks(
-            xe::global_critical_region::AcquireDirect(), buffer.guest_address(),
-            buffer_length, true, true);
-      }
-
       // Synchronous.
-      size_t bytes_read = 0;
+      uint32_t bytes_read = 0;
       result = file->Read(
-          buffer, buffer_length,
+          buffer.guest_address(), buffer_length,
           byte_offset_ptr ? static_cast<uint64_t>(*byte_offset_ptr) : -1,
           &bytes_read, apc_context);
       if (io_status_block) {
         io_status_block->status = result;
-        io_status_block->information = static_cast<uint32_t>(bytes_read);
+        io_status_block->information = bytes_read;
       }
 
       // Queue the APC callback. It must be delivered via the APC mechanism even
@@ -218,7 +210,8 @@ dword_result_t NtReadFile(dword_t file_handle, dword_t event_handle,
           state, file,
           (XAsyncRequest::CompletionCallback)xeNtReadFileCompleted,
           call_state);*/
-      // result = file->Read(buffer, buffer_length, byte_offset, request);
+      // result = file->Read(buffer.guest_address(), buffer_length, byte_offset,
+      // request);
       if (io_status_block) {
         io_status_block->status = X_STATUS_PENDING;
         io_status_block->information = 0;
@@ -270,13 +263,13 @@ dword_result_t NtWriteFile(dword_t file_handle, dword_t event_handle,
     // TODO(benvanik): async path.
     if (true || file->is_synchronous()) {
       // Synchronous request.
-      size_t bytes_written = 0;
+      uint32_t bytes_written = 0;
       result = file->Write(
-          buffer, buffer_length,
+          buffer.guest_address(), buffer_length,
           byte_offset_ptr ? static_cast<uint64_t>(*byte_offset_ptr) : -1,
           &bytes_written, apc_context);
       if (XSUCCEEDED(result)) {
-        info = (int32_t)bytes_written;
+        info = bytes_written;
       }
 
       if (!file->is_synchronous()) {
@@ -515,10 +508,12 @@ dword_result_t NtQueryInformationFile(
       // XctdDecompression.
       /*
       uint32_t magic;
-      size_t bytes_read;
-      size_t cur_pos = file->position();
+      uint32_t bytes_read;
+      uint64_t cur_pos = file->position();
 
       file->set_position(0);
+      // FIXME(Triang3l): For now, XFile can be read only to guest buffers -
+      // this line won't work, implement reading to host buffers if needed.
       result = file->Read(&magic, sizeof(magic), 0, &bytes_read);
       if (XSUCCEEDED(result)) {
         if (bytes_read == sizeof(magic)) {

src/xenia/kernel/xfile.cc
@@ -13,8 +13,10 @@
 #include "xenia/base/byte_stream.h"
 #include "xenia/base/logging.h"
 #include "xenia/base/math.h"
+#include "xenia/base/mutex.h"
 #include "xenia/kernel/kernel_state.h"
 #include "xenia/kernel/xevent.h"
+#include "xenia/memory.h"
 
 namespace xe {
 namespace kernel {
@@ -87,19 +89,72 @@ X_STATUS XFile::QueryDirectory(X_FILE_DIRECTORY_INFORMATION* out_info,
   return X_STATUS_SUCCESS;
 }
 
-X_STATUS XFile::Read(void* buffer, size_t buffer_length, size_t byte_offset,
-                     size_t* out_bytes_read, uint32_t apc_context) {
-  if (byte_offset == -1) {
+X_STATUS XFile::Read(uint32_t buffer_guest_address, uint32_t buffer_length,
+                     uint64_t byte_offset, uint32_t* out_bytes_read,
+                     uint32_t apc_context) {
+  if (byte_offset == uint64_t(-1)) {
     // Read from current position.
     byte_offset = position_;
   }
 
   size_t bytes_read = 0;
-  X_STATUS result =
-      file_->ReadSync(buffer, buffer_length, byte_offset, &bytes_read);
-  if (XSUCCEEDED(result)) {
-    position_ += bytes_read;
-  }
+  X_STATUS result = X_STATUS_SUCCESS;
+  // Zero length means success for a valid file object according to Windows
+  // tests.
+  if (buffer_length) {
+    if (UINT32_MAX - buffer_guest_address < buffer_length) {
+      result = X_STATUS_ACCESS_VIOLATION;
+    } else {
+      // Games often read directly to texture/vertex buffer memory - in this
+      // case, invalidation notifications must be sent. However, having any
+      // memory callbacks in the range will result in STATUS_ACCESS_VIOLATION at
+      // least on Windows, without anything being read or any callbacks being
+      // triggered. So for physical memory, host protection must be bypassed,
+      // and invalidation callbacks must be triggered manually (it's also wrong
+      // to trigger invalidation callbacks before reading in this case, because
+      // during the read, the guest may still access the data around the buffer
+      // that is located in the same host pages as the buffer's start and end,
+      // on the GPU - and that must not trigger a race condition).
+      uint32_t buffer_guest_high_address =
+          buffer_guest_address + buffer_length - 1;
+      xe::BaseHeap* buffer_start_heap =
+          memory()->LookupHeap(buffer_guest_address);
+      const xe::BaseHeap* buffer_end_heap =
+          memory()->LookupHeap(buffer_guest_high_address);
+      if (!buffer_start_heap || !buffer_end_heap ||
+          buffer_start_heap->IsGuestPhysicalHeap() !=
+              buffer_end_heap->IsGuestPhysicalHeap() ||
+          (buffer_start_heap->IsGuestPhysicalHeap() &&
+           buffer_start_heap != buffer_end_heap)) {
+        result = X_STATUS_ACCESS_VIOLATION;
+      } else {
+        xe::PhysicalHeap* buffer_physical_heap =
+            buffer_start_heap->IsGuestPhysicalHeap()
+                ? static_cast<PhysicalHeap*>(buffer_start_heap)
+                : nullptr;
+        if (buffer_physical_heap &&
+            buffer_physical_heap->QueryRangeAccess(buffer_guest_address,
+                                                   buffer_guest_high_address) !=
+                xe::memory::PageAccess::kReadWrite) {
+          result = X_STATUS_ACCESS_VIOLATION;
+        } else {
+          result = file_->ReadSync(
+              buffer_physical_heap
+                  ? memory()->TranslatePhysical(buffer_guest_address)
+                  : memory()->TranslateVirtual(buffer_guest_address),
+              buffer_length, size_t(byte_offset), &bytes_read);
+          if (XSUCCEEDED(result)) {
+            if (buffer_physical_heap) {
+              buffer_physical_heap->TriggerCallbacks(
+                  xe::global_critical_region::AcquireDirect(),
+                  buffer_guest_address, buffer_length, true, true);
+            }
+            position_ += bytes_read;
+          }
+        }
+      }
+    }
+  }
 
   XIOCompletion::IONotification notify;
   notify.apc_context = apc_context;
@@ -109,24 +164,25 @@ X_STATUS XFile::Read(void* buffer, size_t buffer_length, size_t byte_offset,
   NotifyIOCompletionPorts(notify);
 
   if (out_bytes_read) {
-    *out_bytes_read = bytes_read;
+    *out_bytes_read = uint32_t(bytes_read);
   }
 
   async_event_->Set();
   return result;
 }
 
-X_STATUS XFile::Write(const void* buffer, size_t buffer_length,
-                      size_t byte_offset, size_t* out_bytes_written,
+X_STATUS XFile::Write(uint32_t buffer_guest_address, uint32_t buffer_length,
+                      uint64_t byte_offset, uint32_t* out_bytes_written,
                       uint32_t apc_context) {
-  if (byte_offset == -1) {
+  if (byte_offset == uint64_t(-1)) {
     // Write from current position.
     byte_offset = position_;
   }
 
   size_t bytes_written = 0;
   X_STATUS result =
-      file_->WriteSync(buffer, buffer_length, byte_offset, &bytes_written);
+      file_->WriteSync(memory()->TranslateVirtual(buffer_guest_address),
+                       buffer_length, size_t(byte_offset), &bytes_written);
   if (XSUCCEEDED(result)) {
     position_ += bytes_written;
   }
@@ -139,7 +195,7 @@ X_STATUS XFile::Write(const void* buffer, size_t buffer_length,
   NotifyIOCompletionPorts(notify);
 
   if (out_bytes_written) {
-    *out_bytes_written = bytes_written;
+    *out_bytes_written = uint32_t(bytes_written);
   }
 
   async_event_->Set();
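For orientation, here is a condensed sketch of the order of operations the new XFile::Read establishes for physical-memory destinations. The identifiers physical_heap, guest_address, and length are illustrative stand-ins for the diff's buffer_physical_heap, buffer_guest_address, and buffer_length; the surrounding validation and the virtual-memory branch are omitted.

// 1. Confirm every page spanned by the guest buffer is currently read-write.
// 2. Read through the host translation of the physical address, bypassing
//    host page protection.
// 3. Only after the read succeeds, trigger the invalidation callbacks and
//    advance the file position.
if (physical_heap->QueryRangeAccess(guest_address, guest_address + length - 1) ==
    xe::memory::PageAccess::kReadWrite) {
  result = file_->ReadSync(memory()->TranslatePhysical(guest_address), length,
                           size_t(byte_offset), &bytes_read);
  if (XSUCCEEDED(result)) {
    physical_heap->TriggerCallbacks(xe::global_critical_region::AcquireDirect(),
                                    guest_address, length, true, true);
    position_ += bytes_read;
  }
}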

src/xenia/kernel/xfile.h
@@ -92,17 +92,22 @@ class XFile : public XObject {
   const std::string& path() const { return file_->entry()->path(); }
   const std::string& name() const { return file_->entry()->name(); }
 
-  size_t position() const { return position_; }
-  void set_position(size_t value) { position_ = value; }
+  uint64_t position() const { return position_; }
+  void set_position(uint64_t value) { position_ = value; }
 
   X_STATUS QueryDirectory(X_FILE_DIRECTORY_INFORMATION* out_info, size_t length,
                           const char* file_name, bool restart);
 
-  X_STATUS Read(void* buffer, size_t buffer_length, size_t byte_offset,
-                size_t* out_bytes_read, uint32_t apc_context);
+  // Don't do within the global critical region because invalidation callbacks
+  // may be triggered (as per the usual rule of not doing I/O within the global
+  // critical region).
+  X_STATUS Read(uint32_t buffer_guest_address, uint32_t buffer_length,
+                uint64_t byte_offset, uint32_t* out_bytes_read,
+                uint32_t apc_context);
 
-  X_STATUS Write(const void* buffer, size_t buffer_length, size_t byte_offset,
-                 size_t* out_bytes_written, uint32_t apc_context);
+  X_STATUS Write(uint32_t buffer_guest_address, uint32_t buffer_length,
+                 uint64_t byte_offset, uint32_t* out_bytes_written,
+                 uint32_t apc_context);
 
   X_STATUS SetLength(size_t length);
 
@@ -133,7 +138,7 @@ class XFile : public XObject {
   // TODO(benvanik): create flags, open state, etc.
 
-  size_t position_ = 0;
+  uint64_t position_ = 0;
 
   xe::filesystem::WildcardEngine find_engine_;
   size_t find_index_ = 0;
 

src/xenia/memory.cc
@@ -1297,6 +1297,24 @@ bool BaseHeap::QueryProtect(uint32_t address, uint32_t* out_protect) {
   return true;
 }
 
+xe::memory::PageAccess BaseHeap::QueryRangeAccess(uint32_t low_address,
+                                                  uint32_t high_address) {
+  if (low_address > high_address || low_address < heap_base_ ||
+      (high_address - heap_base_) >= heap_size_) {
+    return xe::memory::PageAccess::kNoAccess;
+  }
+  uint32_t low_page_number = (low_address - heap_base_) / page_size_;
+  uint32_t high_page_number = (high_address - heap_base_) / page_size_;
+  uint32_t protect = kMemoryProtectRead | kMemoryProtectWrite;
+  {
+    auto global_lock = global_critical_region_.Acquire();
+    for (uint32_t i = low_page_number; protect && i <= high_page_number; ++i) {
+      protect &= page_table_[i].current_protect;
+    }
+  }
+  return ToPageAccess(protect);
+}
+
 VirtualHeap::VirtualHeap() = default;
 
 VirtualHeap::~VirtualHeap() = default;
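QueryRangeAccess computes the strictest access over the range by starting from the most permissive mask and ANDing in each page's current protection, so a single page lacking read or write strips that right from the whole result. A self-contained illustration of the idea, using simplified stand-in types rather than Xenia's heap internals (StrictestAccess, kRead, and kWrite are hypothetical names for this sketch):

#include <cstddef>
#include <cstdint>
#include <vector>

enum : uint32_t { kRead = 1u << 0, kWrite = 1u << 1 };
enum class PageAccess { kNoAccess, kReadOnly, kReadWrite };

// Strictest common access over pages [first, last]: start from read|write and
// AND in each page's protection bits; the loop stops early once nothing is
// left. Example: {RW, RW, R} -> kReadOnly; {RW, RW} -> kReadWrite.
PageAccess StrictestAccess(const std::vector<uint32_t>& page_protect,
                           size_t first, size_t last) {
  uint32_t protect = kRead | kWrite;
  for (size_t i = first; protect && i <= last; ++i) {
    protect &= page_protect[i];
  }
  if ((protect & (kRead | kWrite)) == (kRead | kWrite)) {
    return PageAccess::kReadWrite;
  }
  return (protect & kRead) ? PageAccess::kReadOnly : PageAccess::kNoAccess;
}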

src/xenia/memory.h
@@ -172,6 +172,11 @@ class BaseHeap {
   // address.
   bool QueryProtect(uint32_t address, uint32_t* out_protect);
 
+  // Queries the currently strictest readability and writability for the entire
+  // range.
+  xe::memory::PageAccess QueryRangeAccess(uint32_t low_address,
+                                          uint32_t high_address);
+
   // Whether the heap is a guest virtual memory mapping of the physical memory.
   virtual bool IsGuestPhysicalHeap() const { return false; }
 
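A short usage sketch for the new query; the heap pointer, guest_address, and the 64 KiB length are hypothetical, while the PageAccess value is the one the commit checks against.

// True only if every page in [guest_address, guest_address + 0x10000) is
// currently mapped read-write in this heap (the high address is inclusive).
bool fully_writable =
    heap->QueryRangeAccess(guest_address, guest_address + 0x10000 - 1) ==
    xe::memory::PageAccess::kReadWrite;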