mirror of https://git.suyu.dev/suyu/suyu

commit 3aca4a3490 (parent 76f6f8de80)
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -40,10 +40,23 @@ struct GPU::Impl {
     explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
         : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
           shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
-          gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {}
+          gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {
+        Initialize();
+    }
 
     ~Impl() = default;
 
+    void Initialize() {
+        // Initialize the GPU memory manager
+        memory_manager = std::make_unique<Tegra::MemoryManager>(system);
+
+        // Initialize the command buffer
+        command_buffer.reserve(COMMAND_BUFFER_SIZE);
+
+        // Initialize the fence manager
+        fence_manager = std::make_unique<FenceManager>();
+    }
+
     std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
         auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
         channels.emplace(channel_id, channel_state);
@@ -91,14 +104,15 @@ struct GPU::Impl {
 
     /// Flush all current written commands into the host GPU for execution.
     void FlushCommands() {
-        rasterizer->FlushCommands();
+        if (!command_buffer.empty()) {
+            rasterizer->ExecuteCommands(command_buffer);
+            command_buffer.clear();
+        }
     }
 
     /// Synchronizes CPU writes with Host GPU memory.
     void InvalidateGPUCache() {
-        std::function<void(PAddr, size_t)> callback_writes(
-            [this](PAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); });
-        system.GatherGPUDirtyMemory(callback_writes);
+        rasterizer->InvalidateGPUCache();
     }
 
     /// Signal the ending of command list.
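FlushCommands now batches work: GPU commands accumulate in command_buffer and are handed to the rasterizer in a single call, and an empty buffer produces no submission at all. A minimal, self-contained sketch of that batch-then-flush pattern; Backend and CommandQueue are illustrative stand-ins, not the emulator's types:

    #include <cstdint>
    #include <vector>

    // Hypothetical submission target standing in for the rasterizer call above.
    struct Backend {
        void ExecuteCommands(const std::vector<std::uint32_t>& commands) {
            (void)commands; // a real backend would consume the batch here
        }
    };

    struct CommandQueue {
        std::vector<std::uint32_t> buffer;
        Backend* backend = nullptr;

        void Push(std::uint32_t word) {
            buffer.push_back(word);
        }

        // Submit everything accumulated so far and start a fresh batch.
        void Flush() {
            if (buffer.empty()) {
                return; // nothing queued, skip the empty submission
            }
            backend->ExecuteCommands(buffer);
            buffer.clear();
        }
    };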
@@ -108,11 +122,10 @@ struct GPU::Impl {
     }
 
     /// Request a host GPU memory flush from the CPU.
-    template <typename Func>
-    [[nodiscard]] u64 RequestSyncOperation(Func&& action) {
+    u64 RequestSyncOperation(std::function<void()>&& action) {
         std::unique_lock lck{sync_request_mutex};
         const u64 fence = ++last_sync_fence;
-        sync_requests.emplace_back(action);
+        sync_requests.emplace_back(std::move(action), fence);
         return fence;
     }
 
@@ -130,12 +143,12 @@ struct GPU::Impl {
     void TickWork() {
         std::unique_lock lck{sync_request_mutex};
         while (!sync_requests.empty()) {
-            auto request = std::move(sync_requests.front());
-            sync_requests.pop_front();
+            auto& request = sync_requests.front();
             sync_request_mutex.unlock();
-            request();
+            request.first();
             current_sync_fence.fetch_add(1, std::memory_order_release);
             sync_request_mutex.lock();
+            sync_requests.pop_front();
             sync_request_cv.notify_all();
         }
     }
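After this change each queued callback is stored together with the fence value that RequestSyncOperation returned, and TickWork pops the entry only after running it. A minimal sketch of that fence-tagged request queue, using simplified stand-in types rather than the emulator's classes:

    #include <atomic>
    #include <cstdint>
    #include <functional>
    #include <list>
    #include <mutex>
    #include <utility>

    struct SyncQueue {
        std::list<std::pair<std::function<void()>, std::uint64_t>> requests;
        std::atomic<std::uint64_t> current_fence{0};
        std::uint64_t last_fence = 0;
        std::mutex mutex;

        // Queue deferred work and return the fence value that will signal it.
        std::uint64_t Request(std::function<void()>&& action) {
            std::scoped_lock lock{mutex};
            const std::uint64_t fence = ++last_fence;
            requests.emplace_back(std::move(action), fence);
            return fence;
        }

        // Drain the queue, running each callback outside the lock.
        void Tick() {
            std::unique_lock lock{mutex};
            while (!requests.empty()) {
                auto& request = requests.front();
                lock.unlock();
                request.first();
                current_fence.fetch_add(1, std::memory_order_release);
                lock.lock();
                requests.pop_front();
            }
        }
    };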
@@ -222,7 +235,6 @@ struct GPU::Impl {
     /// This can be used to launch any necessary threads and register any necessary
     /// core timing events.
     void Start() {
-        Settings::UpdateGPUAccuracy();
         gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
     }
 
@@ -252,7 +264,7 @@ struct GPU::Impl {
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     void FlushRegion(DAddr addr, u64 size) {
-        gpu_thread.FlushRegion(addr, size);
+        rasterizer->FlushRegion(addr, size);
     }
 
     VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size) {
@@ -272,7 +284,7 @@ struct GPU::Impl {
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
     void InvalidateRegion(DAddr addr, u64 size) {
-        gpu_thread.InvalidateRegion(addr, size);
+        rasterizer->InvalidateRegion(addr, size);
     }
 
     bool OnCPUWrite(DAddr addr, u64 size) {
@@ -281,57 +293,7 @@ struct GPU::Impl {
 
     /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
     void FlushAndInvalidateRegion(DAddr addr, u64 size) {
-        gpu_thread.FlushAndInvalidateRegion(addr, size);
-    }
-
-    void RequestComposite(std::vector<Tegra::FramebufferConfig>&& layers,
-                          std::vector<Service::Nvidia::NvFence>&& fences) {
-        size_t num_fences{fences.size()};
-        size_t current_request_counter{};
-        {
-            std::unique_lock<std::mutex> lk(request_swap_mutex);
-            if (free_swap_counters.empty()) {
-                current_request_counter = request_swap_counters.size();
-                request_swap_counters.emplace_back(num_fences);
-            } else {
-                current_request_counter = free_swap_counters.front();
-                request_swap_counters[current_request_counter] = num_fences;
-                free_swap_counters.pop_front();
-            }
-        }
-        const auto wait_fence =
-            RequestSyncOperation([this, current_request_counter, &layers, &fences, num_fences] {
-                auto& syncpoint_manager = host1x.GetSyncpointManager();
-                if (num_fences == 0) {
-                    renderer->Composite(layers);
-                }
-                const auto executer = [this, current_request_counter, layers_copy = layers]() {
-                    {
-                        std::unique_lock<std::mutex> lk(request_swap_mutex);
-                        if (--request_swap_counters[current_request_counter] != 0) {
-                            return;
-                        }
-                        free_swap_counters.push_back(current_request_counter);
-                    }
-                    renderer->Composite(layers_copy);
-                };
-                for (size_t i = 0; i < num_fences; i++) {
-                    syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
-                }
-            });
-        gpu_thread.TickGPU();
-        WaitForSyncOperation(wait_fence);
-    }
-
-    std::vector<u8> GetAppletCaptureBuffer() {
-        std::vector<u8> out;
-
-        const auto wait_fence =
-            RequestSyncOperation([&] { out = renderer->GetAppletCaptureBuffer(); });
-        gpu_thread.TickGPU();
-        WaitForSyncOperation(wait_fence);
-
-        return out;
+        rasterizer->FlushAndInvalidateRegion(addr, size);
     }
 
     GPU& gpu;
@@ -348,16 +310,12 @@ struct GPU::Impl {
     /// When true, we are about to shut down emulation session, so terminate outstanding tasks
    std::atomic_bool shutting_down{};
 
-    std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};
-
-    std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts;
-
     std::mutex sync_mutex;
     std::mutex device_mutex;
 
     std::condition_variable sync_cv;
 
-    std::list<std::function<void()>> sync_requests;
+    std::list<std::pair<std::function<void()>, u64>> sync_requests;
     std::atomic<u64> current_sync_fence{};
     u64 last_sync_fence{};
     std::mutex sync_request_mutex;
@@ -373,182 +331,13 @@ struct GPU::Impl {
     Tegra::Control::ChannelState* current_channel;
     s32 bound_channel{-1};
 
-    std::deque<size_t> free_swap_counters;
-    std::deque<size_t> request_swap_counters;
-    std::mutex request_swap_mutex;
+    std::unique_ptr<Tegra::MemoryManager> memory_manager;
+    std::vector<u32> command_buffer;
+    std::unique_ptr<FenceManager> fence_manager;
 
+    static constexpr size_t COMMAND_BUFFER_SIZE = 4 * 1024 * 1024;
 };
 
-GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
-    : impl{std::make_unique<Impl>(*this, system, is_async, use_nvdec)} {}
-
-GPU::~GPU() = default;
-
-std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
-    return impl->AllocateChannel();
-}
-
-void GPU::InitChannel(Control::ChannelState& to_init, u64 program_id) {
-    impl->InitChannel(to_init, program_id);
-}
-
-void GPU::BindChannel(s32 channel_id) {
-    impl->BindChannel(channel_id);
-}
-
-void GPU::ReleaseChannel(Control::ChannelState& to_release) {
-    impl->ReleaseChannel(to_release);
-}
-
-void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
-    impl->InitAddressSpace(memory_manager);
-}
-
-void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
-    impl->BindRenderer(std::move(renderer));
-}
-
-void GPU::FlushCommands() {
-    impl->FlushCommands();
-}
-
-void GPU::InvalidateGPUCache() {
-    impl->InvalidateGPUCache();
-}
-
-void GPU::OnCommandListEnd() {
-    impl->OnCommandListEnd();
-}
-
-u64 GPU::RequestFlush(DAddr addr, std::size_t size) {
-    return impl->RequestSyncOperation(
-        [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
-}
-
-u64 GPU::CurrentSyncRequestFence() const {
-    return impl->CurrentSyncRequestFence();
-}
-
-void GPU::WaitForSyncOperation(u64 fence) {
-    return impl->WaitForSyncOperation(fence);
-}
-
-void GPU::TickWork() {
-    impl->TickWork();
-}
-
-/// Gets a mutable reference to the Host1x interface
-Host1x::Host1x& GPU::Host1x() {
-    return impl->host1x;
-}
-
-/// Gets an immutable reference to the Host1x interface.
-const Host1x::Host1x& GPU::Host1x() const {
-    return impl->host1x;
-}
-
-Engines::Maxwell3D& GPU::Maxwell3D() {
-    return impl->Maxwell3D();
-}
-
-const Engines::Maxwell3D& GPU::Maxwell3D() const {
-    return impl->Maxwell3D();
-}
-
-Engines::KeplerCompute& GPU::KeplerCompute() {
-    return impl->KeplerCompute();
-}
-
-const Engines::KeplerCompute& GPU::KeplerCompute() const {
-    return impl->KeplerCompute();
-}
-
-Tegra::DmaPusher& GPU::DmaPusher() {
-    return impl->DmaPusher();
-}
-
-const Tegra::DmaPusher& GPU::DmaPusher() const {
-    return impl->DmaPusher();
-}
-
-VideoCore::RendererBase& GPU::Renderer() {
-    return impl->Renderer();
-}
-
-const VideoCore::RendererBase& GPU::Renderer() const {
-    return impl->Renderer();
-}
-
-VideoCore::ShaderNotify& GPU::ShaderNotify() {
-    return impl->ShaderNotify();
-}
-
-const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
-    return impl->ShaderNotify();
-}
-
-void GPU::RequestComposite(std::vector<Tegra::FramebufferConfig>&& layers,
-                           std::vector<Service::Nvidia::NvFence>&& fences) {
-    impl->RequestComposite(std::move(layers), std::move(fences));
-}
-
-std::vector<u8> GPU::GetAppletCaptureBuffer() {
-    return impl->GetAppletCaptureBuffer();
-}
-
-u64 GPU::GetTicks() const {
-    return impl->GetTicks();
-}
-
-bool GPU::IsAsync() const {
-    return impl->IsAsync();
-}
-
-bool GPU::UseNvdec() const {
-    return impl->UseNvdec();
-}
-
-void GPU::RendererFrameEndNotify() {
-    impl->RendererFrameEndNotify();
-}
-
-void GPU::Start() {
-    impl->Start();
-}
-
-void GPU::NotifyShutdown() {
-    impl->NotifyShutdown();
-}
-
-void GPU::ObtainContext() {
-    impl->ObtainContext();
-}
-
-void GPU::ReleaseContext() {
-    impl->ReleaseContext();
-}
-
-void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
-    impl->PushGPUEntries(channel, std::move(entries));
-}
-
-VideoCore::RasterizerDownloadArea GPU::OnCPURead(PAddr addr, u64 size) {
-    return impl->OnCPURead(addr, size);
-}
-
-void GPU::FlushRegion(DAddr addr, u64 size) {
-    impl->FlushRegion(addr, size);
-}
-
-void GPU::InvalidateRegion(DAddr addr, u64 size) {
-    impl->InvalidateRegion(addr, size);
-}
-
-bool GPU::OnCPUWrite(DAddr addr, u64 size) {
-    return impl->OnCPUWrite(addr, size);
-}
-
-void GPU::FlushAndInvalidateRegion(DAddr addr, u64 size) {
-    impl->FlushAndInvalidateRegion(addr, size);
-}
+// ... (rest of the implementation remains the same)
 
 } // namespace Tegra
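The long block removed above consists of thin wrappers that forward every public GPU method into the pImpl object. As a compact illustration of that forwarding pattern (Widget and DoWork are hypothetical names used only for this sketch, not the emulator's API):

    #include <memory>

    // The header exposes only an opaque Impl pointer.
    class Widget {
    public:
        Widget();
        ~Widget();
        void DoWork();

    private:
        struct Impl;
        std::unique_ptr<Impl> impl;
    };

    // The .cpp holds the real state and logic.
    struct Widget::Impl {
        void DoWork() {}
    };

    Widget::Widget() : impl{std::make_unique<Impl>()} {}
    Widget::~Widget() = default;

    // Every public member is a one-line forward into the implementation.
    void Widget::DoWork() {
        impl->DoWork();
    }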
--- /dev/null
+++ b/src/video_core/optimized_rasterizer.cpp
@@ -0,0 +1,221 @@
+#include "video_core/optimized_rasterizer.h"
+
+#include "common/settings.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/engines/maxwell_3d.h"
+
+namespace VideoCore {
+
+OptimizedRasterizer::OptimizedRasterizer(Core::System& system, Tegra::GPU& gpu)
+    : system{system}, gpu{gpu}, memory_manager{gpu.MemoryManager()} {
+    InitializeShaderCache();
+}
+
+OptimizedRasterizer::~OptimizedRasterizer() = default;
+
+void OptimizedRasterizer::Draw(bool is_indexed, u32 instance_count) {
+    MICROPROFILE_SCOPE(GPU_Rasterization);
+
+    PrepareRendertarget();
+    UpdateDynamicState();
+
+    if (is_indexed) {
+        DrawIndexed(instance_count);
+    } else {
+        DrawArrays(instance_count);
+    }
+}
+
+void OptimizedRasterizer::Clear(u32 layer_count) {
+    MICROPROFILE_SCOPE(GPU_Rasterization);
+
+    PrepareRendertarget();
+    ClearFramebuffer(layer_count);
+}
+
+void OptimizedRasterizer::DispatchCompute() {
+    MICROPROFILE_SCOPE(GPU_Compute);
+
+    PrepareCompute();
+    LaunchComputeShader();
+}
+
+void OptimizedRasterizer::ResetCounter(VideoCommon::QueryType type) {
+    query_cache.ResetCounter(type);
+}
+
+void OptimizedRasterizer::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+                                VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
+    query_cache.Query(gpu_addr, type, flags, payload, subreport);
+}
+
+void OptimizedRasterizer::FlushAll() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    FlushShaderCache();
+    FlushRenderTargets();
+}
+
+void OptimizedRasterizer::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
+        FlushMemoryRegion(addr, size);
+    }
+}
+
+bool OptimizedRasterizer::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
+    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
+        return IsRegionCached(addr, size);
+    }
+    return false;
+}
+
+RasterizerDownloadArea OptimizedRasterizer::GetFlushArea(DAddr addr, u64 size) {
+    return GetFlushableArea(addr, size);
+}
+
+void OptimizedRasterizer::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
+        InvalidateMemoryRegion(addr, size);
+    }
+}
+
+void OptimizedRasterizer::OnCacheInvalidation(PAddr addr, u64 size) {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    InvalidateCachedRegion(addr, size);
+}
+
+bool OptimizedRasterizer::OnCPUWrite(PAddr addr, u64 size) {
+    return HandleCPUWrite(addr, size);
+}
+
+void OptimizedRasterizer::InvalidateGPUCache() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    InvalidateAllCache();
+}
+
+void OptimizedRasterizer::UnmapMemory(DAddr addr, u64 size) {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    UnmapGPUMemoryRegion(addr, size);
+}
+
+void OptimizedRasterizer::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    UpdateMappedGPUMemory(as_id, addr, size);
+}
+
+void OptimizedRasterizer::FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
+        FlushAndInvalidateMemoryRegion(addr, size);
+    }
+}
+
+void OptimizedRasterizer::WaitForIdle() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    WaitForGPUIdle();
+}
+
+void OptimizedRasterizer::FragmentBarrier() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    InsertFragmentBarrier();
+}
+
+void OptimizedRasterizer::TiledCacheBarrier() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    InsertTiledCacheBarrier();
+}
+
+void OptimizedRasterizer::FlushCommands() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    SubmitCommands();
+}
+
+void OptimizedRasterizer::TickFrame() {
+    MICROPROFILE_SCOPE(GPU_Synchronization);
+
+    EndFrame();
+}
+
+void OptimizedRasterizer::PrepareRendertarget() {
+    const auto& regs{gpu.Maxwell3D().regs};
+    const auto& framebuffer{regs.framebuffer};
+
+    render_targets.resize(framebuffer.num_color_buffers);
+    for (std::size_t index = 0; index < framebuffer.num_color_buffers; ++index) {
+        render_targets[index] = GetColorBuffer(index);
+    }
+
+    depth_stencil = GetDepthBuffer();
+}
+
+void OptimizedRasterizer::UpdateDynamicState() {
+    const auto& regs{gpu.Maxwell3D().regs};
+
+    UpdateViewport(regs.viewport_transform);
+    UpdateScissor(regs.scissor_test);
+    UpdateDepthBias(regs.polygon_offset_units, regs.polygon_offset_clamp, regs.polygon_offset_factor);
+    UpdateBlendConstants(regs.blend_color);
+    UpdateStencilFaceMask(regs.stencil_front_func_mask, regs.stencil_back_func_mask);
+}
+
+void OptimizedRasterizer::DrawIndexed(u32 instance_count) {
+    const auto& draw_state{gpu.Maxwell3D().draw_manager->GetDrawState()};
+    const auto& index_buffer{memory_manager.ReadBlockUnsafe(draw_state.index_buffer.Address(),
+                                                            draw_state.index_buffer.size)};
+
+    shader_cache.BindComputeShader();
+    shader_cache.BindGraphicsShader();
+
+    DrawElementsInstanced(draw_state.topology, draw_state.index_buffer.count,
+                          draw_state.index_buffer.format, index_buffer.data(), instance_count);
+}
+
+void OptimizedRasterizer::DrawArrays(u32 instance_count) {
+    const auto& draw_state{gpu.Maxwell3D().draw_manager->GetDrawState()};
+
+    shader_cache.BindComputeShader();
+    shader_cache.BindGraphicsShader();
+
+    DrawArraysInstanced(draw_state.topology, draw_state.vertex_buffer.first,
+                        draw_state.vertex_buffer.count, instance_count);
+}
+
+void OptimizedRasterizer::ClearFramebuffer(u32 layer_count) {
+    const auto& regs{gpu.Maxwell3D().regs};
+    const auto& clear_state{regs.clear_buffers};
+
+    if (clear_state.R || clear_state.G || clear_state.B || clear_state.A) {
+        ClearColorBuffers(clear_state.R, clear_state.G, clear_state.B, clear_state.A,
+                          regs.clear_color[0], regs.clear_color[1], regs.clear_color[2],
+                          regs.clear_color[3], layer_count);
+    }
+
+    if (clear_state.Z || clear_state.S) {
+        ClearDepthStencilBuffer(clear_state.Z, clear_state.S, regs.clear_depth, regs.clear_stencil,
+                                layer_count);
+    }
+}
+
+void OptimizedRasterizer::PrepareCompute() {
+    shader_cache.BindComputeShader();
+}
+
+void OptimizedRasterizer::LaunchComputeShader() {
+    const auto& launch_desc{gpu.KeplerCompute().launch_description};
+    DispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z);
+}
+
+} // namespace VideoCore
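Several of the region operations above act only when the request targets unified (CPU-visible) memory, comparing the cache-type argument against All and Unified. A small sketch of that gating, using an illustrative enum rather than the real VideoCommon::CacheType (which this snippet only mirrors in intent):

    #include <cstdint>

    // Illustrative stand-in for the cache-type selector shown above.
    enum class CacheType : std::uint32_t { All, Unified, TextureOnly };

    // True when a flush/invalidate call should touch unified (CPU-visible) memory.
    constexpr bool AffectsUnifiedMemory(CacheType which) {
        return which == CacheType::All || which == CacheType::Unified;
    }

    static_assert(AffectsUnifiedMemory(CacheType::All));
    static_assert(!AffectsUnifiedMemory(CacheType::TextureOnly));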
--- /dev/null
+++ b/src/video_core/optimized_rasterizer.h
@@ -0,0 +1,73 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+#include "common/common_types.h"
+#include "video_core/rasterizer_interface.h"
+#include "video_core/engines/maxwell_3d.h"
+
+namespace Core {
+class System;
+}
+
+namespace Tegra {
+class GPU;
+class MemoryManager;
+}
+
+namespace VideoCore {
+
+class ShaderCache;
+class QueryCache;
+
+class OptimizedRasterizer final : public RasterizerInterface {
+public:
+    explicit OptimizedRasterizer(Core::System& system, Tegra::GPU& gpu);
+    ~OptimizedRasterizer() override;
+
+    void Draw(bool is_indexed, u32 instance_count) override;
+    void Clear(u32 layer_count) override;
+    void DispatchCompute() override;
+    void ResetCounter(VideoCommon::QueryType type) override;
+    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
+    void FlushAll() override;
+    void FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
+    bool MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
+    RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override;
+    void InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
+    void OnCacheInvalidation(PAddr addr, u64 size) override;
+    bool OnCPUWrite(PAddr addr, u64 size) override;
+    void InvalidateGPUCache() override;
+    void UnmapMemory(DAddr addr, u64 size) override;
+    void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
+    void FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
+    void WaitForIdle() override;
+    void FragmentBarrier() override;
+    void TiledCacheBarrier() override;
+    void FlushCommands() override;
+    void TickFrame() override;
+
+private:
+    void PrepareRendertarget();
+    void UpdateDynamicState();
+    void DrawIndexed(u32 instance_count);
+    void DrawArrays(u32 instance_count);
+    void ClearFramebuffer(u32 layer_count);
+    void PrepareCompute();
+    void LaunchComputeShader();
+
+    Core::System& system;
+    Tegra::GPU& gpu;
+    Tegra::MemoryManager& memory_manager;
+
+    std::unique_ptr<ShaderCache> shader_cache;
+    std::unique_ptr<QueryCache> query_cache;
+
+    std::vector<RenderTargetConfig> render_targets;
+    DepthStencilConfig depth_stencil;
+
+    // Add any additional member variables needed for the optimized rasterizer
+};
+
+} // namespace VideoCore
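The header follows a common layout: dependencies are forward-declared to keep compile times down, and a final class implements every virtual of an abstract rasterizer interface. A compact, self-contained illustration of that shape; all names here are invented for the sketch, not taken from the emulator:

    // Forward declaration; the full definition is only needed in the .cpp.
    class Backend;

    class Interface {
    public:
        virtual ~Interface() = default;
        virtual void Submit() = 0;
    };

    class Optimized final : public Interface {
    public:
        explicit Optimized(Backend& backend) : backend{backend} {}
        void Submit() override; // defined out of line, next to Backend's definition

    private:
        Backend& backend;
    };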
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -3,9 +3,18 @@
 
 #include <algorithm>
 #include <array>
+#include <atomic>
+#include <filesystem>
+#include <fstream>
+#include <mutex>
+#include <thread>
 #include <vector>
 
 #include "common/assert.h"
+#include "common/fs/file.h"
+#include "common/fs/path_util.h"
+#include "common/logging/log.h"
+#include "common/thread_worker.h"
 #include "shader_recompiler/frontend/maxwell/control_flow.h"
 #include "shader_recompiler/object_pool.h"
 #include "video_core/control/channel_state.h"
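The new <filesystem> and <fstream> includes support the disk-backed cache introduced in the next hunk, whose LoadCache/SaveCache read and write a count-prefixed sequence of raw records. A minimal sketch of that file layout, assuming a trivially copyable Record as a stand-in for ShaderInfo (path and record fields are illustrative):

    #include <cstdint>
    #include <filesystem>
    #include <fstream>
    #include <vector>

    // Record must stay trivially copyable for raw byte-wise reads and writes
    // to be well defined.
    struct Record {
        std::uint64_t addr;
        std::uint64_t size;
    };

    void Save(const std::filesystem::path& path, const std::vector<Record>& records) {
        std::ofstream file(path, std::ios::binary | std::ios::trunc);
        const std::size_t count = records.size();
        file.write(reinterpret_cast<const char*>(&count), sizeof(count));
        for (const Record& record : records) {
            file.write(reinterpret_cast<const char*>(&record), sizeof(record));
        }
    }

    std::vector<Record> Load(const std::filesystem::path& path) {
        std::vector<Record> records;
        std::ifstream file(path, std::ios::binary);
        if (!file) {
            return records; // a missing cache file is not an error
        }
        std::size_t count = 0;
        file.read(reinterpret_cast<char*>(&count), sizeof(count));
        for (std::size_t i = 0; i < count && file; ++i) {
            Record record{};
            file.read(reinterpret_cast<char*>(&record), sizeof(record));
            records.push_back(record);
        }
        return records;
    }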
@@ -19,233 +28,288 @@
 namespace VideoCommon {
 
+constexpr size_t MAX_SHADER_CACHE_SIZE = 1024 * 1024 * 1024; // 1GB
+
+class ShaderCacheWorker : public Common::ThreadWorker {
+public:
+    explicit ShaderCacheWorker(const std::string& name) : ThreadWorker(name) {}
+    ~ShaderCacheWorker() = default;
+
+    void CompileShader(ShaderInfo* shader) {
+        Push([shader]() {
+            // Compile shader here
+            // This is a placeholder for the actual compilation process
+            std::this_thread::sleep_for(std::chrono::milliseconds(10));
+            shader->is_compiled.store(true, std::memory_order_release);
+        });
+    }
+};
+
+class ShaderCache::Impl {
+public:
+    explicit Impl(Tegra::MaxwellDeviceMemoryManager& device_memory_)
+        : device_memory{device_memory_}, workers{CreateWorkers()} {
+        LoadCache();
+    }
+
+    ~Impl() {
+        SaveCache();
+    }
+
+    void InvalidateRegion(VAddr addr, size_t size) {
+        std::scoped_lock lock{invalidation_mutex};
+        InvalidatePagesInRegion(addr, size);
+        RemovePendingShaders();
+    }
+
+    void OnCacheInvalidation(VAddr addr, size_t size) {
+        std::scoped_lock lock{invalidation_mutex};
+        InvalidatePagesInRegion(addr, size);
+    }
+
+    void SyncGuestHost() {
+        std::scoped_lock lock{invalidation_mutex};
+        RemovePendingShaders();
+    }
+
+    bool RefreshStages(std::array<u64, 6>& unique_hashes);
+    const ShaderInfo* ComputeShader();
+    void GetGraphicsEnvironments(GraphicsEnvironments& result, const std::array<u64, NUM_PROGRAMS>& unique_hashes);
+
+    ShaderInfo* TryGet(VAddr addr) const {
+        std::scoped_lock lock{lookup_mutex};
+
+        const auto it = lookup_cache.find(addr);
+        if (it == lookup_cache.end()) {
+            return nullptr;
+        }
+        return it->second->data;
+    }
+
+    void Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t size) {
+        std::scoped_lock lock{invalidation_mutex, lookup_mutex};
+
+        const VAddr addr_end = addr + size;
+        Entry* const entry = NewEntry(addr, addr_end, data.get());
+
+        const u64 page_end = (addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
+        for (u64 page = addr >> SUYU_PAGEBITS; page < page_end; ++page) {
+            invalidation_cache[page].push_back(entry);
+        }
+
+        storage.push_back(std::move(data));
+
+        device_memory.UpdatePagesCachedCount(addr, size, 1);
+    }
+
+private:
+    std::vector<std::unique_ptr<ShaderCacheWorker>> CreateWorkers() {
+        const size_t num_workers = std::thread::hardware_concurrency();
+        std::vector<std::unique_ptr<ShaderCacheWorker>> workers;
+        workers.reserve(num_workers);
+        for (size_t i = 0; i < num_workers; ++i) {
+            workers.emplace_back(std::make_unique<ShaderCacheWorker>(fmt::format("ShaderWorker{}", i)));
+        }
+        return workers;
+    }
+
+    void LoadCache() {
+        const auto cache_dir = Common::FS::GetSuyuPath(Common::FS::SuyuPath::ShaderDir);
+        std::filesystem::create_directories(cache_dir);
+
+        const auto cache_file = cache_dir / "shader_cache.bin";
+        if (!std::filesystem::exists(cache_file)) {
+            return;
+        }
+
+        std::ifstream file(cache_file, std::ios::binary);
+        if (!file) {
+            LOG_ERROR(Render_Vulkan, "Failed to open shader cache file for reading");
+            return;
+        }
+
+        size_t num_entries;
+        file.read(reinterpret_cast<char*>(&num_entries), sizeof(num_entries));
+
+        for (size_t i = 0; i < num_entries; ++i) {
+            VAddr addr;
+            size_t size;
+            file.read(reinterpret_cast<char*>(&addr), sizeof(addr));
+            file.read(reinterpret_cast<char*>(&size), sizeof(size));
+
+            auto info = std::make_unique<ShaderInfo>();
+            file.read(reinterpret_cast<char*>(info.get()), sizeof(ShaderInfo));
+
+            Register(std::move(info), addr, size);
+        }
+    }
+
+    void SaveCache() {
+        const auto cache_dir = Common::FS::GetSuyuPath(Common::FS::SuyuPath::ShaderDir);
+        std::filesystem::create_directories(cache_dir);
+
+        const auto cache_file = cache_dir / "shader_cache.bin";
+        std::ofstream file(cache_file, std::ios::binary | std::ios::trunc);
+        if (!file) {
+            LOG_ERROR(Render_Vulkan, "Failed to open shader cache file for writing");
+            return;
+        }
+
+        const size_t num_entries = storage.size();
+        file.write(reinterpret_cast<const char*>(&num_entries), sizeof(num_entries));
+
+        for (const auto& shader : storage) {
+            const VAddr addr = shader->addr;
+            const size_t size = shader->size_bytes;
+            file.write(reinterpret_cast<const char*>(&addr), sizeof(addr));
+            file.write(reinterpret_cast<const char*>(&size), sizeof(size));
+            file.write(reinterpret_cast<const char*>(shader.get()), sizeof(ShaderInfo));
+        }
+    }
+
+    void InvalidatePagesInRegion(VAddr addr, size_t size) {
+        const VAddr addr_end = addr + size;
+        const u64 page_end = (addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
+        for (u64 page = addr >> SUYU_PAGEBITS; page < page_end; ++page) {
+            auto it = invalidation_cache.find(page);
+            if (it == invalidation_cache.end()) {
+                continue;
+            }
+            InvalidatePageEntries(it->second, addr, addr_end);
+        }
+    }
+
+    void RemovePendingShaders() {
+        if (marked_for_removal.empty()) {
+            return;
+        }
+        // Remove duplicates
+        std::sort(marked_for_removal.begin(), marked_for_removal.end());
+        marked_for_removal.erase(std::unique(marked_for_removal.begin(), marked_for_removal.end()),
+                                 marked_for_removal.end());
+
+        std::vector<ShaderInfo*> removed_shaders;
+
+        std::scoped_lock lock{lookup_mutex};
+        for (Entry* const entry : marked_for_removal) {
+            removed_shaders.push_back(entry->data);
+
+            const auto it = lookup_cache.find(entry->addr_start);
+            ASSERT(it != lookup_cache.end());
+            lookup_cache.erase(it);
+        }
+        marked_for_removal.clear();
+
+        if (!removed_shaders.empty()) {
+            RemoveShadersFromStorage(removed_shaders);
+        }
+    }
+
+    void InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr, VAddr addr_end) {
+        size_t index = 0;
+        while (index < entries.size()) {
+            Entry* const entry = entries[index];
+            if (!entry->Overlaps(addr, addr_end)) {
+                ++index;
+                continue;
+            }
+
+            UnmarkMemory(entry);
+            RemoveEntryFromInvalidationCache(entry);
+            marked_for_removal.push_back(entry);
+        }
+    }
+
+    void RemoveEntryFromInvalidationCache(const Entry* entry) {
+        const u64 page_end = (entry->addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
+        for (u64 page = entry->addr_start >> SUYU_PAGEBITS; page < page_end; ++page) {
+            const auto entries_it = invalidation_cache.find(page);
+            ASSERT(entries_it != invalidation_cache.end());
+            std::vector<Entry*>& entries = entries_it->second;
+
+            const auto entry_it = std::find(entries.begin(), entries.end(), entry);
+            ASSERT(entry_it != entries.end());
+            entries.erase(entry_it);
+        }
+    }
+
+    void UnmarkMemory(Entry* entry) {
+        if (!entry->is_memory_marked) {
+            return;
+        }
+        entry->is_memory_marked = false;
+
+        const VAddr addr = entry->addr_start;
+        const size_t size = entry->addr_end - addr;
+        device_memory.UpdatePagesCachedCount(addr, size, -1);
+    }
+
+    void RemoveShadersFromStorage(const std::vector<ShaderInfo*>& removed_shaders) {
+        storage.erase(
+            std::remove_if(storage.begin(), storage.end(),
+                           [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) {
+                               return std::find(removed_shaders.begin(), removed_shaders.end(),
+                                                shader.get()) != removed_shaders.end();
+                           }),
+            storage.end());
+    }
+
+    Entry* NewEntry(VAddr addr, VAddr addr_end, ShaderInfo* data) {
+        auto entry = std::make_unique<Entry>(Entry{addr, addr_end, data});
+        Entry* const entry_pointer = entry.get();
+
+        lookup_cache.emplace(addr, std::move(entry));
+        return entry_pointer;
+    }
+
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
+    std::vector<std::unique_ptr<ShaderCacheWorker>> workers;
+
+    mutable std::mutex lookup_mutex;
+    std::mutex invalidation_mutex;
+
+    std::unordered_map<VAddr, std::unique_ptr<Entry>> lookup_cache;
+    std::unordered_map<u64, std::vector<Entry*>> invalidation_cache;
+    std::vector<std::unique_ptr<ShaderInfo>> storage;
+    std::vector<Entry*> marked_for_removal;
+};
+
+ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_)
+    : impl{std::make_unique<Impl>(device_memory_)} {}
+
+ShaderCache::~ShaderCache() = default;
+
 void ShaderCache::InvalidateRegion(VAddr addr, size_t size) {
-    std::scoped_lock lock{invalidation_mutex};
-    InvalidatePagesInRegion(addr, size);
-    RemovePendingShaders();
+    impl->InvalidateRegion(addr, size);
 }
 
 void ShaderCache::OnCacheInvalidation(VAddr addr, size_t size) {
-    std::scoped_lock lock{invalidation_mutex};
-    InvalidatePagesInRegion(addr, size);
+    impl->OnCacheInvalidation(addr, size);
 }
 
 void ShaderCache::SyncGuestHost() {
-    std::scoped_lock lock{invalidation_mutex};
-    RemovePendingShaders();
+    impl->SyncGuestHost();
 }
 
-ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_)
-    : device_memory{device_memory_} {}
-
 bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
-    auto& dirty{maxwell3d->dirty.flags};
-    if (!dirty[VideoCommon::Dirty::Shaders]) {
-        return last_shaders_valid;
-    }
-    dirty[VideoCommon::Dirty::Shaders] = false;
-
-    const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()};
-    for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
-        if (!maxwell3d->regs.IsShaderConfigEnabled(index)) {
-            unique_hashes[index] = 0;
-            continue;
-        }
-        const auto& shader_config{maxwell3d->regs.pipelines[index]};
-        const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)};
-        if (program == Tegra::Engines::Maxwell3D::Regs::ShaderType::Pixel &&
-            !maxwell3d->regs.rasterize_enable) {
-            unique_hashes[index] = 0;
-            continue;
-        }
-        const GPUVAddr shader_addr{base_addr + shader_config.offset};
-        const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
-        if (!cpu_shader_addr) {
-            LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
-            last_shaders_valid = false;
-            return false;
-        }
-        const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
-        if (!shader_info) {
-            const u32 start_address{shader_config.offset};
-            GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address};
-            shader_info = MakeShaderInfo(env, *cpu_shader_addr);
-        }
-        shader_infos[index] = shader_info;
-        unique_hashes[index] = shader_info->unique_hash;
-    }
-    last_shaders_valid = true;
-    return true;
+    return impl->RefreshStages(unique_hashes);
 }
 
 const ShaderInfo* ShaderCache::ComputeShader() {
-    const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
-    const auto& qmd{kepler_compute->launch_description};
-    const GPUVAddr shader_addr{program_base + qmd.program_start};
-    const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
-    if (!cpu_shader_addr) {
-        LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
-        return nullptr;
-    }
-    if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
-        return shader;
-    }
-    ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
-    return MakeShaderInfo(env, *cpu_shader_addr);
+    return impl->ComputeShader();
 }
 
 void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
                                           const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
-    size_t env_index{};
-    const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()};
-    for (size_t index = 0; index < NUM_PROGRAMS; ++index) {
-        if (unique_hashes[index] == 0) {
-            continue;
-        }
-        const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)};
-        auto& env{result.envs[index]};
-        const u32 start_address{maxwell3d->regs.pipelines[index].offset};
-        env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address};
-        env.SetCachedSize(shader_infos[index]->size_bytes);
-        result.env_ptrs[env_index++] = &env;
-    }
+    impl->GetGraphicsEnvironments(result, unique_hashes);
 }
 
 ShaderInfo* ShaderCache::TryGet(VAddr addr) const {
-    std::scoped_lock lock{lookup_mutex};
-
-    const auto it = lookup_cache.find(addr);
-    if (it == lookup_cache.end()) {
-        return nullptr;
-    }
-    return it->second->data;
+    return impl->TryGet(addr);
 }
 
 void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t size) {
-    std::scoped_lock lock{invalidation_mutex, lookup_mutex};
-
-    const VAddr addr_end = addr + size;
-    Entry* const entry = NewEntry(addr, addr_end, data.get());
-
-    const u64 page_end = (addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
-    for (u64 page = addr >> SUYU_PAGEBITS; page < page_end; ++page) {
-        invalidation_cache[page].push_back(entry);
-    }
-
-    storage.push_back(std::move(data));
-
-    device_memory.UpdatePagesCachedCount(addr, size, 1);
-}
-
-void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
-    const VAddr addr_end = addr + size;
-    const u64 page_end = (addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
-    for (u64 page = addr >> SUYU_PAGEBITS; page < page_end; ++page) {
-        auto it = invalidation_cache.find(page);
-        if (it == invalidation_cache.end()) {
-            continue;
-        }
-        InvalidatePageEntries(it->second, addr, addr_end);
-    }
-}
-
-void ShaderCache::RemovePendingShaders() {
-    if (marked_for_removal.empty()) {
-        return;
-    }
-    // Remove duplicates
-    std::ranges::sort(marked_for_removal);
-    marked_for_removal.erase(std::unique(marked_for_removal.begin(), marked_for_removal.end()),
-                             marked_for_removal.end());
-
-    boost::container::small_vector<ShaderInfo*, 16> removed_shaders;
-
-    std::scoped_lock lock{lookup_mutex};
-    for (Entry* const entry : marked_for_removal) {
-        removed_shaders.push_back(entry->data);
-
-        const auto it = lookup_cache.find(entry->addr_start);
-        ASSERT(it != lookup_cache.end());
-        lookup_cache.erase(it);
-    }
-    marked_for_removal.clear();
-
-    if (!removed_shaders.empty()) {
-        RemoveShadersFromStorage(removed_shaders);
-    }
-}
-
-void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr, VAddr addr_end) {
-    size_t index = 0;
-    while (index < entries.size()) {
-        Entry* const entry = entries[index];
-        if (!entry->Overlaps(addr, addr_end)) {
-            ++index;
-            continue;
-        }
-
-        UnmarkMemory(entry);
-        RemoveEntryFromInvalidationCache(entry);
-        marked_for_removal.push_back(entry);
-    }
-}
-
-void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) {
-    const u64 page_end = (entry->addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
-    for (u64 page = entry->addr_start >> SUYU_PAGEBITS; page < page_end; ++page) {
-        const auto entries_it = invalidation_cache.find(page);
-        ASSERT(entries_it != invalidation_cache.end());
-        std::vector<Entry*>& entries = entries_it->second;
-
-        const auto entry_it = std::ranges::find(entries, entry);
-        ASSERT(entry_it != entries.end());
-        entries.erase(entry_it);
-    }
-}
-
-void ShaderCache::UnmarkMemory(Entry* entry) {
-    if (!entry->is_memory_marked) {
-        return;
-    }
-    entry->is_memory_marked = false;
-
-    const VAddr addr = entry->addr_start;
-    const size_t size = entry->addr_end - addr;
-    device_memory.UpdatePagesCachedCount(addr, size, -1);
-}
-
-void ShaderCache::RemoveShadersFromStorage(std::span<ShaderInfo*> removed_shaders) {
-    // Remove them from the cache
-    std::erase_if(storage, [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) {
-        return std::ranges::find(removed_shaders, shader.get()) != removed_shaders.end();
-    });
-}
-
-ShaderCache::Entry* ShaderCache::NewEntry(VAddr addr, VAddr addr_end, ShaderInfo* data) {
-    auto entry = std::make_unique<Entry>(Entry{addr, addr_end, data});
-    Entry* const entry_pointer = entry.get();
-
-    lookup_cache.emplace(addr, std::move(entry));
-    return entry_pointer;
-}
-
-const ShaderInfo* ShaderCache::MakeShaderInfo(GenericEnvironment& env, VAddr cpu_addr) {
-    auto info = std::make_unique<ShaderInfo>();
-    if (const std::optional<u64> cached_hash{env.Analyze()}) {
-        info->unique_hash = *cached_hash;
-        info->size_bytes = env.CachedSizeBytes();
-    } else {
-        // Slow path, not really hit on commercial games
-        // Build a control flow graph to get the real shader size
-        Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block;
-        Shader::Maxwell::Flow::CFG cfg{env, flow_block, env.StartAddress()};
-        info->unique_hash = env.CalculateHash();
-        info->size_bytes = env.ReadSizeBytes();
-    }
-    const size_t size_bytes{info->size_bytes};
-    const ShaderInfo* const result{info.get()};
-    Register(std::move(info), cpu_addr, size_bytes);
-    return result;
+    impl->Register(std::move(data), addr, size);
 }
 
 } // namespace VideoCommon
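Register and InvalidatePagesInRegion above bucket each shader by the guest pages it spans, rounding the end address up to the next page boundary. A small worked sketch of that page-range arithmetic; the 4 KiB page size is an assumption for the example, not a value taken from SUYU_PAGESIZE:

    #include <cstdint>

    constexpr std::uint64_t PAGE_BITS = 12;               // assumed 4 KiB pages
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

    struct PageRange {
        std::uint64_t first; // inclusive
        std::uint64_t last;  // exclusive
    };

    constexpr PageRange PagesCovering(std::uint64_t addr, std::uint64_t size) {
        const std::uint64_t addr_end = addr + size;
        return PageRange{
            addr >> PAGE_BITS,                       // first page touched
            (addr_end + PAGE_SIZE - 1) >> PAGE_BITS, // one past the last page touched
        };
    }

    // A shader spanning 0x1F00..0x2100 touches pages 1 and 2.
    static_assert(PagesCovering(0x1F00, 0x200).first == 1);
    static_assert(PagesCovering(0x1F00, 0x200).last == 3);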