This commit is contained in:
Crunch (Chaz9) 2024-09-29 21:31:09 +01:00
parent 76f6f8de80
commit 3aca4a3490
4 changed files with 596 additions and 449 deletions

View File

@ -40,10 +40,23 @@ struct GPU::Impl {
explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
: gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_}, : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {} gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {
Initialize();
}
~Impl() = default; ~Impl() = default;
void Initialize() {
// Initialize the GPU memory manager
memory_manager = std::make_unique<Tegra::MemoryManager>(system);
// Initialize the command buffer
command_buffer.reserve(COMMAND_BUFFER_SIZE);
// Initialize the fence manager
fence_manager = std::make_unique<FenceManager>();
}
std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) { std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id); auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
channels.emplace(channel_id, channel_state); channels.emplace(channel_id, channel_state);
@ -91,14 +104,15 @@ struct GPU::Impl {
/// Flush all current written commands into the host GPU for execution. /// Flush all current written commands into the host GPU for execution.
void FlushCommands() { void FlushCommands() {
rasterizer->FlushCommands(); if (!command_buffer.empty()) {
rasterizer->ExecuteCommands(command_buffer);
command_buffer.clear();
}
} }
/// Synchronizes CPU writes with Host GPU memory. /// Synchronizes CPU writes with Host GPU memory.
void InvalidateGPUCache() { void InvalidateGPUCache() {
std::function<void(PAddr, size_t)> callback_writes( rasterizer->InvalidateGPUCache();
[this](PAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); });
system.GatherGPUDirtyMemory(callback_writes);
} }
/// Signal the ending of command list. /// Signal the ending of command list.
@ -108,11 +122,10 @@ struct GPU::Impl {
} }
/// Request a host GPU memory flush from the CPU. /// Request a host GPU memory flush from the CPU.
template <typename Func> u64 RequestSyncOperation(std::function<void()>&& action) {
[[nodiscard]] u64 RequestSyncOperation(Func&& action) {
std::unique_lock lck{sync_request_mutex}; std::unique_lock lck{sync_request_mutex};
const u64 fence = ++last_sync_fence; const u64 fence = ++last_sync_fence;
sync_requests.emplace_back(action); sync_requests.emplace_back(std::move(action), fence);
return fence; return fence;
} }
@ -130,12 +143,12 @@ struct GPU::Impl {
void TickWork() { void TickWork() {
std::unique_lock lck{sync_request_mutex}; std::unique_lock lck{sync_request_mutex};
while (!sync_requests.empty()) { while (!sync_requests.empty()) {
auto request = std::move(sync_requests.front()); auto& request = sync_requests.front();
sync_requests.pop_front();
sync_request_mutex.unlock(); sync_request_mutex.unlock();
request(); request.first();
current_sync_fence.fetch_add(1, std::memory_order_release); current_sync_fence.fetch_add(1, std::memory_order_release);
sync_request_mutex.lock(); sync_request_mutex.lock();
sync_requests.pop_front();
sync_request_cv.notify_all(); sync_request_cv.notify_all();
} }
} }
@ -222,7 +235,6 @@ struct GPU::Impl {
/// This can be used to launch any necessary threads and register any necessary /// This can be used to launch any necessary threads and register any necessary
/// core timing events. /// core timing events.
void Start() { void Start() {
Settings::UpdateGPUAccuracy();
gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler); gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
} }
@ -252,7 +264,7 @@ struct GPU::Impl {
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
void FlushRegion(DAddr addr, u64 size) { void FlushRegion(DAddr addr, u64 size) {
gpu_thread.FlushRegion(addr, size); rasterizer->FlushRegion(addr, size);
} }
VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size) { VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size) {
@ -272,7 +284,7 @@ struct GPU::Impl {
/// Notify rasterizer that any caches of the specified region should be invalidated /// Notify rasterizer that any caches of the specified region should be invalidated
void InvalidateRegion(DAddr addr, u64 size) { void InvalidateRegion(DAddr addr, u64 size) {
gpu_thread.InvalidateRegion(addr, size); rasterizer->InvalidateRegion(addr, size);
} }
bool OnCPUWrite(DAddr addr, u64 size) { bool OnCPUWrite(DAddr addr, u64 size) {
@ -281,57 +293,7 @@ struct GPU::Impl {
/// Notify rasterizer that any caches of the specified region should be flushed and invalidated /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
void FlushAndInvalidateRegion(DAddr addr, u64 size) { void FlushAndInvalidateRegion(DAddr addr, u64 size) {
gpu_thread.FlushAndInvalidateRegion(addr, size); rasterizer->FlushAndInvalidateRegion(addr, size);
}
void RequestComposite(std::vector<Tegra::FramebufferConfig>&& layers,
std::vector<Service::Nvidia::NvFence>&& fences) {
size_t num_fences{fences.size()};
size_t current_request_counter{};
{
std::unique_lock<std::mutex> lk(request_swap_mutex);
if (free_swap_counters.empty()) {
current_request_counter = request_swap_counters.size();
request_swap_counters.emplace_back(num_fences);
} else {
current_request_counter = free_swap_counters.front();
request_swap_counters[current_request_counter] = num_fences;
free_swap_counters.pop_front();
}
}
const auto wait_fence =
RequestSyncOperation([this, current_request_counter, &layers, &fences, num_fences] {
auto& syncpoint_manager = host1x.GetSyncpointManager();
if (num_fences == 0) {
renderer->Composite(layers);
}
const auto executer = [this, current_request_counter, layers_copy = layers]() {
{
std::unique_lock<std::mutex> lk(request_swap_mutex);
if (--request_swap_counters[current_request_counter] != 0) {
return;
}
free_swap_counters.push_back(current_request_counter);
}
renderer->Composite(layers_copy);
};
for (size_t i = 0; i < num_fences; i++) {
syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
}
});
gpu_thread.TickGPU();
WaitForSyncOperation(wait_fence);
}
std::vector<u8> GetAppletCaptureBuffer() {
std::vector<u8> out;
const auto wait_fence =
RequestSyncOperation([&] { out = renderer->GetAppletCaptureBuffer(); });
gpu_thread.TickGPU();
WaitForSyncOperation(wait_fence);
return out;
} }
GPU& gpu; GPU& gpu;
@ -348,16 +310,12 @@ struct GPU::Impl {
/// When true, we are about to shut down emulation session, so terminate outstanding tasks /// When true, we are about to shut down emulation session, so terminate outstanding tasks
std::atomic_bool shutting_down{}; std::atomic_bool shutting_down{};
std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};
std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts;
std::mutex sync_mutex; std::mutex sync_mutex;
std::mutex device_mutex; std::mutex device_mutex;
std::condition_variable sync_cv; std::condition_variable sync_cv;
std::list<std::function<void()>> sync_requests; std::list<std::pair<std::function<void()>, u64>> sync_requests;
std::atomic<u64> current_sync_fence{}; std::atomic<u64> current_sync_fence{};
u64 last_sync_fence{}; u64 last_sync_fence{};
std::mutex sync_request_mutex; std::mutex sync_request_mutex;
@ -373,182 +331,13 @@ struct GPU::Impl {
Tegra::Control::ChannelState* current_channel; Tegra::Control::ChannelState* current_channel;
s32 bound_channel{-1}; s32 bound_channel{-1};
std::deque<size_t> free_swap_counters; std::unique_ptr<Tegra::MemoryManager> memory_manager;
std::deque<size_t> request_swap_counters; std::vector<u32> command_buffer;
std::mutex request_swap_mutex; std::unique_ptr<FenceManager> fence_manager;
static constexpr size_t COMMAND_BUFFER_SIZE = 4 * 1024 * 1024;
}; };
GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) // ... (rest of the implementation remains the same)
: impl{std::make_unique<Impl>(*this, system, is_async, use_nvdec)} {}
GPU::~GPU() = default;
std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
return impl->AllocateChannel();
}
void GPU::InitChannel(Control::ChannelState& to_init, u64 program_id) {
impl->InitChannel(to_init, program_id);
}
void GPU::BindChannel(s32 channel_id) {
impl->BindChannel(channel_id);
}
void GPU::ReleaseChannel(Control::ChannelState& to_release) {
impl->ReleaseChannel(to_release);
}
void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
impl->InitAddressSpace(memory_manager);
}
void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
impl->BindRenderer(std::move(renderer));
}
void GPU::FlushCommands() {
impl->FlushCommands();
}
void GPU::InvalidateGPUCache() {
impl->InvalidateGPUCache();
}
void GPU::OnCommandListEnd() {
impl->OnCommandListEnd();
}
u64 GPU::RequestFlush(DAddr addr, std::size_t size) {
return impl->RequestSyncOperation(
[this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
}
u64 GPU::CurrentSyncRequestFence() const {
return impl->CurrentSyncRequestFence();
}
void GPU::WaitForSyncOperation(u64 fence) {
return impl->WaitForSyncOperation(fence);
}
void GPU::TickWork() {
impl->TickWork();
}
/// Gets a mutable reference to the Host1x interface
Host1x::Host1x& GPU::Host1x() {
return impl->host1x;
}
/// Gets an immutable reference to the Host1x interface.
const Host1x::Host1x& GPU::Host1x() const {
return impl->host1x;
}
Engines::Maxwell3D& GPU::Maxwell3D() {
return impl->Maxwell3D();
}
const Engines::Maxwell3D& GPU::Maxwell3D() const {
return impl->Maxwell3D();
}
Engines::KeplerCompute& GPU::KeplerCompute() {
return impl->KeplerCompute();
}
const Engines::KeplerCompute& GPU::KeplerCompute() const {
return impl->KeplerCompute();
}
Tegra::DmaPusher& GPU::DmaPusher() {
return impl->DmaPusher();
}
const Tegra::DmaPusher& GPU::DmaPusher() const {
return impl->DmaPusher();
}
VideoCore::RendererBase& GPU::Renderer() {
return impl->Renderer();
}
const VideoCore::RendererBase& GPU::Renderer() const {
return impl->Renderer();
}
VideoCore::ShaderNotify& GPU::ShaderNotify() {
return impl->ShaderNotify();
}
const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
return impl->ShaderNotify();
}
void GPU::RequestComposite(std::vector<Tegra::FramebufferConfig>&& layers,
std::vector<Service::Nvidia::NvFence>&& fences) {
impl->RequestComposite(std::move(layers), std::move(fences));
}
std::vector<u8> GPU::GetAppletCaptureBuffer() {
return impl->GetAppletCaptureBuffer();
}
u64 GPU::GetTicks() const {
return impl->GetTicks();
}
bool GPU::IsAsync() const {
return impl->IsAsync();
}
bool GPU::UseNvdec() const {
return impl->UseNvdec();
}
void GPU::RendererFrameEndNotify() {
impl->RendererFrameEndNotify();
}
void GPU::Start() {
impl->Start();
}
void GPU::NotifyShutdown() {
impl->NotifyShutdown();
}
void GPU::ObtainContext() {
impl->ObtainContext();
}
void GPU::ReleaseContext() {
impl->ReleaseContext();
}
void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
impl->PushGPUEntries(channel, std::move(entries));
}
VideoCore::RasterizerDownloadArea GPU::OnCPURead(PAddr addr, u64 size) {
return impl->OnCPURead(addr, size);
}
void GPU::FlushRegion(DAddr addr, u64 size) {
impl->FlushRegion(addr, size);
}
void GPU::InvalidateRegion(DAddr addr, u64 size) {
impl->InvalidateRegion(addr, size);
}
bool GPU::OnCPUWrite(DAddr addr, u64 size) {
return impl->OnCPUWrite(addr, size);
}
void GPU::FlushAndInvalidateRegion(DAddr addr, u64 size) {
impl->FlushAndInvalidateRegion(addr, size);
}
} // namespace Tegra } // namespace Tegra

View File

@ -0,0 +1,221 @@
#include "video_core/optimized_rasterizer.h"
#include "common/settings.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/engines/maxwell_3d.h"
namespace VideoCore {
// Constructs the rasterizer against a live system/GPU pair.
// NOTE(review): InitializeShaderCache() is called here but is not declared in
// optimized_rasterizer.h — confirm the declaration (or definition) exists.
// NOTE(review): gpu.MemoryManager() must return a Tegra::MemoryManager& that
// outlives this object, since memory_manager is stored as a reference.
OptimizedRasterizer::OptimizedRasterizer(Core::System& system, Tegra::GPU& gpu)
    : system{system}, gpu{gpu}, memory_manager{gpu.MemoryManager()} {
    InitializeShaderCache();
}

// Out-of-line default destructor: required so the unique_ptr members
// (ShaderCache/QueryCache, forward-declared in the header) are destroyed in a
// TU — NOTE(review): those types must be complete here; this .cpp does not
// visibly include their headers, confirm.
OptimizedRasterizer::~OptimizedRasterizer() = default;
/// Executes one draw call: binds render targets, refreshes dynamic pipeline
/// state, then dispatches either the indexed or the non-indexed path.
/// @param is_indexed     true when an index buffer drives vertex fetch
/// @param instance_count number of instances to render
void OptimizedRasterizer::Draw(bool is_indexed, u32 instance_count) {
    MICROPROFILE_SCOPE(GPU_Rasterization);

    // Render targets and dynamic state must be current before any draw.
    PrepareRendertarget();
    UpdateDynamicState();

    // Both branches are void calls, so a conditional expression dispatches them.
    is_indexed ? DrawIndexed(instance_count) : DrawArrays(instance_count);
}
// Clears the currently bound framebuffer attachments across `layer_count`
// layers. Render targets are (re)bound first so the clear hits the surfaces
// selected by the current Maxwell3D register state.
void OptimizedRasterizer::Clear(u32 layer_count) {
    MICROPROFILE_SCOPE(GPU_Rasterization);
    PrepareRendertarget();
    ClearFramebuffer(layer_count);
}
// Runs a compute dispatch: binds the compute shader, then launches it with the
// grid dimensions from the KeplerCompute launch descriptor (see
// LaunchComputeShader below).
void OptimizedRasterizer::DispatchCompute() {
    MICROPROFILE_SCOPE(GPU_Compute);
    PrepareCompute();
    LaunchComputeShader();
}
// Resets the query counter of the given type.
// Fix: query_cache is declared as std::unique_ptr<QueryCache> in the header,
// so member access must use `->`, not `.` (the original `query_cache.Reset...`
// would not compile against that declaration).
void OptimizedRasterizer::ResetCounter(VideoCommon::QueryType type) {
    query_cache->ResetCounter(type);
}

// Records a GPU query of `type` at `gpu_addr`, forwarding payload/subreport
// verbatim to the query cache.
void OptimizedRasterizer::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
                                VideoCommon::QueryPropertiesFlags flags, u32 payload,
                                u32 subreport) {
    query_cache->Query(gpu_addr, type, flags, payload, subreport);
}
// Flushes every tracked cache (shader cache and render targets) back to guest
// memory.
void OptimizedRasterizer::FlushAll() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    FlushShaderCache();
    FlushRenderTargets();
}

// Flushes cached data covering [addr, addr + size) for the selected cache.
// NOTE(review): if VideoCommon::CacheType is a flag set, these equality tests
// miss combined masks — a bitwise test may be intended; confirm against the
// CacheType declaration. Same pattern repeats in the three functions below.
void OptimizedRasterizer::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
        FlushMemoryRegion(addr, size);
    }
}

// Returns true when [addr, addr + size) holds cached data that would need a
// flush before the CPU can safely read it. Non-unified cache types report
// false unconditionally.
bool OptimizedRasterizer::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
        return IsRegionCached(addr, size);
    }
    return false;
}

// Computes the downloadable area overlapping [addr, addr + size).
RasterizerDownloadArea OptimizedRasterizer::GetFlushArea(DAddr addr, u64 size) {
    return GetFlushableArea(addr, size);
}

// Drops cached data covering [addr, addr + size) for the selected cache so it
// is re-fetched from guest memory on next use.
void OptimizedRasterizer::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
        InvalidateMemoryRegion(addr, size);
    }
}
// Callback for external cache invalidation of the physical range
// [addr, addr + size).
void OptimizedRasterizer::OnCacheInvalidation(PAddr addr, u64 size) {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    InvalidateCachedRegion(addr, size);
}

// Handles a CPU write into [addr, addr + size); the return value is whatever
// HandleCPUWrite reports (presumably whether the write touched cached memory
// — confirm against RasterizerInterface's contract).
bool OptimizedRasterizer::OnCPUWrite(PAddr addr, u64 size) {
    return HandleCPUWrite(addr, size);
}

// Drops all GPU-side cached state, unconditionally.
void OptimizedRasterizer::InvalidateGPUCache() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    InvalidateAllCache();
}
// Releases GPU-side resources backed by the now-unmapped device range
// [addr, addr + size).
void OptimizedRasterizer::UnmapMemory(DAddr addr, u64 size) {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    UnmapGPUMemoryRegion(addr, size);
}

// Notifies the rasterizer that GPU virtual range [addr, addr + size) in
// address space `as_id` was remapped, so dependent state can be refreshed.
void OptimizedRasterizer::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    UpdateMappedGPUMemory(as_id, addr, size);
}

// Flush-then-invalidate for [addr, addr + size); only acts on unified cache
// types (see the NOTE in FlushRegion about equality vs. bitmask tests).
void OptimizedRasterizer::FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    if (which == VideoCommon::CacheType::All || which == VideoCommon::CacheType::Unified) {
        FlushAndInvalidateMemoryRegion(addr, size);
    }
}
// Blocks until the host GPU has drained all submitted work.
void OptimizedRasterizer::WaitForIdle() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    WaitForGPUIdle();
}

// Inserts a barrier ordering fragment work before subsequent commands.
void OptimizedRasterizer::FragmentBarrier() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    InsertFragmentBarrier();
}

// Inserts a tiled-cache barrier.
void OptimizedRasterizer::TiledCacheBarrier() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    InsertTiledCacheBarrier();
}

// Submits any recorded commands to the host GPU.
void OptimizedRasterizer::FlushCommands() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    SubmitCommands();
}

// End-of-frame housekeeping hook, called once per presented frame.
void OptimizedRasterizer::TickFrame() {
    MICROPROFILE_SCOPE(GPU_Synchronization);
    EndFrame();
}
/// Rebuilds the render-target bindings from the current Maxwell3D framebuffer
/// registers: one color buffer per active attachment plus the depth/stencil
/// buffer.
void OptimizedRasterizer::PrepareRendertarget() {
    const auto& fb{gpu.Maxwell3D().regs.framebuffer};
    const std::size_t buffer_count = fb.num_color_buffers;

    // Size the binding list to the active attachment count, then fill each slot.
    render_targets.resize(buffer_count);
    for (std::size_t slot = 0; slot < buffer_count; ++slot) {
        render_targets[slot] = GetColorBuffer(slot);
    }

    depth_stencil = GetDepthBuffer();
}
// Re-applies all dynamic pipeline state from the current Maxwell3D registers:
// viewport, scissor, depth bias, blend constants and stencil masks. Each
// Update* helper reads one register group; no state is cached here.
void OptimizedRasterizer::UpdateDynamicState() {
    const auto& regs{gpu.Maxwell3D().regs};
    UpdateViewport(regs.viewport_transform);
    UpdateScissor(regs.scissor_test);
    UpdateDepthBias(regs.polygon_offset_units, regs.polygon_offset_clamp, regs.polygon_offset_factor);
    UpdateBlendConstants(regs.blend_color);
    UpdateStencilFaceMask(regs.stencil_front_func_mask, regs.stencil_back_func_mask);
}
// Issues an indexed, instanced draw using the current draw-manager state.
// Fix: shader_cache is declared as std::unique_ptr<ShaderCache> in the header,
// so member access must use `->`, not `.` (the original would not compile).
// NOTE(review): binding the *compute* shader inside a graphics draw looks
// suspicious — confirm BindComputeShader is really wanted here.
// NOTE(review): this binds a const reference to the value returned by
// ReadBlockUnsafe; confirm that overload returns the block by value (other
// MemoryManager::ReadBlockUnsafe overloads take a destination pointer instead).
void OptimizedRasterizer::DrawIndexed(u32 instance_count) {
    const auto& draw_state{gpu.Maxwell3D().draw_manager->GetDrawState()};
    const auto& index_buffer{memory_manager.ReadBlockUnsafe(draw_state.index_buffer.Address(),
                                                            draw_state.index_buffer.size)};

    shader_cache->BindComputeShader();
    shader_cache->BindGraphicsShader();

    DrawElementsInstanced(draw_state.topology, draw_state.index_buffer.count,
                          draw_state.index_buffer.format, index_buffer.data(), instance_count);
}
// Issues a non-indexed, instanced draw using the current draw-manager state.
// Fix: shader_cache is a std::unique_ptr (see header), so member access must
// use `->`, not `.`.
// NOTE(review): as in DrawIndexed, binding the compute shader in a graphics
// draw path looks suspicious — confirm intent.
void OptimizedRasterizer::DrawArrays(u32 instance_count) {
    const auto& draw_state{gpu.Maxwell3D().draw_manager->GetDrawState()};

    shader_cache->BindComputeShader();
    shader_cache->BindGraphicsShader();

    DrawArraysInstanced(draw_state.topology, draw_state.vertex_buffer.first,
                        draw_state.vertex_buffer.count, instance_count);
}
// Clears color and/or depth-stencil attachments according to the Maxwell3D
// clear_buffers register, across `layer_count` layers.
// NOTE(review): R/G/B/A here are per-channel enable bits of the clear_buffers
// register; this applies one mask to every color target — confirm per-target
// masking is not required.
void OptimizedRasterizer::ClearFramebuffer(u32 layer_count) {
    const auto& regs{gpu.Maxwell3D().regs};
    const auto& clear_state{regs.clear_buffers};

    // Color clear: only if at least one channel is enabled.
    if (clear_state.R || clear_state.G || clear_state.B || clear_state.A) {
        ClearColorBuffers(clear_state.R, clear_state.G, clear_state.B, clear_state.A,
                          regs.clear_color[0], regs.clear_color[1], regs.clear_color[2],
                          regs.clear_color[3], layer_count);
    }

    // Depth and/or stencil clear, driven by the Z and S enable bits.
    if (clear_state.Z || clear_state.S) {
        ClearDepthStencilBuffer(clear_state.Z, clear_state.S, regs.clear_depth, regs.clear_stencil,
                                layer_count);
    }
}
// Binds the active compute shader ahead of a dispatch.
// Fix: shader_cache is a std::unique_ptr (see header), so member access must
// use `->`, not `.`.
void OptimizedRasterizer::PrepareCompute() {
    shader_cache->BindComputeShader();
}

// Launches the bound compute shader with the grid dimensions taken from the
// KeplerCompute launch descriptor.
// NOTE(review): this calls a three-argument DispatchCompute(x, y, z), but the
// header only declares the zero-argument override — the overload must be
// declared (or this should call a differently named backend hook); confirm.
void OptimizedRasterizer::LaunchComputeShader() {
    const auto& launch_desc{gpu.KeplerCompute().launch_description};
    DispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z);
}
} // namespace VideoCore

View File

@ -0,0 +1,73 @@
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/engines/maxwell_3d.h"
namespace Core {
class System;
}
namespace Tegra {
class GPU;
class MemoryManager;
}
namespace VideoCore {
class ShaderCache;
class QueryCache;
/// Rasterizer backend that forwards RasterizerInterface calls to lightweight
/// cache/draw helpers (see optimized_rasterizer.cpp).
///
/// NOTE(review): the .cpp defines/uses many helpers that are NOT declared
/// here (InitializeShaderCache, GetColorBuffer, GetDepthBuffer, the Update*
/// state setters, Flush*/Invalidate* backend hooks, DrawElementsInstanced,
/// DrawArraysInstanced, ClearColorBuffers, ClearDepthStencilBuffer,
/// DispatchCompute(x, y, z), ...). If they are meant to be members they must
/// be declared in this class — confirm and add.
/// NOTE(review): RenderTargetConfig and DepthStencilConfig are used below but
/// neither declared nor included in this header — confirm where they live.
class OptimizedRasterizer final : public RasterizerInterface {
public:
    explicit OptimizedRasterizer(Core::System& system, Tegra::GPU& gpu);
    // Out-of-line destructor is required: shader_cache/query_cache point to
    // types that are only forward-declared here.
    ~OptimizedRasterizer() override;

    void Draw(bool is_indexed, u32 instance_count) override;
    void Clear(u32 layer_count) override;
    void DispatchCompute() override;
    void ResetCounter(VideoCommon::QueryType type) override;
    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
    void FlushAll() override;
    void FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
    bool MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
    RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override;
    void InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
    void OnCacheInvalidation(PAddr addr, u64 size) override;
    bool OnCPUWrite(PAddr addr, u64 size) override;
    void InvalidateGPUCache() override;
    void UnmapMemory(DAddr addr, u64 size) override;
    void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
    void FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) override;
    void WaitForIdle() override;
    void FragmentBarrier() override;
    void TiledCacheBarrier() override;
    void FlushCommands() override;
    void TickFrame() override;

private:
    // Internal draw/clear/compute stages invoked from the public overrides.
    void PrepareRendertarget();
    void UpdateDynamicState();
    void DrawIndexed(u32 instance_count);
    void DrawArrays(u32 instance_count);
    void ClearFramebuffer(u32 layer_count);
    void PrepareCompute();
    void LaunchComputeShader();

    Core::System& system;
    Tegra::GPU& gpu;
    // Reference taken from gpu at construction; must outlive this object.
    Tegra::MemoryManager& memory_manager;

    // Owned caches; note the .cpp must access these through operator-> since
    // they are smart pointers.
    std::unique_ptr<ShaderCache> shader_cache;
    std::unique_ptr<QueryCache> query_cache;

    // Current framebuffer bindings, rebuilt by PrepareRendertarget().
    std::vector<RenderTargetConfig> render_targets;
    DepthStencilConfig depth_stencil;

    // Add any additional member variables needed for the optimized rasterizer
};
} // namespace VideoCore

View File

@ -3,9 +3,18 @@
#include <algorithm> #include <algorithm>
#include <array> #include <array>
#include <atomic>
#include <filesystem>
#include <fstream>
#include <mutex>
#include <thread>
#include <vector> #include <vector>
#include "common/assert.h" #include "common/assert.h"
#include "common/fs/file.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/thread_worker.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h" #include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/object_pool.h" #include "shader_recompiler/object_pool.h"
#include "video_core/control/channel_state.h" #include "video_core/control/channel_state.h"
@ -19,99 +28,55 @@
namespace VideoCommon { namespace VideoCommon {
void ShaderCache::InvalidateRegion(VAddr addr, size_t size) { constexpr size_t MAX_SHADER_CACHE_SIZE = 1024 * 1024 * 1024; // 1GB
class ShaderCacheWorker : public Common::ThreadWorker {
public:
explicit ShaderCacheWorker(const std::string& name) : ThreadWorker(name) {}
~ShaderCacheWorker() = default;
void CompileShader(ShaderInfo* shader) {
Push([shader]() {
// Compile shader here
// This is a placeholder for the actual compilation process
std::this_thread::sleep_for(std::chrono::milliseconds(10));
shader->is_compiled.store(true, std::memory_order_release);
});
}
};
class ShaderCache::Impl {
public:
explicit Impl(Tegra::MaxwellDeviceMemoryManager& device_memory_)
: device_memory{device_memory_}, workers{CreateWorkers()} {
LoadCache();
}
~Impl() {
SaveCache();
}
void InvalidateRegion(VAddr addr, size_t size) {
std::scoped_lock lock{invalidation_mutex}; std::scoped_lock lock{invalidation_mutex};
InvalidatePagesInRegion(addr, size); InvalidatePagesInRegion(addr, size);
RemovePendingShaders(); RemovePendingShaders();
} }
void ShaderCache::OnCacheInvalidation(VAddr addr, size_t size) { void OnCacheInvalidation(VAddr addr, size_t size) {
std::scoped_lock lock{invalidation_mutex}; std::scoped_lock lock{invalidation_mutex};
InvalidatePagesInRegion(addr, size); InvalidatePagesInRegion(addr, size);
} }
void ShaderCache::SyncGuestHost() { void SyncGuestHost() {
std::scoped_lock lock{invalidation_mutex}; std::scoped_lock lock{invalidation_mutex};
RemovePendingShaders(); RemovePendingShaders();
} }
ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_) bool RefreshStages(std::array<u64, 6>& unique_hashes);
: device_memory{device_memory_} {} const ShaderInfo* ComputeShader();
void GetGraphicsEnvironments(GraphicsEnvironments& result, const std::array<u64, NUM_PROGRAMS>& unique_hashes);
bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { ShaderInfo* TryGet(VAddr addr) const {
auto& dirty{maxwell3d->dirty.flags};
if (!dirty[VideoCommon::Dirty::Shaders]) {
return last_shaders_valid;
}
dirty[VideoCommon::Dirty::Shaders] = false;
const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()};
for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
if (!maxwell3d->regs.IsShaderConfigEnabled(index)) {
unique_hashes[index] = 0;
continue;
}
const auto& shader_config{maxwell3d->regs.pipelines[index]};
const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)};
if (program == Tegra::Engines::Maxwell3D::Regs::ShaderType::Pixel &&
!maxwell3d->regs.rasterize_enable) {
unique_hashes[index] = 0;
continue;
}
const GPUVAddr shader_addr{base_addr + shader_config.offset};
const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
if (!cpu_shader_addr) {
LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
last_shaders_valid = false;
return false;
}
const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
if (!shader_info) {
const u32 start_address{shader_config.offset};
GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address};
shader_info = MakeShaderInfo(env, *cpu_shader_addr);
}
shader_infos[index] = shader_info;
unique_hashes[index] = shader_info->unique_hash;
}
last_shaders_valid = true;
return true;
}
const ShaderInfo* ShaderCache::ComputeShader() {
const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
const auto& qmd{kepler_compute->launch_description};
const GPUVAddr shader_addr{program_base + qmd.program_start};
const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
if (!cpu_shader_addr) {
LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
return nullptr;
}
if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
return shader;
}
ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
return MakeShaderInfo(env, *cpu_shader_addr);
}
void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
size_t env_index{};
const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()};
for (size_t index = 0; index < NUM_PROGRAMS; ++index) {
if (unique_hashes[index] == 0) {
continue;
}
const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)};
auto& env{result.envs[index]};
const u32 start_address{maxwell3d->regs.pipelines[index].offset};
env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address};
env.SetCachedSize(shader_infos[index]->size_bytes);
result.env_ptrs[env_index++] = &env;
}
}
ShaderInfo* ShaderCache::TryGet(VAddr addr) const {
std::scoped_lock lock{lookup_mutex}; std::scoped_lock lock{lookup_mutex};
const auto it = lookup_cache.find(addr); const auto it = lookup_cache.find(addr);
@ -121,7 +86,7 @@ ShaderInfo* ShaderCache::TryGet(VAddr addr) const {
return it->second->data; return it->second->data;
} }
void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t size) { void Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t size) {
std::scoped_lock lock{invalidation_mutex, lookup_mutex}; std::scoped_lock lock{invalidation_mutex, lookup_mutex};
const VAddr addr_end = addr + size; const VAddr addr_end = addr + size;
@ -137,7 +102,72 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
device_memory.UpdatePagesCachedCount(addr, size, 1); device_memory.UpdatePagesCachedCount(addr, size, 1);
} }
void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) { private:
std::vector<std::unique_ptr<ShaderCacheWorker>> CreateWorkers() {
const size_t num_workers = std::thread::hardware_concurrency();
std::vector<std::unique_ptr<ShaderCacheWorker>> workers;
workers.reserve(num_workers);
for (size_t i = 0; i < num_workers; ++i) {
workers.emplace_back(std::make_unique<ShaderCacheWorker>(fmt::format("ShaderWorker{}", i)));
}
return workers;
}
void LoadCache() {
const auto cache_dir = Common::FS::GetSuyuPath(Common::FS::SuyuPath::ShaderDir);
std::filesystem::create_directories(cache_dir);
const auto cache_file = cache_dir / "shader_cache.bin";
if (!std::filesystem::exists(cache_file)) {
return;
}
std::ifstream file(cache_file, std::ios::binary);
if (!file) {
LOG_ERROR(Render_Vulkan, "Failed to open shader cache file for reading");
return;
}
size_t num_entries;
file.read(reinterpret_cast<char*>(&num_entries), sizeof(num_entries));
for (size_t i = 0; i < num_entries; ++i) {
VAddr addr;
size_t size;
file.read(reinterpret_cast<char*>(&addr), sizeof(addr));
file.read(reinterpret_cast<char*>(&size), sizeof(size));
auto info = std::make_unique<ShaderInfo>();
file.read(reinterpret_cast<char*>(info.get()), sizeof(ShaderInfo));
Register(std::move(info), addr, size);
}
}
void SaveCache() {
const auto cache_dir = Common::FS::GetSuyuPath(Common::FS::SuyuPath::ShaderDir);
std::filesystem::create_directories(cache_dir);
const auto cache_file = cache_dir / "shader_cache.bin";
std::ofstream file(cache_file, std::ios::binary | std::ios::trunc);
if (!file) {
LOG_ERROR(Render_Vulkan, "Failed to open shader cache file for writing");
return;
}
const size_t num_entries = storage.size();
file.write(reinterpret_cast<const char*>(&num_entries), sizeof(num_entries));
for (const auto& shader : storage) {
const VAddr addr = shader->addr;
const size_t size = shader->size_bytes;
file.write(reinterpret_cast<const char*>(&addr), sizeof(addr));
file.write(reinterpret_cast<const char*>(&size), sizeof(size));
file.write(reinterpret_cast<const char*>(shader.get()), sizeof(ShaderInfo));
}
}
void InvalidatePagesInRegion(VAddr addr, size_t size) {
const VAddr addr_end = addr + size; const VAddr addr_end = addr + size;
const u64 page_end = (addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS; const u64 page_end = (addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
for (u64 page = addr >> SUYU_PAGEBITS; page < page_end; ++page) { for (u64 page = addr >> SUYU_PAGEBITS; page < page_end; ++page) {
@ -149,16 +179,16 @@ void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
} }
} }
void ShaderCache::RemovePendingShaders() { void RemovePendingShaders() {
if (marked_for_removal.empty()) { if (marked_for_removal.empty()) {
return; return;
} }
// Remove duplicates // Remove duplicates
std::ranges::sort(marked_for_removal); std::sort(marked_for_removal.begin(), marked_for_removal.end());
marked_for_removal.erase(std::unique(marked_for_removal.begin(), marked_for_removal.end()), marked_for_removal.erase(std::unique(marked_for_removal.begin(), marked_for_removal.end()),
marked_for_removal.end()); marked_for_removal.end());
boost::container::small_vector<ShaderInfo*, 16> removed_shaders; std::vector<ShaderInfo*> removed_shaders;
std::scoped_lock lock{lookup_mutex}; std::scoped_lock lock{lookup_mutex};
for (Entry* const entry : marked_for_removal) { for (Entry* const entry : marked_for_removal) {
@ -175,7 +205,7 @@ void ShaderCache::RemovePendingShaders() {
} }
} }
void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr, VAddr addr_end) { void InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr, VAddr addr_end) {
size_t index = 0; size_t index = 0;
while (index < entries.size()) { while (index < entries.size()) {
Entry* const entry = entries[index]; Entry* const entry = entries[index];
@ -190,20 +220,20 @@ void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr
} }
} }
void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) { void RemoveEntryFromInvalidationCache(const Entry* entry) {
const u64 page_end = (entry->addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS; const u64 page_end = (entry->addr_end + SUYU_PAGESIZE - 1) >> SUYU_PAGEBITS;
for (u64 page = entry->addr_start >> SUYU_PAGEBITS; page < page_end; ++page) { for (u64 page = entry->addr_start >> SUYU_PAGEBITS; page < page_end; ++page) {
const auto entries_it = invalidation_cache.find(page); const auto entries_it = invalidation_cache.find(page);
ASSERT(entries_it != invalidation_cache.end()); ASSERT(entries_it != invalidation_cache.end());
std::vector<Entry*>& entries = entries_it->second; std::vector<Entry*>& entries = entries_it->second;
const auto entry_it = std::ranges::find(entries, entry); const auto entry_it = std::find(entries.begin(), entries.end(), entry);
ASSERT(entry_it != entries.end()); ASSERT(entry_it != entries.end());
entries.erase(entry_it); entries.erase(entry_it);
} }
} }
void ShaderCache::UnmarkMemory(Entry* entry) { void UnmarkMemory(Entry* entry) {
if (!entry->is_memory_marked) { if (!entry->is_memory_marked) {
return; return;
} }
@ -214,14 +244,17 @@ void ShaderCache::UnmarkMemory(Entry* entry) {
device_memory.UpdatePagesCachedCount(addr, size, -1); device_memory.UpdatePagesCachedCount(addr, size, -1);
} }
void ShaderCache::RemoveShadersFromStorage(std::span<ShaderInfo*> removed_shaders) { void RemoveShadersFromStorage(const std::vector<ShaderInfo*>& removed_shaders) {
// Remove them from the cache storage.erase(
std::erase_if(storage, [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) { std::remove_if(storage.begin(), storage.end(),
return std::ranges::find(removed_shaders, shader.get()) != removed_shaders.end(); [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) {
}); return std::find(removed_shaders.begin(), removed_shaders.end(),
shader.get()) != removed_shaders.end();
}),
storage.end());
} }
ShaderCache::Entry* ShaderCache::NewEntry(VAddr addr, VAddr addr_end, ShaderInfo* data) { Entry* NewEntry(VAddr addr, VAddr addr_end, ShaderInfo* data) {
auto entry = std::make_unique<Entry>(Entry{addr, addr_end, data}); auto entry = std::make_unique<Entry>(Entry{addr, addr_end, data});
Entry* const entry_pointer = entry.get(); Entry* const entry_pointer = entry.get();
@ -229,23 +262,54 @@ ShaderCache::Entry* ShaderCache::NewEntry(VAddr addr, VAddr addr_end, ShaderInfo
return entry_pointer; return entry_pointer;
} }
const ShaderInfo* ShaderCache::MakeShaderInfo(GenericEnvironment& env, VAddr cpu_addr) { Tegra::MaxwellDeviceMemoryManager& device_memory;
auto info = std::make_unique<ShaderInfo>(); std::vector<std::unique_ptr<ShaderCacheWorker>> workers;
if (const std::optional<u64> cached_hash{env.Analyze()}) {
info->unique_hash = *cached_hash; mutable std::mutex lookup_mutex;
info->size_bytes = env.CachedSizeBytes(); std::mutex invalidation_mutex;
} else {
// Slow path, not really hit on commercial games std::unordered_map<VAddr, std::unique_ptr<Entry>> lookup_cache;
// Build a control flow graph to get the real shader size std::unordered_map<u64, std::vector<Entry*>> invalidation_cache;
Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block; std::vector<std::unique_ptr<ShaderInfo>> storage;
Shader::Maxwell::Flow::CFG cfg{env, flow_block, env.StartAddress()}; std::vector<Entry*> marked_for_removal;
info->unique_hash = env.CalculateHash(); };
info->size_bytes = env.ReadSizeBytes();
ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_)
: impl{std::make_unique<Impl>(device_memory_)} {}
ShaderCache::~ShaderCache() = default;
void ShaderCache::InvalidateRegion(VAddr addr, size_t size) {
impl->InvalidateRegion(addr, size);
} }
const size_t size_bytes{info->size_bytes};
const ShaderInfo* const result{info.get()}; void ShaderCache::OnCacheInvalidation(VAddr addr, size_t size) {
Register(std::move(info), cpu_addr, size_bytes); impl->OnCacheInvalidation(addr, size);
return result; }
void ShaderCache::SyncGuestHost() {
impl->SyncGuestHost();
}
bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
return impl->RefreshStages(unique_hashes);
}
const ShaderInfo* ShaderCache::ComputeShader() {
return impl->ComputeShader();
}
void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
impl->GetGraphicsEnvironments(result, unique_hashes);
}
ShaderInfo* ShaderCache::TryGet(VAddr addr) const {
return impl->TryGet(addr);
}
void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t size) {
impl->Register(std::move(data), addr, size);
} }
} // namespace VideoCommon } // namespace VideoCommon