forked from ShuriZma/suyu

video_core: Replace VKScheduler with Scheduler

commit 9775fae4eb
parent a262dc02b5
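The change is a mechanical rename across the Vulkan backend: the VKScheduler class becomes Scheduler (and the renderer's VKScreenInfo struct becomes ScreenInfo), so every forward declaration, constructor signature, and reference member that spelled out the old name is updated; the hunks below show only the rename, no behavioural changes. A minimal sketch of the pattern, assuming only the declarations visible in the hunks below (the file layout is not shown in this diff):

#include <vulkan/vulkan.h>  // for VkSemaphore in the sketch

namespace Vulkan {

class Device;
class StateTracker;

class Scheduler {  // previously declared as `class VKScheduler`
public:
    explicit Scheduler(const Device& device, StateTracker& state_tracker);
    ~Scheduler();

    /// Sends the current execution context to the GPU.
    void Flush(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
};

// Consumers only update the spelled-out type:
//     class VKScheduler;        ->   class Scheduler;
//     VKScheduler& scheduler;   ->   Scheduler& scheduler;

} // namespace Vulkan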
@@ -349,7 +349,7 @@ VkExtent2D GetConversionExtent(const ImageView& src_image_view) {
 }
 } // Anonymous namespace

-BlitImageHelper::BlitImageHelper(const Device& device_, VKScheduler& scheduler_,
+BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
 StateTracker& state_tracker_, DescriptorPool& descriptor_pool)
 : device{device_}, scheduler{scheduler_}, state_tracker{state_tracker_},
 one_texture_set_layout(device.GetLogical().CreateDescriptorSetLayout(
@@ -16,7 +16,7 @@ class Device;
 class Framebuffer;
 class ImageView;
 class StateTracker;
-class VKScheduler;
+class Scheduler;

 struct BlitImagePipelineKey {
 constexpr auto operator<=>(const BlitImagePipelineKey&) const noexcept = default;
@@ -27,7 +27,7 @@ struct BlitImagePipelineKey {

 class BlitImageHelper {
 public:
-explicit BlitImageHelper(const Device& device, VKScheduler& scheduler,
+explicit BlitImageHelper(const Device& device, Scheduler& scheduler,
 StateTracker& state_tracker, DescriptorPool& descriptor_pool);
 ~BlitImageHelper();

@@ -82,7 +82,7 @@ private:
 vk::ShaderModule& module);

 const Device& device;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 StateTracker& state_tracker;

 vk::DescriptorSetLayout one_texture_set_layout;
@@ -65,12 +65,12 @@ private:
 vk::DebugUtilsMessenger debug_callback;
 vk::SurfaceKHR surface;

-VKScreenInfo screen_info;
+ScreenInfo screen_info;

 Device device;
 MemoryAllocator memory_allocator;
 StateTracker state_tracker;
-VKScheduler scheduler;
+Scheduler scheduler;
 VKSwapchain swapchain;
 BlitScreen blit_screen;
 RasterizerVulkan rasterizer;
@@ -120,8 +120,8 @@ struct BlitScreen::BufferData {

 BlitScreen::BlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWindow& render_window_,
 const Device& device_, MemoryAllocator& memory_allocator_,
-VKSwapchain& swapchain_, VKScheduler& scheduler_,
-const VKScreenInfo& screen_info_)
+VKSwapchain& swapchain_, Scheduler& scheduler_,
+const ScreenInfo& screen_info_)
 : cpu_memory{cpu_memory_}, render_window{render_window_}, device{device_},
 memory_allocator{memory_allocator_}, swapchain{swapchain_}, scheduler{scheduler_},
 image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
@@ -35,10 +35,10 @@ struct ScreenInfo;
 class Device;
 class FSR;
 class RasterizerVulkan;
-class VKScheduler;
+class Scheduler;
 class VKSwapchain;

-struct VKScreenInfo {
+struct ScreenInfo {
 VkImageView image_view{};
 u32 width{};
 u32 height{};
@@ -49,8 +49,8 @@ class BlitScreen {
 public:
 explicit BlitScreen(Core::Memory::Memory& cpu_memory, Core::Frontend::EmuWindow& render_window,
 const Device& device, MemoryAllocator& memory_manager,
-VKSwapchain& swapchain, VKScheduler& scheduler,
-const VKScreenInfo& screen_info);
+VKSwapchain& swapchain, Scheduler& scheduler,
+const ScreenInfo& screen_info);
 ~BlitScreen();

 void Recreate();
@@ -109,9 +109,9 @@ private:
 const Device& device;
 MemoryAllocator& memory_allocator;
 VKSwapchain& swapchain;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 const std::size_t image_count;
-const VKScreenInfo& screen_info;
+const ScreenInfo& screen_info;

 vk::ShaderModule vertex_shader;
 vk::ShaderModule fxaa_vertex_shader;
@@ -124,7 +124,7 @@ VkBufferView Buffer::View(u32 offset, u32 size, VideoCore::Surface::PixelFormat
 }

 BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
-VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
+Scheduler& scheduler_, StagingBufferPool& staging_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 DescriptorPool& descriptor_pool)
 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
@@ -16,7 +16,7 @@ namespace Vulkan {

 class Device;
 class DescriptorPool;
-class VKScheduler;
+class Scheduler;

 class BufferCacheRuntime;

@@ -58,7 +58,7 @@ class BufferCacheRuntime {

 public:
 explicit BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_manager_,
-VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
+Scheduler& scheduler_, StagingBufferPool& staging_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 DescriptorPool& descriptor_pool);

@@ -124,7 +124,7 @@ private:

 const Device& device;
 MemoryAllocator& memory_allocator;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 StagingBufferPool& staging_pool;
 VKUpdateDescriptorQueue& update_descriptor_queue;

@@ -200,8 +200,8 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,

 ComputePass::~ComputePass() = default;

-Uint8Pass::Uint8Pass(const Device& device_, VKScheduler& scheduler_,
-DescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
+Uint8Pass::Uint8Pass(const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool,
+StagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_)
 : ComputePass(device_, descriptor_pool, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
 INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, {},
@@ -241,7 +241,7 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
 return {staging.buffer, staging.offset};
 }

-QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
+QuadIndexedPass::QuadIndexedPass(const Device& device_, Scheduler& scheduler_,
 DescriptorPool& descriptor_pool_,
 StagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_)
@@ -303,7 +303,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
 return {staging.buffer, staging.offset};
 }

-ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
+ASTCDecoderPass::ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
 DescriptorPool& descriptor_pool_,
 StagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
@@ -20,7 +20,7 @@ namespace Vulkan {

 class Device;
 class StagingBufferPool;
-class VKScheduler;
+class Scheduler;
 class VKUpdateDescriptorQueue;
 class Image;
 struct StagingBufferRef;
@@ -48,7 +48,7 @@ private:

 class Uint8Pass final : public ComputePass {
 public:
-explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
+explicit Uint8Pass(const Device& device_, Scheduler& scheduler_,
 DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_);
 ~Uint8Pass();
@@ -59,14 +59,14 @@ public:
 u32 src_offset);

 private:
-VKScheduler& scheduler;
+Scheduler& scheduler;
 StagingBufferPool& staging_buffer_pool;
 VKUpdateDescriptorQueue& update_descriptor_queue;
 };

 class QuadIndexedPass final : public ComputePass {
 public:
-explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
+explicit QuadIndexedPass(const Device& device_, Scheduler& scheduler_,
 DescriptorPool& descriptor_pool_,
 StagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_);
@@ -77,14 +77,14 @@ public:
 u32 base_vertex, VkBuffer src_buffer, u32 src_offset);

 private:
-VKScheduler& scheduler;
+Scheduler& scheduler;
 StagingBufferPool& staging_buffer_pool;
 VKUpdateDescriptorQueue& update_descriptor_queue;
 };

 class ASTCDecoderPass final : public ComputePass {
 public:
-explicit ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
+explicit ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
 DescriptorPool& descriptor_pool_,
 StagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
@@ -95,7 +95,7 @@ public:
 std::span<const VideoCommon::SwizzleParameters> swizzles);

 private:
-VKScheduler& scheduler;
+Scheduler& scheduler;
 StagingBufferPool& staging_buffer_pool;
 VKUpdateDescriptorQueue& update_descriptor_queue;
 MemoryAllocator& memory_allocator;
@@ -91,7 +91,7 @@ ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descript
 }

 void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
-Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
+Tegra::MemoryManager& gpu_memory, Scheduler& scheduler,
 BufferCache& buffer_cache, TextureCache& texture_cache) {
 update_descriptor_queue.Acquire();

@@ -24,7 +24,7 @@ namespace Vulkan {

 class Device;
 class PipelineStatistics;
-class VKScheduler;
+class Scheduler;

 class ComputePipeline {
 public:
@@ -42,7 +42,7 @@ public:
 ComputePipeline(const ComputePipeline&) = delete;

 void Configure(Tegra::Engines::KeplerCompute& kepler_compute, Tegra::MemoryManager& gpu_memory,
-VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache);
+Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache);

 private:
 const Device& device;
@@ -121,7 +121,7 @@ vk::DescriptorSets DescriptorAllocator::AllocateDescriptors(size_t count) {
 throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
 }

-DescriptorPool::DescriptorPool(const Device& device_, VKScheduler& scheduler)
+DescriptorPool::DescriptorPool(const Device& device_, Scheduler& scheduler)
 : device{device_}, master_semaphore{scheduler.GetMasterSemaphore()} {}

 DescriptorPool::~DescriptorPool() = default;
@@ -14,7 +14,7 @@
 namespace Vulkan {

 class Device;
-class VKScheduler;
+class Scheduler;

 struct DescriptorBank;

@@ -62,7 +62,7 @@ private:

 class DescriptorPool {
 public:
-explicit DescriptorPool(const Device& device, VKScheduler& scheduler);
+explicit DescriptorPool(const Device& device, Scheduler& scheduler);
 ~DescriptorPool();

 DescriptorPool& operator=(const DescriptorPool&) = delete;
@@ -11,10 +11,10 @@

 namespace Vulkan {

-InnerFence::InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_)
+InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_)
 : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}

-InnerFence::InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
+InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
 : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}

 InnerFence::~InnerFence() = default;
@@ -44,8 +44,7 @@ void InnerFence::Wait() {

 FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
 TextureCache& texture_cache_, BufferCache& buffer_cache_,
-VKQueryCache& query_cache_, const Device& device_,
-VKScheduler& scheduler_)
+VKQueryCache& query_cache_, const Device& device_, Scheduler& scheduler_)
 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
 scheduler{scheduler_} {}

@@ -21,12 +21,12 @@ namespace Vulkan {

 class Device;
 class VKQueryCache;
-class VKScheduler;
+class Scheduler;

 class InnerFence : public VideoCommon::FenceBase {
 public:
-explicit InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_);
-explicit InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
+explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_);
+explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
 ~InnerFence();

 void Queue();
@@ -36,7 +36,7 @@ public:
 void Wait();

 private:
-VKScheduler& scheduler;
+Scheduler& scheduler;
 u64 wait_tick = 0;
 };
 using Fence = std::shared_ptr<InnerFence>;
@@ -48,7 +48,7 @@ class FenceManager final : public GenericFenceManager {
 public:
 explicit FenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
 TextureCache& texture_cache, BufferCache& buffer_cache,
-VKQueryCache& query_cache, const Device& device, VKScheduler& scheduler);
+VKQueryCache& query_cache, const Device& device, Scheduler& scheduler);

 protected:
 Fence CreateFence(u32 value, bool is_stubbed) override;
@@ -58,7 +58,7 @@ protected:
 void WaitFence(Fence& fence) override;

 private:
-VKScheduler& scheduler;
+Scheduler& scheduler;
 };

 } // namespace Vulkan
@@ -172,7 +172,7 @@ FSR::FSR(const Device& device_, MemoryAllocator& memory_allocator_, size_t image
 CreatePipeline();
 }

-VkImageView FSR::Draw(VKScheduler& scheduler, size_t image_index, VkImageView image_view,
+VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
 VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect) {

 UpdateDescriptorSet(image_index, image_view);
@@ -10,13 +10,13 @@
 namespace Vulkan {

 class Device;
-class VKScheduler;
+class Scheduler;

 class FSR {
 public:
 explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
 VkExtent2D output_size);
-VkImageView Draw(VKScheduler& scheduler, size_t image_index, VkImageView image_view,
+VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
 VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect);

 private:
@@ -215,8 +215,8 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
 } // Anonymous namespace

 GraphicsPipeline::GraphicsPipeline(
-Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
-VKScheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
+Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_,
+BufferCache& buffer_cache_, TextureCache& texture_cache_,
 VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
 VKUpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
 PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
@@ -62,7 +62,7 @@ class Device;
 class PipelineStatistics;
 class RenderPassCache;
 class RescalingPushConstant;
-class VKScheduler;
+class Scheduler;
 class VKUpdateDescriptorQueue;

 class GraphicsPipeline {
@@ -71,7 +71,7 @@ class GraphicsPipeline {
 public:
 explicit GraphicsPipeline(
 Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
-VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
+Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
 VideoCore::ShaderNotify* shader_notify, const Device& device,
 DescriptorPool& descriptor_pool, VKUpdateDescriptorQueue& update_descriptor_queue,
 Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics,
@@ -125,7 +125,7 @@ private:
 const Device& device;
 TextureCache& texture_cache;
 BufferCache& buffer_cache;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 VKUpdateDescriptorQueue& update_descriptor_queue;

 void (*configure_func)(GraphicsPipeline*, bool){};
@@ -262,7 +262,7 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
 PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
 Tegra::Engines::KeplerCompute& kepler_compute_,
 Tegra::MemoryManager& gpu_memory_, const Device& device_,
-VKScheduler& scheduler_, DescriptorPool& descriptor_pool_,
+Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
 TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
@@ -81,7 +81,7 @@ class Device;
 class PipelineStatistics;
 class RasterizerVulkan;
 class RenderPassCache;
-class VKScheduler;
+class Scheduler;
 class VKUpdateDescriptorQueue;

 using VideoCommon::ShaderInfo;
@@ -103,7 +103,7 @@ public:
 explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
 Tegra::Engines::KeplerCompute& kepler_compute,
 Tegra::MemoryManager& gpu_memory, const Device& device,
-VKScheduler& scheduler, DescriptorPool& descriptor_pool,
+Scheduler& scheduler, DescriptorPool& descriptor_pool,
 VKUpdateDescriptorQueue& update_descriptor_queue,
 RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
 TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
@@ -138,7 +138,7 @@ private:
 bool build_in_parallel);

 const Device& device;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 DescriptorPool& descriptor_pool;
 VKUpdateDescriptorQueue& update_descriptor_queue;
 RenderPassCache& render_pass_cache;
@@ -26,7 +26,7 @@ constexpr VkQueryType GetTarget(QueryType type) {

 } // Anonymous namespace

-QueryPool::QueryPool(const Device& device_, VKScheduler& scheduler, QueryType type_)
+QueryPool::QueryPool(const Device& device_, Scheduler& scheduler, QueryType type_)
 : ResourcePool{scheduler.GetMasterSemaphore(), GROW_STEP}, device{device_}, type{type_} {}

 QueryPool::~QueryPool() = default;
@@ -67,7 +67,7 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {

 VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer_,
 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
-const Device& device_, VKScheduler& scheduler_)
+const Device& device_, Scheduler& scheduler_)
 : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_},
 query_pools{
 QueryPool{device_, scheduler_, QueryType::SamplesPassed},
@@ -23,13 +23,13 @@ class CachedQuery;
 class Device;
 class HostCounter;
 class VKQueryCache;
-class VKScheduler;
+class Scheduler;

 using CounterStream = VideoCommon::CounterStreamBase<VKQueryCache, HostCounter>;

 class QueryPool final : public ResourcePool {
 public:
-explicit QueryPool(const Device& device, VKScheduler& scheduler, VideoCore::QueryType type);
+explicit QueryPool(const Device& device, Scheduler& scheduler, VideoCore::QueryType type);
 ~QueryPool() override;

 std::pair<VkQueryPool, u32> Commit();
@@ -54,7 +54,7 @@ class VKQueryCache final
 public:
 explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer_,
 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
-const Device& device_, VKScheduler& scheduler_);
+const Device& device_, Scheduler& scheduler_);
 ~VKQueryCache();

 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
@@ -65,13 +65,13 @@ public:
 return device;
 }

-VKScheduler& GetScheduler() const noexcept {
+Scheduler& GetScheduler() const noexcept {
 return scheduler;
 }

 private:
 const Device& device;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
 };

@@ -142,9 +142,9 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan

 RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
 Tegra::MemoryManager& gpu_memory_,
-Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_,
+Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
 const Device& device_, MemoryAllocator& memory_allocator_,
-StateTracker& state_tracker_, VKScheduler& scheduler_)
+StateTracker& state_tracker_, Scheduler& scheduler_)
 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_},
 gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()},
 screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_},
@@ -38,7 +38,7 @@ class Maxwell3D;

 namespace Vulkan {

-struct VKScreenInfo;
+struct ScreenInfo;

 class StateTracker;

@@ -58,9 +58,9 @@ class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
 public:
 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
-VKScreenInfo& screen_info_, const Device& device_,
+ScreenInfo& screen_info_, const Device& device_,
 MemoryAllocator& memory_allocator_, StateTracker& state_tracker_,
-VKScheduler& scheduler_);
+Scheduler& scheduler_);
 ~RasterizerVulkan() override;

 void Draw(bool is_indexed, bool is_instanced) override;
@@ -138,11 +138,11 @@ private:
 Tegra::Engines::Maxwell3D& maxwell3d;
 Tegra::Engines::KeplerCompute& kepler_compute;

-VKScreenInfo& screen_info;
+ScreenInfo& screen_info;
 const Device& device;
 MemoryAllocator& memory_allocator;
 StateTracker& state_tracker;
-VKScheduler& scheduler;
+Scheduler& scheduler;

 StagingBufferPool staging_pool;
 DescriptorPool descriptor_pool;
@@ -21,7 +21,7 @@ namespace Vulkan {

 MICROPROFILE_DECLARE(Vulkan_WaitForWorker);

-void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
+void Scheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
 auto command = first;
 while (command != nullptr) {
 auto next = command->GetNext();
@@ -35,7 +35,7 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
 last = nullptr;
 }

-VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
+Scheduler::Scheduler(const Device& device_, StateTracker& state_tracker_)
 : device{device_}, state_tracker{state_tracker_},
 master_semaphore{std::make_unique<MasterSemaphore>(device)},
 command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
@@ -44,14 +44,14 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
 worker_thread = std::jthread([this](std::stop_token token) { WorkerThread(token); });
 }

-VKScheduler::~VKScheduler() = default;
+Scheduler::~Scheduler() = default;

-void VKScheduler::Flush(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
+void Scheduler::Flush(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
 SubmitExecution(signal_semaphore, wait_semaphore);
 AllocateNewContext();
 }

-void VKScheduler::Finish(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
+void Scheduler::Finish(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
 const u64 presubmit_tick = CurrentTick();
 SubmitExecution(signal_semaphore, wait_semaphore);
 WaitWorker();
@@ -59,7 +59,7 @@ void VKScheduler::Finish(VkSemaphore signal_semaphore, VkSemaphore wait_semaphor
 AllocateNewContext();
 }

-void VKScheduler::WaitWorker() {
+void Scheduler::WaitWorker() {
 MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
 DispatchWork();

@@ -67,7 +67,7 @@ void VKScheduler::WaitWorker() {
 wait_cv.wait(lock, [this] { return work_queue.empty(); });
 }

-void VKScheduler::DispatchWork() {
+void Scheduler::DispatchWork() {
 if (chunk->Empty()) {
 return;
 }
@@ -79,7 +79,7 @@ void VKScheduler::DispatchWork() {
 AcquireNewChunk();
 }

-void VKScheduler::RequestRenderpass(const Framebuffer* framebuffer) {
+void Scheduler::RequestRenderpass(const Framebuffer* framebuffer) {
 const VkRenderPass renderpass = framebuffer->RenderPass();
 const VkFramebuffer framebuffer_handle = framebuffer->Handle();
 const VkExtent2D render_area = framebuffer->RenderArea();
@@ -114,11 +114,11 @@ void VKScheduler::RequestRenderpass(const Framebuffer* framebuffer) {
 renderpass_image_ranges = framebuffer->ImageRanges();
 }

-void VKScheduler::RequestOutsideRenderPassOperationContext() {
+void Scheduler::RequestOutsideRenderPassOperationContext() {
 EndRenderPass();
 }

-bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
+bool Scheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
 if (state.graphics_pipeline == pipeline) {
 return false;
 }
@@ -126,7 +126,7 @@ bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
 return true;
 }

-bool VKScheduler::UpdateRescaling(bool is_rescaling) {
+bool Scheduler::UpdateRescaling(bool is_rescaling) {
 if (state.rescaling_defined && is_rescaling == state.is_rescaling) {
 return false;
 }
@@ -135,7 +135,7 @@ bool VKScheduler::UpdateRescaling(bool is_rescaling) {
 return true;
 }

-void VKScheduler::WorkerThread(std::stop_token stop_token) {
+void Scheduler::WorkerThread(std::stop_token stop_token) {
 Common::SetCurrentThreadName("yuzu:VulkanWorker");
 do {
 std::unique_ptr<CommandChunk> work;
@@ -161,7 +161,7 @@ void VKScheduler::WorkerThread(std::stop_token stop_token) {
 } while (!stop_token.stop_requested());
 }

-void VKScheduler::AllocateWorkerCommandBuffer() {
+void Scheduler::AllocateWorkerCommandBuffer() {
 current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
 current_cmdbuf.Begin({
 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
@@ -171,7 +171,7 @@ void VKScheduler::AllocateWorkerCommandBuffer() {
 });
 }

-void VKScheduler::SubmitExecution(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
+void Scheduler::SubmitExecution(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
 EndPendingOperations();
 InvalidateState();

@@ -225,25 +225,25 @@ void VKScheduler::SubmitExecution(VkSemaphore signal_semaphore, VkSemaphore wait
 DispatchWork();
 }

-void VKScheduler::AllocateNewContext() {
+void Scheduler::AllocateNewContext() {
 // Enable counters once again. These are disabled when a command buffer is finished.
 if (query_cache) {
 query_cache->UpdateCounters();
 }
 }

-void VKScheduler::InvalidateState() {
+void Scheduler::InvalidateState() {
 state.graphics_pipeline = nullptr;
 state.rescaling_defined = false;
 state_tracker.InvalidateCommandBufferState();
 }

-void VKScheduler::EndPendingOperations() {
+void Scheduler::EndPendingOperations() {
 query_cache->DisableStreams();
 EndRenderPass();
 }

-void VKScheduler::EndRenderPass() {
+void Scheduler::EndRenderPass() {
 if (!state.renderpass) {
 return;
 }
@@ -280,7 +280,7 @@ void VKScheduler::EndRenderPass() {
 num_renderpass_images = 0;
 }

-void VKScheduler::AcquireNewChunk() {
+void Scheduler::AcquireNewChunk() {
 std::scoped_lock lock{reserve_mutex};
 if (chunk_reserve.empty()) {
 chunk = std::make_unique<CommandChunk>();
@@ -26,10 +26,10 @@ class VKQueryCache;

 /// The scheduler abstracts command buffer and fence management with an interface that's able to do
 /// OpenGL-like operations on Vulkan command buffers.
-class VKScheduler {
+class Scheduler {
 public:
-explicit VKScheduler(const Device& device, StateTracker& state_tracker);
-~VKScheduler();
+explicit Scheduler(const Device& device, StateTracker& state_tracker);
+~Scheduler();

 /// Sends the current execution context to the GPU.
 void Flush(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
@@ -85,7 +85,7 @@ size_t Region(size_t iterator) noexcept {
 } // Anonymous namespace

 StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
-VKScheduler& scheduler_)
+Scheduler& scheduler_)
 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {
 const vk::Device& dev = device.GetLogical();
 stream_buffer = dev.CreateBuffer(VkBufferCreateInfo{
@@ -14,7 +14,7 @@
 namespace Vulkan {

 class Device;
-class VKScheduler;
+class Scheduler;

 struct StagingBufferRef {
 VkBuffer buffer;
@@ -27,7 +27,7 @@ public:
 static constexpr size_t NUM_SYNCS = 16;

 explicit StagingBufferPool(const Device& device, MemoryAllocator& memory_allocator,
-VKScheduler& scheduler);
+Scheduler& scheduler);
 ~StagingBufferPool();

 StagingBufferRef Request(size_t size, MemoryUsage usage);
@@ -82,7 +82,7 @@ private:

 const Device& device;
 MemoryAllocator& memory_allocator;
-VKScheduler& scheduler;
+Scheduler& scheduler;

 vk::Buffer stream_buffer;
 vk::DeviceMemory stream_memory;
@@ -64,7 +64,7 @@ VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 wi

 } // Anonymous namespace

-VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, VKScheduler& scheduler_,
+VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, Scheduler& scheduler_,
 u32 width, u32 height, bool srgb)
 : surface{surface_}, device{device_}, scheduler{scheduler_} {
 Create(width, height, srgb);
@@ -15,11 +15,11 @@ struct FramebufferLayout;
 namespace Vulkan {

 class Device;
-class VKScheduler;
+class Scheduler;

 class VKSwapchain {
 public:
-explicit VKSwapchain(VkSurfaceKHR surface, const Device& device, VKScheduler& scheduler,
+explicit VKSwapchain(VkSurfaceKHR surface, const Device& device, Scheduler& scheduler,
 u32 width, u32 height, bool srgb);
 ~VKSwapchain();

@@ -94,7 +94,7 @@ private:

 const VkSurfaceKHR surface;
 const Device& device;
-VKScheduler& scheduler;
+Scheduler& scheduler;

 vk::SwapchainKHR swapchain;

@@ -648,7 +648,7 @@ struct RangedBarrierRange {
 return VK_FORMAT_R32_UINT;
 }

-void BlitScale(VKScheduler& scheduler, VkImage src_image, VkImage dst_image, const ImageInfo& info,
+void BlitScale(Scheduler& scheduler, VkImage src_image, VkImage dst_image, const ImageInfo& info,
 VkImageAspectFlags aspect_mask, const Settings::ResolutionScalingInfo& resolution,
 bool up_scaling = true) {
 const bool is_2d = info.type == ImageType::e2D;
@@ -788,7 +788,7 @@ void BlitScale(VKScheduler& scheduler, VkImage src_image, VkImage dst_image, con
 }
 } // Anonymous namespace

-TextureCacheRuntime::TextureCacheRuntime(const Device& device_, VKScheduler& scheduler_,
+TextureCacheRuntime::TextureCacheRuntime(const Device& device_, Scheduler& scheduler_,
 MemoryAllocator& memory_allocator_,
 StagingBufferPool& staging_buffer_pool_,
 BlitImageHelper& blit_image_helper_,
@@ -33,11 +33,11 @@ class ImageView;
 class Framebuffer;
 class RenderPassCache;
 class StagingBufferPool;
-class VKScheduler;
+class Scheduler;

 class TextureCacheRuntime {
 public:
-explicit TextureCacheRuntime(const Device& device_, VKScheduler& scheduler_,
+explicit TextureCacheRuntime(const Device& device_, Scheduler& scheduler_,
 MemoryAllocator& memory_allocator_,
 StagingBufferPool& staging_buffer_pool_,
 BlitImageHelper& blit_image_helper_,
@@ -93,7 +93,7 @@ public:
 [[nodiscard]] VkBuffer GetTemporaryBuffer(size_t needed_size);

 const Device& device;
-VKScheduler& scheduler;
+Scheduler& scheduler;
 MemoryAllocator& memory_allocator;
 StagingBufferPool& staging_buffer_pool;
 BlitImageHelper& blit_image_helper;
@@ -154,7 +154,7 @@ private:

 bool NeedsScaleHelper() const;

-VKScheduler* scheduler{};
+Scheduler* scheduler{};
 TextureCacheRuntime* runtime{};

 vk::Image original_image;
@@ -12,7 +12,7 @@

 namespace Vulkan {

-VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_)
+VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, Scheduler& scheduler_)
 : device{device_}, scheduler{scheduler_} {
 payload_cursor = payload.data();
 }
@@ -10,7 +10,7 @@
 namespace Vulkan {

 class Device;
-class VKScheduler;
+class Scheduler;

 struct DescriptorUpdateEntry {
 struct Empty {};
@@ -30,7 +30,7 @@ struct DescriptorUpdateEntry {

 class VKUpdateDescriptorQueue final {
 public:
-explicit VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_);
+explicit VKUpdateDescriptorQueue(const Device& device_, Scheduler& scheduler_);
 ~VKUpdateDescriptorQueue();

 void TickFrame();
@@ -71,7 +71,7 @@ public:

 private:
 const Device& device;
-VKScheduler& scheduler;
+Scheduler& scheduler;

 DescriptorUpdateEntry* payload_cursor = nullptr;
 const DescriptorUpdateEntry* upload_start = nullptr;