Merge pull request #5311 from ReinUsesLisp/fence-wait

vk_fence_manager: Use timeline semaphores instead of spin waits
commit de1a316369
Author: bunnei
Date:   2021-01-12 21:00:05 -08:00 (committed by GitHub)
3 changed files with 18 additions and 54 deletions
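
In short: the old InnerFence owned a VkEvent, set it from the command stream, and spin-waited on the host with std::this_thread::yield() until the event reported VK_EVENT_SET. The new implementation stores the scheduler tick current at queue time and lets VKScheduler block on its Vulkan 1.2 timeline semaphore instead. The scheduler side is not part of this diff; the sketch below shows the core timeline-semaphore API such a scheduler builds on. The helper names (CreateTimelineSemaphore, IsFree, Wait) and the bare VkDevice parameters are illustrative, not identifiers from the commit.

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// Create a timeline semaphore: a 64-bit counter the GPU signals monotonically.
VkSemaphore CreateTimelineSemaphore(VkDevice device) {
    const VkSemaphoreTypeCreateInfo type_info{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
        .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
        .initialValue = 0,
    };
    const VkSemaphoreCreateInfo create_info{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = &type_info,
    };
    VkSemaphore semaphore{};
    vkCreateSemaphore(device, &create_info, nullptr, &semaphore); // error handling omitted
    return semaphore;
}

// Non-blocking query, analogous to scheduler.IsFree(wait_tick).
bool IsFree(VkDevice device, VkSemaphore timeline, uint64_t tick) {
    uint64_t counter = 0;
    vkGetSemaphoreCounterValue(device, timeline, &counter);
    return counter >= tick;
}

// Blocking wait, analogous to scheduler.Wait(wait_tick): the driver parks the
// thread until the counter reaches `tick`; no host-side spin loop.
void Wait(VkDevice device, VkSemaphore timeline, uint64_t tick) {
    const VkSemaphoreWaitInfo wait_info{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
        .semaphoreCount = 1,
        .pSemaphores = &timeline,
        .pValues = &tick,
    };
    vkWaitSemaphores(device, &wait_info, UINT64_MAX);
}
```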

src/video_core/renderer_vulkan/vk_fence_manager.cpp

@@ -3,7 +3,6 @@
 // Refer to the license.txt file included.

 #include <memory>
-#include <thread>

 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_fence_manager.h"
@@ -14,13 +13,11 @@
 namespace Vulkan {

-InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_,
-                       bool is_stubbed_)
-    : FenceBase{payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_)
+    : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}

-InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
-                       u32 payload_, bool is_stubbed_)
-    : FenceBase{address_, payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
+    : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}

 InnerFence::~InnerFence() = default;
@@ -28,63 +25,38 @@ void InnerFence::Queue() {
     if (is_stubbed) {
         return;
     }
-    ASSERT(!event);
-
-    event = device.GetLogical().CreateEvent();
-    ticks = scheduler.CurrentTick();
-
-    scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
-        cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
-    });
+    // Get the current tick so we can wait for it
+    wait_tick = scheduler.CurrentTick();
+    scheduler.Flush();
 }

 bool InnerFence::IsSignaled() const {
     if (is_stubbed) {
         return true;
     }
-    ASSERT(event);
-    return IsEventSignalled();
+    return scheduler.IsFree(wait_tick);
 }

 void InnerFence::Wait() {
     if (is_stubbed) {
         return;
     }
-    ASSERT(event);
-
-    if (ticks >= scheduler.CurrentTick()) {
-        scheduler.Flush();
-    }
-    while (!IsEventSignalled()) {
-        std::this_thread::yield();
-    }
-}
-
-bool InnerFence::IsEventSignalled() const {
-    switch (const VkResult result = event.GetStatus()) {
-    case VK_EVENT_SET:
-        return true;
-    case VK_EVENT_RESET:
-        return false;
-    default:
-        throw vk::Exception(result);
-    }
 }

 VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
                                Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
                                VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
-                               const Device& device_, VKScheduler& scheduler_)
+                               VKScheduler& scheduler_)
     : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
-      device{device_}, scheduler{scheduler_} {}
+      scheduler{scheduler_} {}

 Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
-    return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
+    return std::make_shared<InnerFence>(scheduler, value, is_stubbed);
 }

 Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
-    return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed);
+    return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
 }

 void VKFenceManager::QueueFence(Fence& fence) {
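
Where ticks come from, for context: each scheduler submission signals the timeline semaphore with a monotonically increasing 64-bit value, so a stored wait_tick eventually becomes reachable. A minimal sketch of that signalling side, assuming raw vkQueueSubmit rather than yuzu's vk::* wrappers and a caller-managed tick counter:

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// Hypothetical submit helper: signals `timeline` with `tick` once the GPU
// finishes `cmdbuf`. yuzu performs the equivalent inside VKScheduler.
void SubmitAndSignal(VkQueue queue, VkCommandBuffer cmdbuf, VkSemaphore timeline,
                     uint64_t tick) {
    const VkTimelineSemaphoreSubmitInfo timeline_info{
        .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
        .signalSemaphoreValueCount = 1,
        .pSignalSemaphoreValues = &tick,
    };
    const VkSubmitInfo submit_info{
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = &timeline_info,
        .commandBufferCount = 1,
        .pCommandBuffers = &cmdbuf,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &timeline,
    };
    vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); // error handling omitted
}
```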

src/video_core/renderer_vulkan/vk_fence_manager.h

@@ -28,10 +28,8 @@ class VKScheduler;
 class InnerFence : public VideoCommon::FenceBase {
 public:
-    explicit InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_,
-                        bool is_stubbed_);
-    explicit InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
-                        u32 payload_, bool is_stubbed_);
+    explicit InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_);
+    explicit InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
     ~InnerFence();

     void Queue();
@@ -41,12 +39,8 @@ public:
     void Wait();

 private:
-    bool IsEventSignalled() const;
-
-    const Device& device;
     VKScheduler& scheduler;
-    vk::Event event;
-    u64 ticks = 0;
+    u64 wait_tick = 0;
 };

 using Fence = std::shared_ptr<InnerFence>;
@@ -58,7 +52,7 @@ public:
     explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
                             Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
                             VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
-                            const Device& device_, VKScheduler& scheduler_);
+                            VKScheduler& scheduler_);

 protected:
     Fence CreateFence(u32 value, bool is_stubbed) override;
@@ -68,7 +62,6 @@ protected:
     void WaitFence(Fence& fence) override;

 private:
-    const Device& device;
     VKScheduler& scheduler;
 };
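
With the VkEvent gone, the fence keeps no Vulkan objects at all, which is why the const Device& members and parameters disappear across all three files. The call sequence driven by the shared VideoCommon::FenceManager base then reduces to roughly the following (a sketch: CreateFence and QueueFence are protected overrides invoked by the generic manager, not by client code, and `payload` is illustrative):

```cpp
// Fence lifecycle under the new design, as seen from inside the generic
// fence manager.
Fence fence = CreateFence(payload, /*is_stubbed=*/false);
fence->Queue();              // records scheduler.CurrentTick(), then flushes
if (!fence->IsSignaled()) {  // cheap tick comparison via scheduler.IsFree()
    fence->Wait();           // scheduler.Wait(): blocks in the driver,
}                            // no host-side spin loop
```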

src/video_core/renderer_vulkan/vk_rasterizer.cpp

@@ -428,8 +428,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_manager, scheduler, stream_buffer,
                    staging_pool),
       query_cache{*this, maxwell3d, gpu_memory, device, scheduler},
-      fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device,
-                    scheduler),
+      fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, scheduler),
       wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) {
     scheduler.SetQueryCache(query_cache);
     if (device.UseAsynchronousShaders()) {