GS/Vulkan: Combine GSDeviceVK and VKContext

Less indirection.
This commit is contained in:
Stenzek 2023-07-13 21:37:46 +10:00 committed by Connor McLaughlin
parent e2fc68ff2d
commit 9678bf1e2f
19 changed files with 2919 additions and 3401 deletions

View File

@ -654,25 +654,21 @@ if(USE_VULKAN)
GS/Renderers/Vulkan/GSTextureVK.cpp
GS/Renderers/Vulkan/vk_mem_alloc.cpp
GS/Renderers/Vulkan/VKBuilders.cpp
GS/Renderers/Vulkan/VKContext.cpp
GS/Renderers/Vulkan/VKLoader.cpp
GS/Renderers/Vulkan/VKShaderCache.cpp
GS/Renderers/Vulkan/VKStreamBuffer.cpp
GS/Renderers/Vulkan/VKSwapChain.cpp
GS/Renderers/Vulkan/VKUtil.cpp
)
list(APPEND pcsx2GSHeaders
GS/Renderers/Vulkan/GSDeviceVK.h
GS/Renderers/Vulkan/GSTextureVK.h
GS/Renderers/Vulkan/VKBuilders.h
GS/Renderers/Vulkan/VKContext.h
GS/Renderers/Vulkan/VKEntryPoints.h
GS/Renderers/Vulkan/VKEntryPoints.inl
GS/Renderers/Vulkan/VKLoader.h
GS/Renderers/Vulkan/VKShaderCache.h
GS/Renderers/Vulkan/VKStreamBuffer.h
GS/Renderers/Vulkan/VKSwapChain.h
GS/Renderers/Vulkan/VKUtil.h
)
target_link_libraries(PCSX2_FLAGS INTERFACE Vulkan-Headers glslang)
endif()

File diff suppressed because it is too large Load Diff

View File

@ -22,17 +22,317 @@
#include "GS/GSVector.h"
#include "common/HashCombine.h"
#include "common/ReadbackSpinManager.h"
#include <array>
#include <unordered_map>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
namespace Vulkan
{
class VKSwapChain;
}
class GSDeviceVK final : public GSDevice
{
public:
enum : u32
{
NUM_COMMAND_BUFFERS = 3,
};
struct OptionalExtensions
{
bool vk_ext_provoking_vertex : 1;
bool vk_ext_memory_budget : 1;
bool vk_ext_calibrated_timestamps : 1;
bool vk_ext_line_rasterization : 1;
bool vk_ext_rasterization_order_attachment_access : 1;
bool vk_ext_attachment_feedback_loop_layout : 1;
bool vk_ext_full_screen_exclusive : 1;
bool vk_khr_driver_properties : 1;
bool vk_khr_fragment_shader_barycentric : 1;
bool vk_khr_shader_draw_parameters : 1;
};
// Global state accessors
__fi VkInstance GetVulkanInstance() const { return m_instance; }
__fi VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
__fi VkDevice GetDevice() const { return m_device; }
__fi VmaAllocator GetAllocator() const { return m_allocator; }
__fi u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
__fi u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
__fi const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
__fi const OptionalExtensions& GetOptionalExtensions() const { return m_optional_extensions; }
// The interaction between raster order attachment access and fbfetch is unclear.
__fi bool UseFeedbackLoopLayout() const
{
return (m_optional_extensions.vk_ext_attachment_feedback_loop_layout &&
!m_optional_extensions.vk_ext_rasterization_order_attachment_access);
}
// Helpers for getting constants
__fi u32 GetBufferCopyOffsetAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyOffsetAlignment);
}
__fi u32 GetBufferCopyRowPitchAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyRowPitchAlignment);
}
/// Returns true if running on an NVIDIA GPU.
__fi bool IsDeviceNVIDIA() const { return (m_device_properties.vendorID == 0x10DE); }
// Creates a simple render pass.
VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format,
VkAttachmentLoadOp color_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp color_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp depth_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp depth_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp stencil_load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VkAttachmentStoreOp stencil_store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE, bool color_feedback_loop = false,
bool depth_sampling = false);
// Gets a non-clearing version of the specified render pass. Slow, don't call in hot path.
VkRenderPass GetRenderPassForRestarting(VkRenderPass pass);
// These command buffers are allocated per-frame. They are valid until the command buffer
// is submitted, after that you should call these functions again.
__fi VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
__fi VKStreamBuffer& GetTextureUploadBuffer() { return m_texture_stream_buffer; }
VkCommandBuffer GetCurrentInitCommandBuffer();
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocatePersistentDescriptorSet(VkDescriptorSetLayout set_layout);
/// Frees a descriptor set allocated from the global pool.
void FreePersistentDescriptorSet(VkDescriptorSet set);
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
__fi VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Fence "counters" are used to track which commands have been completed by the GPU.
// If the last completed fence counter is greater or equal to N, it means that the work
// associated counter N has been completed by the GPU. The value of N to associate with
// commands can be retrieved by calling GetCurrentFenceCounter().
u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }
// Gets the fence counter associated with the currently executing command buffer.
// Work recorded in this buffer is complete once GetCompletedFenceCounter() reaches this value.
u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
// Schedule a vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
void DeferBufferDestruction(VkBuffer object, VmaAllocation allocation);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object, VmaAllocation allocation);
void DeferImageViewDestruction(VkImageView object);
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFenceCounter(u64 fence_counter);
void WaitForGPUIdle();
private:
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(const WindowInfo& wi, bool enable_debug_utils, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<std::pair<VkPhysicalDevice, std::string>>;
static GPUList EnumerateGPUs(VkInstance instance);
static void GPUListToAdapterNames(std::vector<std::string>* dest, VkInstance instance);
// Enable/disable debug message runtime.
bool EnableDebugUtils();
void DisableDebugUtils();
void SubmitCommandBuffer(VKSwapChain* present_swap_chain = nullptr, bool submit_on_thread = false);
void MoveToNextCommandBuffer();
enum class WaitType
{
None,
Sleep,
Spin,
};
static WaitType GetWaitType(bool wait, bool spin);
void ExecuteCommandBuffer(WaitType wait_for_completion);
void WaitForPresentComplete();
// Was the last present submitted to the queue a failure? If so, we must recreate our swapchain.
bool CheckLastPresentFail();
bool CheckLastSubmitFail();
// Allocates a temporary CPU staging buffer, fires the callback with it to populate, then copies to a GPU buffer.
bool AllocatePreinitializedGPUBuffer(u32 size, VkBuffer* gpu_buffer, VmaAllocation* gpu_allocation,
VkBufferUsageFlags gpu_usage, const std::function<void(void*)>& fill_callback);
union RenderPassCacheKey
{
struct
{
u32 color_format : 8;
u32 depth_format : 8;
u32 color_load_op : 2;
u32 color_store_op : 1;
u32 depth_load_op : 2;
u32 depth_store_op : 1;
u32 stencil_load_op : 2;
u32 stencil_store_op : 1;
u32 color_feedback_loop : 1;
u32 depth_sampling : 1;
};
u32 key;
};
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(
ExtensionList* extension_list, const WindowInfo& wi, bool enable_debug_utils);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface);
bool SelectDeviceFeatures();
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer);
void ProcessDeviceExtensions();
bool CreateAllocator();
bool CreateCommandBuffers();
bool CreateGlobalDescriptorPool();
VkRenderPass CreateCachedRenderPass(RenderPassCacheKey key);
void CommandBufferCompleted(u32 index);
void ActivateCommandBuffer(u32 index);
void ScanForCommandBufferCompletion();
void WaitForCommandBufferCompletion(u32 index);
void DoSubmitCommandBuffer(u32 index, VKSwapChain* present_swap_chain, u32 spin_cycles);
void DoPresent(VKSwapChain* present_swap_chain);
void WaitForPresentComplete(std::unique_lock<std::mutex>& lock);
void PresentThread();
void StartPresentThread();
void StopPresentThread();
bool InitSpinResources();
void DestroySpinResources();
void WaitForSpinCompletion(u32 index);
void SpinCommandCompleted(u32 index);
void SubmitSpinCommand(u32 index, u32 cycles);
void CalibrateSpinTimestamp();
u64 GetCPUTimestamp();
struct FrameResources
{
// [0] - Init (upload) command buffer, [1] - draw command buffer
VkCommandPool command_pool = VK_NULL_HANDLE;
std::array<VkCommandBuffer, 2> command_buffers{VK_NULL_HANDLE, VK_NULL_HANDLE};
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u64 fence_counter = 0;
s32 spin_id = -1;
u32 submit_timestamp = 0;
bool init_buffer_used = false;
bool needs_fence_wait = false;
bool timestamp_written = false;
std::vector<std::function<void()>> cleanup_resources;
};
struct SpinResources
{
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkSemaphore semaphore = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u32 cycles = 0;
bool in_progress = false;
};
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VmaAllocator m_allocator = VK_NULL_HANDLE;
VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;
VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
VkQueue m_present_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
u32 m_present_queue_family_index = 0;
ReadbackSpinManager m_spin_manager;
VkQueue m_spin_queue = VK_NULL_HANDLE;
VkDescriptorSetLayout m_spin_descriptor_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_spin_pipeline_layout = VK_NULL_HANDLE;
VkPipeline m_spin_pipeline = VK_NULL_HANDLE;
VkBuffer m_spin_buffer = VK_NULL_HANDLE;
VmaAllocation m_spin_buffer_allocation = VK_NULL_HANDLE;
VkDescriptorSet m_spin_descriptor_set = VK_NULL_HANDLE;
std::array<SpinResources, NUM_COMMAND_BUFFERS> m_spin_resources;
#ifdef _WIN32
double m_queryperfcounter_to_ns = 0;
#endif
double m_spin_timestamp_scale = 0;
double m_spin_timestamp_offset = 0;
u32 m_spin_queue_family_index = 0;
u32 m_command_buffer_render_passes = 0;
u32 m_spin_timer = 0;
bool m_spinning_supported = false;
bool m_spin_queue_is_graphics_queue = false;
bool m_spin_buffer_initialized = false;
VkQueryPool m_timestamp_query_pool = VK_NULL_HANDLE;
float m_accumulated_gpu_time = 0.0f;
bool m_gpu_timing_enabled = false;
bool m_gpu_timing_supported = false;
bool m_wants_new_timestamp_calibration = false;
VkTimeDomainEXT m_calibrated_timestamp_type = VK_TIME_DOMAIN_DEVICE_EXT;
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources;
u64 m_next_fence_counter = 1;
u64 m_completed_fence_counter = 0;
u32 m_current_frame = 0;
std::atomic_bool m_last_submit_failed{false};
std::atomic_bool m_last_present_failed{false};
std::atomic_bool m_present_done{true};
std::mutex m_present_mutex;
std::condition_variable m_present_queued_cv;
std::condition_variable m_present_done_cv;
std::thread m_present_thread;
std::atomic_bool m_present_thread_done{false};
struct QueuedPresent
{
VKSwapChain* swap_chain;
u32 command_buffer_index;
u32 spin_cycles;
};
QueuedPresent m_queued_present = {};
std::map<u32, VkRenderPass> m_render_pass_cache;
VkDebugUtilsMessengerEXT m_debug_messenger_callback = VK_NULL_HANDLE;
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceDriverPropertiesKHR m_device_driver_properties = {};
OptionalExtensions m_optional_extensions = {};
public:
enum FeedbackLoopFlag : u8
{
@ -95,11 +395,6 @@ public:
NUM_CONVERT_SAMPLERS = 1,
CONVERT_PUSH_CONSTANTS_SIZE = 96,
VERTEX_BUFFER_SIZE = 32 * 1024 * 1024,
INDEX_BUFFER_SIZE = 16 * 1024 * 1024,
VERTEX_UNIFORM_BUFFER_SIZE = 8 * 1024 * 1024,
FRAGMENT_UNIFORM_BUFFER_SIZE = 8 * 1024 * 1024,
NUM_CAS_PIPELINES = 2,
};
enum TFX_DESCRIPTOR_SET : u32
@ -132,6 +427,7 @@ private:
VKStreamBuffer m_index_stream_buffer;
VKStreamBuffer m_vertex_uniform_stream_buffer;
VKStreamBuffer m_fragment_uniform_stream_buffer;
VKStreamBuffer m_texture_stream_buffer;
VkBuffer m_expand_index_buffer = VK_NULL_HANDLE;
VmaAllocation m_expand_index_buffer_allocation = VK_NULL_HANDLE;
@ -397,7 +693,6 @@ private:
// Which bindings/state has to be updated before the next draw.
u32 m_dirty_flags = 0;
FeedbackLoopFlag m_current_framebuffer_feedback_loop = FeedbackLoopFlag_None;
bool m_has_feedback_loop_layout = false;
bool m_warned_slow_spin = false;
// input assembly

View File

@ -18,8 +18,6 @@
#include "GS/Renderers/Vulkan/GSDeviceVK.h"
#include "GS/Renderers/Vulkan/GSTextureVK.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/GSPerfMon.h"
#include "GS/GSGL.h"
@ -47,14 +45,14 @@ static VkImageLayout GetVkImageLayout(GSTextureVK::Layout layout)
VK_IMAGE_LAYOUT_GENERAL, // ComputeReadWriteImage
VK_IMAGE_LAYOUT_GENERAL, // General
}};
return (layout == GSTextureVK::Layout::FeedbackLoop && g_vulkan_context->UseFeedbackLoopLayout()) ?
return (layout == GSTextureVK::Layout::FeedbackLoop && GSDeviceVK::GetInstance()->UseFeedbackLoopLayout()) ?
VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT :
s_vk_layout_mapping[static_cast<u32>(layout)];
}
static VkAccessFlagBits GetFeedbackLoopInputAccessBits()
{
return g_vulkan_context->UseFeedbackLoopLayout() ? VK_ACCESS_SHADER_READ_BIT : VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
return GSDeviceVK::GetInstance()->UseFeedbackLoopLayout() ? VK_ACCESS_SHADER_READ_BIT : VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
}
GSTextureVK::GSTextureVK(Type type, Format format, int width, int height, int levels, VkImage image,
@ -115,7 +113,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, Format format, int w
pxAssert(levels == 1);
ici.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
(g_vulkan_context->UseFeedbackLoopLayout() ? VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT :
(GSDeviceVK::GetInstance()->UseFeedbackLoopLayout() ? VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT :
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
}
break;
@ -126,7 +124,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, Format format, int w
ici.usage =
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
(g_vulkan_context->UseFeedbackLoopLayout() ? VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT : 0);
(GSDeviceVK::GetInstance()->UseFeedbackLoopLayout() ? VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT : 0);
vci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
}
break;
@ -149,12 +147,12 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, Format format, int w
VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(g_vulkan_context->GetAllocator(), &ici, &aci, &image, &allocation, nullptr);
VkResult res = vmaCreateImage(GSDeviceVK::GetInstance()->GetAllocator(), &ici, &aci, &image, &allocation, nullptr);
if (aci.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT && res != VK_SUCCESS)
{
// try without dedicated allocation
aci.flags &= ~VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
res = vmaCreateImage(g_vulkan_context->GetAllocator(), &ici, &aci, &image, &allocation, nullptr);
res = vmaCreateImage(GSDeviceVK::GetInstance()->GetAllocator(), &ici, &aci, &image, &allocation, nullptr);
}
if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
{
@ -169,30 +167,30 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, Format format, int w
VkImageView view = VK_NULL_HANDLE;
vci.image = image;
res = vkCreateImageView(g_vulkan_context->GetDevice(), &vci, nullptr, &view);
res = vkCreateImageView(GSDeviceVK::GetInstance()->GetDevice(), &vci, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
vmaDestroyImage(g_vulkan_context->GetAllocator(), image, allocation);
vmaDestroyImage(GSDeviceVK::GetInstance()->GetAllocator(), image, allocation);
return {};
}
switch (type)
{
case Type::Texture:
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), image, "%dx%d texture", width, height);
Vulkan::SetObjectName(GSDeviceVK::GetInstance()->GetDevice(), image, "%dx%d texture", width, height);
break;
case Type::RenderTarget:
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), image, "%dx%d render target", width, height);
Vulkan::SetObjectName(GSDeviceVK::GetInstance()->GetDevice(), image, "%dx%d render target", width, height);
break;
case Type::DepthStencil:
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), image, "%dx%d depth stencil", width, height);
Vulkan::SetObjectName(GSDeviceVK::GetInstance()->GetDevice(), image, "%dx%d depth stencil", width, height);
break;
case Type::RWTexture:
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), image, "%dx%d RW texture", width, height);
Vulkan::SetObjectName(GSDeviceVK::GetInstance()->GetDevice(), image, "%dx%d RW texture", width, height);
break;
default:
@ -215,7 +213,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Adopt(
// Memory is managed by the owner of the image.
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
VkResult res = vkCreateImageView(GSDeviceVK::GetInstance()->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
@ -248,9 +246,9 @@ void GSTextureVK::Destroy(bool defer)
}
if (defer)
g_vulkan_context->DeferFramebufferDestruction(fb);
GSDeviceVK::GetInstance()->DeferFramebufferDestruction(fb);
else
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr);
vkDestroyFramebuffer(GSDeviceVK::GetInstance()->GetDevice(), fb, nullptr);
}
m_framebuffers.clear();
}
@ -258,9 +256,9 @@ void GSTextureVK::Destroy(bool defer)
if (m_view != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferImageViewDestruction(m_view);
GSDeviceVK::GetInstance()->DeferImageViewDestruction(m_view);
else
vkDestroyImageView(g_vulkan_context->GetDevice(), m_view, nullptr);
vkDestroyImageView(GSDeviceVK::GetInstance()->GetDevice(), m_view, nullptr);
m_view = VK_NULL_HANDLE;
}
@ -268,9 +266,9 @@ void GSTextureVK::Destroy(bool defer)
if (m_allocation != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferImageDestruction(m_image, m_allocation);
GSDeviceVK::GetInstance()->DeferImageDestruction(m_image, m_allocation);
else
vmaDestroyImage(g_vulkan_context->GetAllocator(), m_image, m_allocation);
vmaDestroyImage(GSDeviceVK::GetInstance()->GetAllocator(), m_image, m_allocation);
m_image = VK_NULL_HANDLE;
m_allocation = VK_NULL_HANDLE;
}
@ -288,14 +286,14 @@ void* GSTextureVK::GetNativeHandle() const
VkCommandBuffer GSTextureVK::GetCommandBufferForUpdate()
{
if (m_type != Type::Texture || m_use_fence_counter == g_vulkan_context->GetCurrentFenceCounter())
if (m_type != Type::Texture || m_use_fence_counter == GSDeviceVK::GetInstance()->GetCurrentFenceCounter())
{
// Console.WriteLn("Texture update within frame, can't use do beforehand");
GSDeviceVK::GetInstance()->EndRenderPass();
return g_vulkan_context->GetCurrentCommandBuffer();
return GSDeviceVK::GetInstance()->GetCurrentCommandBuffer();
}
return g_vulkan_context->GetCurrentInitCommandBuffer();
return GSDeviceVK::GetInstance()->GetCurrentInitCommandBuffer();
}
void GSTextureVK::CopyTextureDataForUpload(void* dst, const void* src, u32 pitch, u32 upload_pitch, u32 height) const
@ -321,7 +319,7 @@ VkBuffer GSTextureVK::AllocateUploadStagingBuffer(const void* data, u32 pitch, u
VmaAllocationInfo ai;
VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(g_vulkan_context->GetAllocator(), &bci, &aci, &buffer, &allocation, &ai);
VkResult res = vmaCreateBuffer(GSDeviceVK::GetInstance()->GetAllocator(), &bci, &aci, &buffer, &allocation, &ai);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "(AllocateUploadStagingBuffer) vmaCreateBuffer() failed: ");
@ -329,11 +327,11 @@ VkBuffer GSTextureVK::AllocateUploadStagingBuffer(const void* data, u32 pitch, u
}
// Immediately queue it for freeing after the command buffer finishes, since it's only needed for the copy.
g_vulkan_context->DeferBufferDestruction(buffer, allocation);
GSDeviceVK::GetInstance()->DeferBufferDestruction(buffer, allocation);
// And write the data.
CopyTextureDataForUpload(ai.pMappedData, data, pitch, upload_pitch, height);
vmaFlushAllocation(g_vulkan_context->GetAllocator(), allocation, 0, size);
vmaFlushAllocation(GSDeviceVK::GetInstance()->GetAllocator(), allocation, 0, size);
return buffer;
}
@ -363,14 +361,14 @@ bool GSTextureVK::Update(const GSVector4i& r, const void* data, int pitch, int l
const u32 width = r.width();
const u32 height = r.height();
const u32 upload_pitch = Common::AlignUpPow2(pitch, g_vulkan_context->GetBufferCopyRowPitchAlignment());
const u32 upload_pitch = Common::AlignUpPow2(pitch, GSDeviceVK::GetInstance()->GetBufferCopyRowPitchAlignment());
const u32 required_size = CalcUploadSize(height, upload_pitch);
// If the texture is larger than half our streaming buffer size, use a separate buffer.
// Otherwise allocation will either fail, or require lots of cmdbuffer submissions.
VkBuffer buffer;
u32 buffer_offset;
if (required_size > (g_vulkan_context->GetTextureUploadBuffer().GetCurrentSize() / 2))
if (required_size > (GSDeviceVK::GetInstance()->GetTextureUploadBuffer().GetCurrentSize() / 2))
{
buffer_offset = 0;
buffer = AllocateUploadStagingBuffer(data, pitch, upload_pitch, height);
@ -379,12 +377,12 @@ bool GSTextureVK::Update(const GSVector4i& r, const void* data, int pitch, int l
}
else
{
VKStreamBuffer& sbuffer = g_vulkan_context->GetTextureUploadBuffer();
if (!sbuffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
VKStreamBuffer& sbuffer = GSDeviceVK::GetInstance()->GetTextureUploadBuffer();
if (!sbuffer.ReserveMemory(required_size, GSDeviceVK::GetInstance()->GetBufferCopyOffsetAlignment()))
{
GSDeviceVK::GetInstance()->ExecuteCommandBuffer(
false, "While waiting for %u bytes in texture upload buffer", required_size);
if (!sbuffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
if (!sbuffer.ReserveMemory(required_size, GSDeviceVK::GetInstance()->GetBufferCopyOffsetAlignment()))
{
Console.Error("Failed to reserve texture upload memory (%u bytes).", required_size);
return false;
@ -433,19 +431,19 @@ bool GSTextureVK::Map(GSMap& m, const GSVector4i* r, int layer)
m_map_level = layer;
m.pitch =
Common::AlignUpPow2(CalcUploadPitch(m_map_area.width()), g_vulkan_context->GetBufferCopyRowPitchAlignment());
Common::AlignUpPow2(CalcUploadPitch(m_map_area.width()), GSDeviceVK::GetInstance()->GetBufferCopyRowPitchAlignment());
// see note in Update() for the reason why.
const u32 required_size = CalcUploadSize(m_map_area.height(), m.pitch);
VKStreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
VKStreamBuffer& buffer = GSDeviceVK::GetInstance()->GetTextureUploadBuffer();
if (required_size >= (buffer.GetCurrentSize() / 2))
return false;
if (!buffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
if (!buffer.ReserveMemory(required_size, GSDeviceVK::GetInstance()->GetBufferCopyOffsetAlignment()))
{
GSDeviceVK::GetInstance()->ExecuteCommandBuffer(
false, "While waiting for %u bytes in texture upload buffer", required_size);
if (!buffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
if (!buffer.ReserveMemory(required_size, GSDeviceVK::GetInstance()->GetBufferCopyOffsetAlignment()))
pxFailRel("Failed to reserve texture upload memory");
}
@ -461,9 +459,9 @@ void GSTextureVK::Unmap()
const u32 width = m_map_area.width();
const u32 height = m_map_area.height();
const u32 pitch = Common::AlignUpPow2(CalcUploadPitch(width), g_vulkan_context->GetBufferCopyRowPitchAlignment());
const u32 pitch = Common::AlignUpPow2(CalcUploadPitch(width), GSDeviceVK::GetInstance()->GetBufferCopyRowPitchAlignment());
const u32 required_size = CalcUploadSize(height, pitch);
VKStreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
VKStreamBuffer& buffer = GSDeviceVK::GetInstance()->GetTextureUploadBuffer();
const u32 buffer_offset = buffer.GetCurrentOffset();
buffer.CommitMemory(required_size);
@ -545,7 +543,7 @@ void GSTextureVK::CommitClear()
GSDeviceVK::GetInstance()->EndRenderPass();
CommitClear(g_vulkan_context->GetCurrentCommandBuffer());
CommitClear(GSDeviceVK::GetInstance()->GetCurrentCommandBuffer());
}
void GSTextureVK::CommitClear(VkCommandBuffer cmdbuf)
@ -576,7 +574,7 @@ void GSTextureVK::OverrideImageLayout(Layout new_layout)
void GSTextureVK::TransitionToLayout(Layout layout)
{
TransitionToLayout(g_vulkan_context->GetCurrentCommandBuffer(), layout);
TransitionToLayout(GSDeviceVK::GetInstance()->GetCurrentCommandBuffer(), layout);
}
void GSTextureVK::TransitionToLayout(VkCommandBuffer command_buffer, Layout new_layout)
@ -788,7 +786,7 @@ VkFramebuffer GSTextureVK::GetLinkedFramebuffer(GSTextureVK* depth_texture, bool
return fb;
}
const VkRenderPass rp = g_vulkan_context->GetRenderPass(
const VkRenderPass rp = GSDeviceVK::GetInstance()->GetRenderPass(
(m_type != GSTexture::Type::DepthStencil) ? m_vk_format : VK_FORMAT_UNDEFINED,
(m_type != GSTexture::Type::DepthStencil) ? (depth_texture ? depth_texture->m_vk_format : VK_FORMAT_UNDEFINED) :
m_vk_format,
@ -804,7 +802,7 @@ VkFramebuffer GSTextureVK::GetLinkedFramebuffer(GSTextureVK* depth_texture, bool
fbb.SetSize(m_size.x, m_size.y, 1);
fbb.SetRenderPass(rp);
VkFramebuffer fb = fbb.Create(g_vulkan_context->GetDevice());
VkFramebuffer fb = fbb.Create(GSDeviceVK::GetInstance()->GetDevice());
if (!fb)
return VK_NULL_HANDLE;
@ -823,12 +821,12 @@ GSDownloadTextureVK::~GSDownloadTextureVK()
{
// Buffer was created mapped, no need to manually unmap.
if (m_buffer != VK_NULL_HANDLE)
g_vulkan_context->DeferBufferDestruction(m_buffer, m_allocation);
GSDeviceVK::GetInstance()->DeferBufferDestruction(m_buffer, m_allocation);
}
std::unique_ptr<GSDownloadTextureVK> GSDownloadTextureVK::Create(u32 width, u32 height, GSTexture::Format format)
{
const u32 buffer_size = GetBufferSize(width, height, format, g_vulkan_context->GetBufferCopyRowPitchAlignment());
const u32 buffer_size = GetBufferSize(width, height, format, GSDeviceVK::GetInstance()->GetBufferCopyRowPitchAlignment());
const VkBufferCreateInfo bci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, nullptr, 0u, buffer_size,
VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_SHARING_MODE_EXCLUSIVE, 0u, nullptr};
@ -841,7 +839,7 @@ std::unique_ptr<GSDownloadTextureVK> GSDownloadTextureVK::Create(u32 width, u32
VmaAllocationInfo ai = {};
VmaAllocation allocation;
VkBuffer buffer;
VkResult res = vmaCreateBuffer(g_vulkan_context->GetAllocator(), &bci, &aci, &buffer, &allocation, &ai);
VkResult res = vmaCreateBuffer(GSDeviceVK::GetInstance()->GetAllocator(), &bci, &aci, &buffer, &allocation, &ai);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vmaCreateBuffer() failed: ");
@ -871,14 +869,14 @@ void GSDownloadTextureVK::CopyFromTexture(
u32 copy_offset, copy_size, copy_rows;
m_current_pitch = GetTransferPitch(use_transfer_pitch ? static_cast<u32>(drc.width()) : m_width,
g_vulkan_context->GetBufferCopyRowPitchAlignment());
GSDeviceVK::GetInstance()->GetBufferCopyRowPitchAlignment());
GetTransferSize(drc, &copy_offset, &copy_size, &copy_rows);
g_perfmon.Put(GSPerfMon::Readbacks, 1);
GSDeviceVK::GetInstance()->EndRenderPass();
vkTex->CommitClear();
const VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer();
const VkCommandBuffer cmdbuf = GSDeviceVK::GetInstance()->GetCurrentCommandBuffer();
GL_INS("GSDownloadTextureVK::CopyFromTexture: {%d,%d} %ux%u", src.left, src.top, src.width(), src.height());
GSTextureVK::Layout old_layout = vkTex->GetLayout();
@ -898,13 +896,24 @@ void GSDownloadTextureVK::CopyFromTexture(
vkCmdCopyImageToBuffer(cmdbuf, vkTex->GetImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer, 1, &image_copy);
// flush gpu cache
Vulkan::BufferMemoryBarrier(cmdbuf, m_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, 0, copy_size,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_HOST_BIT);
const VkBufferMemoryBarrier buffer_info = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_buffer, // VkBuffer buffer
0, // VkDeviceSize offset
copy_size // VkDeviceSize size
};
vkCmdPipelineBarrier(
cmdbuf, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 1, &buffer_info, 0, nullptr);
if (old_layout != GSTextureVK::Layout::TransferSrc)
vkTex->TransitionSubresourcesToLayout(cmdbuf, src_level, 1, GSTextureVK::Layout::TransferSrc, old_layout);
m_copy_fence_counter = g_vulkan_context->GetCurrentFenceCounter();
m_copy_fence_counter = GSDeviceVK::GetInstance()->GetCurrentFenceCounter();
m_needs_cache_invalidate = true;
m_needs_flush = true;
}
@ -916,7 +925,7 @@ bool GSDownloadTextureVK::Map(const GSVector4i& read_rc)
{
u32 copy_offset, copy_size, copy_rows;
GetTransferSize(read_rc, &copy_offset, &copy_size, &copy_rows);
vmaInvalidateAllocation(g_vulkan_context->GetAllocator(), m_allocation, copy_offset, copy_size);
vmaInvalidateAllocation(GSDeviceVK::GetInstance()->GetAllocator(), m_allocation, copy_offset, copy_size);
m_needs_cache_invalidate = false;
}
@ -935,12 +944,12 @@ void GSDownloadTextureVK::Flush()
m_needs_flush = false;
if (g_vulkan_context->GetCompletedFenceCounter() >= m_copy_fence_counter)
if (GSDeviceVK::GetInstance()->GetCompletedFenceCounter() >= m_copy_fence_counter)
return;
// Need to execute command buffer.
if (g_vulkan_context->GetCurrentFenceCounter() == m_copy_fence_counter)
if (GSDeviceVK::GetInstance()->GetCurrentFenceCounter() == m_copy_fence_counter)
GSDeviceVK::GetInstance()->ExecuteCommandBufferForReadback();
else
g_vulkan_context->WaitForFenceCounter(m_copy_fence_counter);
GSDeviceVK::GetInstance()->WaitForFenceCounter(m_copy_fence_counter);
}

View File

@ -17,7 +17,6 @@
#include "GS/GS.h"
#include "GS/Renderers/Common/GSTexture.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKLoader.h"
#include <limits>
@ -86,7 +85,7 @@ public:
VkFramebuffer GetLinkedFramebuffer(GSTextureVK* depth_texture, bool feedback_loop);
// Call when the texture is bound to the pipeline, or read from in a copy.
__fi void SetUsedThisCommandBuffer() { m_use_fence_counter = g_vulkan_context->GetCurrentFenceCounter(); }
__fi void SetUseFenceCounter(u64 counter) { m_use_fence_counter = counter; }
private:
GSTextureVK(Type type, Format format, int width, int height, int levels, VkImage image, VmaAllocation allocation,

View File

@ -16,12 +16,117 @@
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "common/Assertions.h"
#include <limits>
void Vulkan::AddPointerToChain(void* head, const void* ptr)
{
VkBaseInStructure* last_st = static_cast<VkBaseInStructure*>(head);
while (last_st->pNext)
{
if (last_st->pNext == ptr)
return;
last_st = const_cast<VkBaseInStructure*>(last_st->pNext);
}
last_st->pNext = static_cast<const VkBaseInStructure*>(ptr);
}
const char* Vulkan::VkResultToString(VkResult res)
{
switch (res)
{
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
return "VK_NOT_READY";
case VK_TIMEOUT:
return "VK_TIMEOUT";
case VK_EVENT_SET:
return "VK_EVENT_SET";
case VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
default:
return "UNKNOWN_VK_RESULT";
}
}
void Vulkan::LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap);
va_end(ap);
Console.Error("(%s) %s (%d: %s)", func_name, real_msg.c_str(), static_cast<int>(res), VkResultToString(res));
}
Vulkan::DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder()
{
Clear();

View File

@ -17,10 +17,26 @@
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "common/StringUtil.h"
#include <array>
#include <cstdarg>
#include <string_view>
#ifdef _DEBUG
#define ENABLE_VULKAN_DEBUG_OBJECTS 1
#endif
#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::LogVulkanResult(__func__, res, __VA_ARGS__)
namespace Vulkan
{
// Adds a structure to a chain.
void AddPointerToChain(void* head, const void* ptr);
const char* VkResultToString(VkResult res);
void LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...);
class DescriptorSetLayoutBuilder
{
public:
@ -329,4 +345,72 @@ namespace Vulkan
VkBufferViewCreateInfo m_ci;
};
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
// Provides a compile-time mapping between a Vulkan-type into its matching VkObjectType
template <typename T>
struct VkObjectTypeMap;
// clang-format off
template<> struct VkObjectTypeMap<VkInstance > { using type = VkInstance; static constexpr VkObjectType value = VK_OBJECT_TYPE_INSTANCE; };
template<> struct VkObjectTypeMap<VkPhysicalDevice > { using type = VkPhysicalDevice; static constexpr VkObjectType value = VK_OBJECT_TYPE_PHYSICAL_DEVICE; };
template<> struct VkObjectTypeMap<VkDevice > { using type = VkDevice; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE; };
template<> struct VkObjectTypeMap<VkQueue > { using type = VkQueue; static constexpr VkObjectType value = VK_OBJECT_TYPE_QUEUE; };
template<> struct VkObjectTypeMap<VkSemaphore > { using type = VkSemaphore; static constexpr VkObjectType value = VK_OBJECT_TYPE_SEMAPHORE; };
template<> struct VkObjectTypeMap<VkCommandBuffer > { using type = VkCommandBuffer; static constexpr VkObjectType value = VK_OBJECT_TYPE_COMMAND_BUFFER; };
template<> struct VkObjectTypeMap<VkFence > { using type = VkFence; static constexpr VkObjectType value = VK_OBJECT_TYPE_FENCE; };
template<> struct VkObjectTypeMap<VkDeviceMemory > { using type = VkDeviceMemory; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE_MEMORY; };
template<> struct VkObjectTypeMap<VkBuffer > { using type = VkBuffer; static constexpr VkObjectType value = VK_OBJECT_TYPE_BUFFER; };
template<> struct VkObjectTypeMap<VkImage > { using type = VkImage; static constexpr VkObjectType value = VK_OBJECT_TYPE_IMAGE; };
template<> struct VkObjectTypeMap<VkEvent > { using type = VkEvent; static constexpr VkObjectType value = VK_OBJECT_TYPE_EVENT; };
template<> struct VkObjectTypeMap<VkQueryPool > { using type = VkQueryPool; static constexpr VkObjectType value = VK_OBJECT_TYPE_QUERY_POOL; };
template<> struct VkObjectTypeMap<VkBufferView > { using type = VkBufferView; static constexpr VkObjectType value = VK_OBJECT_TYPE_BUFFER_VIEW; };
template<> struct VkObjectTypeMap<VkImageView > { using type = VkImageView; static constexpr VkObjectType value = VK_OBJECT_TYPE_IMAGE_VIEW; };
template<> struct VkObjectTypeMap<VkShaderModule > { using type = VkShaderModule; static constexpr VkObjectType value = VK_OBJECT_TYPE_SHADER_MODULE; };
template<> struct VkObjectTypeMap<VkPipelineCache > { using type = VkPipelineCache; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE_CACHE; };
template<> struct VkObjectTypeMap<VkPipelineLayout > { using type = VkPipelineLayout; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE_LAYOUT; };
template<> struct VkObjectTypeMap<VkRenderPass > { using type = VkRenderPass; static constexpr VkObjectType value = VK_OBJECT_TYPE_RENDER_PASS; };
template<> struct VkObjectTypeMap<VkPipeline > { using type = VkPipeline; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE; };
template<> struct VkObjectTypeMap<VkDescriptorSetLayout > { using type = VkDescriptorSetLayout; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT; };
template<> struct VkObjectTypeMap<VkSampler > { using type = VkSampler; static constexpr VkObjectType value = VK_OBJECT_TYPE_SAMPLER; };
template<> struct VkObjectTypeMap<VkDescriptorPool > { using type = VkDescriptorPool; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_POOL; };
template<> struct VkObjectTypeMap<VkDescriptorSet > { using type = VkDescriptorSet; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_SET; };
template<> struct VkObjectTypeMap<VkFramebuffer > { using type = VkFramebuffer; static constexpr VkObjectType value = VK_OBJECT_TYPE_FRAMEBUFFER; };
template<> struct VkObjectTypeMap<VkCommandPool > { using type = VkCommandPool; static constexpr VkObjectType value = VK_OBJECT_TYPE_COMMAND_POOL; };
template<> struct VkObjectTypeMap<VkDescriptorUpdateTemplate> { using type = VkDescriptorUpdateTemplate; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE; };
template<> struct VkObjectTypeMap<VkSurfaceKHR > { using type = VkSurfaceKHR; static constexpr VkObjectType value = VK_OBJECT_TYPE_SURFACE_KHR; };
template<> struct VkObjectTypeMap<VkSwapchainKHR > { using type = VkSwapchainKHR; static constexpr VkObjectType value = VK_OBJECT_TYPE_SWAPCHAIN_KHR; };
template<> struct VkObjectTypeMap<VkDebugUtilsMessengerEXT > { using type = VkDebugUtilsMessengerEXT; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT; };
// clang-format on
#endif
static inline void SetObjectName(
VkDevice device, void* object_handle, VkObjectType object_type, const char* format, va_list ap)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkSetDebugUtilsObjectNameEXT)
{
return;
}
const std::string str(StringUtil::StdStringFromFormatV(format, ap));
const VkDebugUtilsObjectNameInfoEXT nameInfo{VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, nullptr,
object_type, reinterpret_cast<uint64_t>(object_handle), str.c_str()};
vkSetDebugUtilsObjectNameEXT(device, &nameInfo);
#endif
}
template <typename T>
static inline void SetObjectName(VkDevice device, T object_handle, const char* format, ...)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
std::va_list ap;
va_start(ap, format);
SetObjectName(device, reinterpret_cast<void*>((typename VkObjectTypeMap<T>::type)object_handle),
VkObjectTypeMap<T>::value, format, ap);
va_end(ap);
#endif
}
} // namespace Vulkan

File diff suppressed because it is too large Load Diff

View File

@ -1,405 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "GS/Renderers/Vulkan/VKStreamBuffer.h"
#include "common/ReadbackSpinManager.h"
#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
struct WindowInfo;
class VKSwapChain;
class VKContext
{
public:
enum : u32
{
NUM_COMMAND_BUFFERS = 3,
TEXTURE_BUFFER_SIZE = 64 * 1024 * 1024,
};
struct OptionalExtensions
{
bool vk_ext_provoking_vertex : 1;
bool vk_ext_memory_budget : 1;
bool vk_ext_calibrated_timestamps : 1;
bool vk_ext_line_rasterization : 1;
bool vk_ext_rasterization_order_attachment_access : 1;
bool vk_ext_attachment_feedback_loop_layout : 1;
bool vk_ext_full_screen_exclusive : 1;
bool vk_khr_driver_properties : 1;
bool vk_khr_fragment_shader_barycentric : 1;
bool vk_khr_shader_draw_parameters : 1;
};
~VKContext();
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(
const WindowInfo& wi, bool enable_debug_utils, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<std::pair<VkPhysicalDevice, std::string>>;
static GPUList EnumerateGPUs(VkInstance instance);
// Creates a new context and sets it up as global.
static bool Create(VkInstance instance, VkSurfaceKHR surface, VkPhysicalDevice physical_device,
bool threaded_presentation, bool enable_debug_utils, bool enable_validation_layer);
// Destroys context.
static void Destroy();
// Enable/disable debug message runtime.
bool EnableDebugUtils();
void DisableDebugUtils();
// Global state accessors
__fi VkInstance GetVulkanInstance() const { return m_instance; }
__fi VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
__fi VkDevice GetDevice() const { return m_device; }
__fi VmaAllocator GetAllocator() const { return m_allocator; }
__fi VkQueue GetGraphicsQueue() const { return m_graphics_queue; }
__fi u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
__fi VkQueue GetPresentQueue() const { return m_present_queue; }
__fi u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
__fi const VkQueueFamilyProperties& GetGraphicsQueueProperties() const { return m_graphics_queue_properties; }
__fi const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const
{
return m_device_memory_properties;
}
__fi const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
__fi const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; }
__fi const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; }
__fi const VkPhysicalDeviceDriverProperties& GetDeviceDriverProperties() const { return m_device_driver_properties; }
__fi const OptionalExtensions& GetOptionalExtensions() const { return m_optional_extensions; }
// The interaction between raster order attachment access and fbfetch is unclear.
__fi bool UseFeedbackLoopLayout() const
{
return (m_optional_extensions.vk_ext_attachment_feedback_loop_layout &&
!m_optional_extensions.vk_ext_rasterization_order_attachment_access);
}
// Helpers for getting constants
__fi u32 GetUniformBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minUniformBufferOffsetAlignment);
}
__fi u32 GetTexelBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minTexelBufferOffsetAlignment);
}
__fi u32 GetStorageBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minStorageBufferOffsetAlignment);
}
__fi u32 GetBufferImageGranularity() const
{
return static_cast<u32>(m_device_properties.limits.bufferImageGranularity);
}
__fi u32 GetBufferCopyOffsetAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyOffsetAlignment);
}
__fi u32 GetBufferCopyRowPitchAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyRowPitchAlignment);
}
__fi u32 GetMaxImageDimension2D() const
{
return m_device_properties.limits.maxImageDimension2D;
}
/// Returns true if running on an NVIDIA GPU.
__fi bool IsDeviceNVIDIA() const { return (m_device_properties.vendorID == 0x10DE); }
// Creates a simple render pass.
VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format,
VkAttachmentLoadOp color_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp color_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp depth_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp depth_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp stencil_load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VkAttachmentStoreOp stencil_store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE,
bool color_feedback_loop = false, bool depth_sampling = false);
// Gets a non-clearing version of the specified render pass. Slow, don't call in hot path.
VkRenderPass GetRenderPassForRestarting(VkRenderPass pass);
// These command buffers are allocated per-frame. They are valid until the command buffer
// is submitted, after that you should call these functions again.
__fi u32 GetCurrentCommandBufferIndex() const { return m_current_frame; }
__fi VkDescriptorPool GetGlobalDescriptorPool() const { return m_global_descriptor_pool; }
__fi VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
__fi VKStreamBuffer& GetTextureUploadBuffer() { return m_texture_upload_buffer; }
__fi VkDescriptorPool GetCurrentDescriptorPool() const
{
return m_frame_resources[m_current_frame].descriptor_pool;
}
VkCommandBuffer GetCurrentInitCommandBuffer();
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocatePersistentDescriptorSet(VkDescriptorSetLayout set_layout);
/// Frees a descriptor set allocated from the global pool.
void FreeGlobalDescriptorSet(VkDescriptorSet set);
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
__fi VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Fence "counters" are used to track which commands have been completed by the GPU.
// If the last completed fence counter is greater or equal to N, it means that the work
// associated counter N has been completed by the GPU. The value of N to associate with
// commands can be retreived by calling GetCurrentFenceCounter().
u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
void SubmitCommandBuffer(VKSwapChain* present_swap_chain = nullptr, bool submit_on_thread = false);
void MoveToNextCommandBuffer();
enum class WaitType
{
None,
Sleep,
Spin,
};
void ExecuteCommandBuffer(WaitType wait_for_completion);
void WaitForPresentComplete();
// Was the last present submitted to the queue a failure? If so, we must recreate our swapchain.
bool CheckLastPresentFail();
bool CheckLastSubmitFail();
// Schedule a vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
void DeferBufferDestruction(VkBuffer object);
void DeferBufferDestruction(VkBuffer object, VmaAllocation allocation);
void DeferBufferViewDestruction(VkBufferView object);
void DeferDeviceMemoryDestruction(VkDeviceMemory object);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object);
void DeferImageDestruction(VkImage object, VmaAllocation allocation);
void DeferImageViewDestruction(VkImageView object);
void DeferPipelineDestruction(VkPipeline pipeline);
void DeferSamplerDestruction(VkSampler sampler);
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFenceCounter(u64 fence_counter);
void WaitForGPUIdle();
float GetAndResetAccumulatedGPUTime();
bool SetEnableGPUTiming(bool enabled);
void CountRenderPass() { m_command_buffer_render_passes++; }
void NotifyOfReadback();
// Allocates a temporary CPU staging buffer, fires the callback with it to populate, then copies to a GPU buffer.
bool AllocatePreinitializedGPUBuffer(u32 size, VkBuffer* gpu_buffer, VmaAllocation* gpu_allocation,
VkBufferUsageFlags gpu_usage, const std::function<void(void*)>& fill_callback);
private:
VKContext(VkInstance instance, VkPhysicalDevice physical_device);
union RenderPassCacheKey
{
struct
{
u32 color_format : 8;
u32 depth_format : 8;
u32 color_load_op : 2;
u32 color_store_op : 1;
u32 depth_load_op : 2;
u32 depth_store_op : 1;
u32 stencil_load_op : 2;
u32 stencil_store_op : 1;
u32 color_feedback_loop : 1;
u32 depth_sampling : 1;
};
u32 key;
};
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(
ExtensionList* extension_list, const WindowInfo& wi, bool enable_debug_utils);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface);
bool SelectDeviceFeatures(const VkPhysicalDeviceFeatures* required_features);
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer, const char** required_device_extensions,
u32 num_required_device_extensions, const char** required_device_layers, u32 num_required_device_layers,
const VkPhysicalDeviceFeatures* required_features);
void ProcessDeviceExtensions();
bool CreateAllocator();
void DestroyAllocator();
bool CreateCommandBuffers();
void DestroyCommandBuffers();
bool CreateGlobalDescriptorPool();
void DestroyGlobalDescriptorPool();
bool CreateTextureStreamBuffer();
VkRenderPass CreateCachedRenderPass(RenderPassCacheKey key);
void DestroyRenderPassCache();
void CommandBufferCompleted(u32 index);
void ActivateCommandBuffer(u32 index);
void ScanForCommandBufferCompletion();
void WaitForCommandBufferCompletion(u32 index);
void DoSubmitCommandBuffer(u32 index, VKSwapChain* present_swap_chain, u32 spin_cycles);
void DoPresent(VKSwapChain* present_swap_chain);
void WaitForPresentComplete(std::unique_lock<std::mutex>& lock);
void PresentThread();
void StartPresentThread();
void StopPresentThread();
bool InitSpinResources();
void DestroySpinResources();
void WaitForSpinCompletion(u32 index);
void SpinCommandCompleted(u32 index);
void SubmitSpinCommand(u32 index, u32 cycles);
void CalibrateSpinTimestamp();
u64 GetCPUTimestamp();
struct FrameResources
{
// [0] - Init (upload) command buffer, [1] - draw command buffer
VkCommandPool command_pool = VK_NULL_HANDLE;
std::array<VkCommandBuffer, 2> command_buffers{VK_NULL_HANDLE, VK_NULL_HANDLE};
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u64 fence_counter = 0;
s32 spin_id = -1;
u32 submit_timestamp = 0;
bool init_buffer_used = false;
bool needs_fence_wait = false;
bool timestamp_written = false;
std::vector<std::function<void()>> cleanup_resources;
};
struct SpinResources
{
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkSemaphore semaphore = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u32 cycles = 0;
bool in_progress = false;
};
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VmaAllocator m_allocator = VK_NULL_HANDLE;
VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;
VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
VkQueue m_present_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
u32 m_present_queue_family_index = 0;
ReadbackSpinManager m_spin_manager;
VkQueue m_spin_queue = VK_NULL_HANDLE;
VkDescriptorSetLayout m_spin_descriptor_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_spin_pipeline_layout = VK_NULL_HANDLE;
VkPipeline m_spin_pipeline = VK_NULL_HANDLE;
VkBuffer m_spin_buffer = VK_NULL_HANDLE;
VmaAllocation m_spin_buffer_allocation = VK_NULL_HANDLE;
VkDescriptorSet m_spin_descriptor_set = VK_NULL_HANDLE;
std::array<SpinResources, NUM_COMMAND_BUFFERS> m_spin_resources;
#ifdef _WIN32
double m_queryperfcounter_to_ns = 0;
#endif
double m_spin_timestamp_scale = 0;
double m_spin_timestamp_offset = 0;
u32 m_spin_queue_family_index = 0;
u32 m_command_buffer_render_passes = 0;
u32 m_spin_timer = 0;
bool m_spinning_supported = false;
bool m_spin_queue_is_graphics_queue = false;
bool m_spin_buffer_initialized = false;
VkQueryPool m_timestamp_query_pool = VK_NULL_HANDLE;
float m_accumulated_gpu_time = 0.0f;
bool m_gpu_timing_enabled = false;
bool m_gpu_timing_supported = false;
bool m_wants_new_timestamp_calibration = false;
VkTimeDomainEXT m_calibrated_timestamp_type = VK_TIME_DOMAIN_DEVICE_EXT;
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources;
u64 m_next_fence_counter = 1;
u64 m_completed_fence_counter = 0;
u32 m_current_frame = 0;
VKStreamBuffer m_texture_upload_buffer;
std::atomic_bool m_last_submit_failed{false};
std::atomic_bool m_last_present_failed{false};
std::atomic_bool m_present_done{true};
std::mutex m_present_mutex;
std::condition_variable m_present_queued_cv;
std::condition_variable m_present_done_cv;
std::thread m_present_thread;
std::atomic_bool m_present_thread_done{false};
struct QueuedPresent
{
VKSwapChain* swap_chain;
u32 command_buffer_index;
u32 spin_cycles;
};
QueuedPresent m_queued_present = {};
std::map<u32, VkRenderPass> m_render_pass_cache;
VkDebugUtilsMessengerEXT m_debug_messenger_callback = VK_NULL_HANDLE;
VkQueueFamilyProperties m_graphics_queue_properties = {};
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceMemoryProperties m_device_memory_properties = {};
VkPhysicalDeviceDriverPropertiesKHR m_device_driver_properties = {};
OptionalExtensions m_optional_extensions = {};
};
extern std::unique_ptr<VKContext> g_vulkan_context;

View File

@ -15,7 +15,8 @@
#include "GS/Renderers/Vulkan/VKLoader.h"
#include <atomic>
#include "common/Assertions.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
@ -54,20 +55,19 @@ void Vulkan::ResetVulkanLibraryFunctionPointers()
#if defined(_WIN32)
static HMODULE vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
static HMODULE s_vulkan_module;
bool Vulkan::IsVulkanLibraryLoaded()
{
return s_vulkan_module != NULL;
}
bool Vulkan::LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
pxAssertRel(!s_vulkan_module, "Vulkan module is not loaded.");
vulkan_module = LoadLibraryA("vulkan-1.dll");
if (!vulkan_module)
s_vulkan_module = LoadLibraryA("vulkan-1.dll");
if (!s_vulkan_module)
{
std::fprintf(stderr, "Failed to load vulkan-1.dll\n");
return false;
@ -75,7 +75,7 @@ bool Vulkan::LoadVulkanLibrary()
bool required_functions_missing = false;
auto LoadFunction = [&](FARPROC* func_ptr, const char* name, bool is_required) {
*func_ptr = GetProcAddress(vulkan_module, name);
*func_ptr = GetProcAddress(s_vulkan_module, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
@ -90,45 +90,41 @@ bool Vulkan::LoadVulkanLibrary()
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
FreeLibrary(s_vulkan_module);
s_vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void Vulkan::UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
if (s_vulkan_module)
FreeLibrary(s_vulkan_module);
s_vulkan_module = nullptr;
}
#else
static void* vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
static void* s_vulkan_module;
bool Vulkan::IsVulkanLibraryLoaded()
{
return s_vulkan_module != nullptr;
}
bool Vulkan::LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
pxAssertRel(!s_vulkan_module, "Vulkan module is not loaded.");
#if defined(__APPLE__)
// Check if a path to a specific Vulkan library has been specified.
char* libvulkan_env = getenv("LIBVULKAN_PATH");
if (libvulkan_env)
vulkan_module = dlopen(libvulkan_env, RTLD_NOW);
if (!vulkan_module)
s_vulkan_module = dlopen(libvulkan_env, RTLD_NOW);
if (!s_vulkan_module)
{
unsigned path_size = 0;
_NSGetExecutablePath(nullptr, &path_size);
@ -143,24 +139,24 @@ bool Vulkan::LoadVulkanLibrary()
{
path.erase(pos);
path += "/../Frameworks/libMoltenVK.dylib";
vulkan_module = dlopen(path.c_str(), RTLD_NOW);
s_vulkan_module = dlopen(path.c_str(), RTLD_NOW);
}
}
}
if (!vulkan_module)
vulkan_module = dlopen("libvulkan.dylib", RTLD_NOW);
if (!s_vulkan_module)
s_vulkan_module = dlopen("libvulkan.dylib", RTLD_NOW);
#else
// Names of libraries to search. Desktop should use libvulkan.so.1 or libvulkan.so.
static const char* search_lib_names[] = {"libvulkan.so.1", "libvulkan.so"};
for (size_t i = 0; i < sizeof(search_lib_names) / sizeof(search_lib_names[0]); i++)
{
vulkan_module = dlopen(search_lib_names[i], RTLD_NOW);
if (vulkan_module)
s_vulkan_module = dlopen(search_lib_names[i], RTLD_NOW);
if (s_vulkan_module)
break;
}
#endif
if (!vulkan_module)
if (!s_vulkan_module)
{
std::fprintf(stderr, "Failed to load or locate libvulkan.so\n");
return false;
@ -168,7 +164,7 @@ bool Vulkan::LoadVulkanLibrary()
bool required_functions_missing = false;
auto LoadFunction = [&](void** func_ptr, const char* name, bool is_required) {
*func_ptr = dlsym(vulkan_module, name);
*func_ptr = dlsym(s_vulkan_module, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
@ -183,23 +179,20 @@ bool Vulkan::LoadVulkanLibrary()
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
dlclose(s_vulkan_module);
s_vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void Vulkan::UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
if (s_vulkan_module)
dlclose(s_vulkan_module);
s_vulkan_module = nullptr;
}
#endif

View File

@ -97,6 +97,7 @@
namespace Vulkan
{
bool IsVulkanLibraryLoaded();
bool LoadVulkanLibrary();
bool LoadVulkanInstanceFunctions(VkInstance instance);
bool LoadVulkanDeviceFunctions(VkDevice device);

View File

@ -16,8 +16,8 @@
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKShaderCache.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/Renderers/Vulkan/GSDeviceVK.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "GS/GS.h"
#include "Config.h"
@ -81,21 +81,21 @@ static bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header)
return false;
}
if (header.vendor_id != g_vulkan_context->GetDeviceProperties().vendorID)
if (header.vendor_id != GSDeviceVK::GetInstance()->GetDeviceProperties().vendorID)
{
Console.Error("Pipeline cache failed validation: Incorrect vendor ID (file: 0x%X, device: 0x%X)",
header.vendor_id, g_vulkan_context->GetDeviceProperties().vendorID);
header.vendor_id, GSDeviceVK::GetInstance()->GetDeviceProperties().vendorID);
return false;
}
if (header.device_id != g_vulkan_context->GetDeviceProperties().deviceID)
if (header.device_id != GSDeviceVK::GetInstance()->GetDeviceProperties().deviceID)
{
Console.Error("Pipeline cache failed validation: Incorrect device ID (file: 0x%X, device: 0x%X)",
header.device_id, g_vulkan_context->GetDeviceProperties().deviceID);
header.device_id, GSDeviceVK::GetInstance()->GetDeviceProperties().deviceID);
return false;
}
if (std::memcmp(header.uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0)
if (std::memcmp(header.uuid, GSDeviceVK::GetInstance()->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0)
{
Console.Error("Pipeline cache failed validation: Incorrect UUID");
return false;
@ -108,9 +108,9 @@ static void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header)
{
header->header_length = sizeof(VK_PIPELINE_CACHE_HEADER);
header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
header->vendor_id = g_vulkan_context->GetDeviceProperties().vendorID;
header->device_id = g_vulkan_context->GetDeviceProperties().deviceID;
std::memcpy(header->uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE);
header->vendor_id = GSDeviceVK::GetInstance()->GetDeviceProperties().vendorID;
header->device_id = GSDeviceVK::GetInstance()->GetDeviceProperties().deviceID;
std::memcpy(header->uuid, GSDeviceVK::GetInstance()->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE);
}
static unsigned s_next_bad_shader_id = 1;
@ -243,12 +243,11 @@ bool VKShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
source_length != key.source_length || shader_type != key.shader_type);
}
bool VKShaderCache::Create()
void VKShaderCache::Create()
{
pxAssert(!g_vulkan_shader_cache);
g_vulkan_shader_cache.reset(new VKShaderCache());
g_vulkan_shader_cache->Open();
return true;
}
void VKShaderCache::Destroy()
@ -433,7 +432,7 @@ bool VKShaderCache::CreateNewPipelineCache()
}
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
VkResult res = vkCreatePipelineCache(GSDeviceVK::GetInstance()->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
@ -463,7 +462,7 @@ bool VKShaderCache::ReadExistingPipelineCache()
const VkPipelineCacheCreateInfo ci{
VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data->size(), data->data()};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
VkResult res = vkCreatePipelineCache(GSDeviceVK::GetInstance()->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
@ -479,7 +478,7 @@ bool VKShaderCache::FlushPipelineCache()
return false;
size_t data_size;
VkResult res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, nullptr);
VkResult res = vkGetPipelineCacheData(GSDeviceVK::GetInstance()->GetDevice(), m_pipeline_cache, &data_size, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
@ -487,7 +486,7 @@ bool VKShaderCache::FlushPipelineCache()
}
std::vector<u8> data(data_size);
res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, data.data());
res = vkGetPipelineCacheData(GSDeviceVK::GetInstance()->GetDevice(), m_pipeline_cache, &data_size, data.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
@ -521,7 +520,7 @@ void VKShaderCache::ClosePipelineCache()
if (m_pipeline_cache == VK_NULL_HANDLE)
return;
vkDestroyPipelineCache(g_vulkan_context->GetDevice(), m_pipeline_cache, nullptr);
vkDestroyPipelineCache(GSDeviceVK::GetInstance()->GetDevice(), m_pipeline_cache, nullptr);
m_pipeline_cache = VK_NULL_HANDLE;
}
@ -596,7 +595,7 @@ VkShaderModule VKShaderCache::GetShaderModule(u32 type, std::string_view shader_
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0, spv->size() * sizeof(SPIRVCodeType), spv->data()};
VkShaderModule mod;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &ci, nullptr, &mod);
VkResult res = vkCreateShaderModule(GSDeviceVK::GetInstance()->GetDevice(), &ci, nullptr, &mod);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule() failed: ");

View File

@ -32,7 +32,7 @@ class VKShaderCache
public:
~VKShaderCache();
static bool Create();
static void Create();
static void Destroy();
/// Returns a handle to the pipeline cache. Set set_dirty to true if you are planning on writing to it externally.

View File

@ -15,9 +15,9 @@
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/GSDeviceVK.h"
#include "GS/Renderers/Vulkan/VKStreamBuffer.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "common/Align.h"
#include "common/Assertions.h"
@ -79,7 +79,7 @@ bool VKStreamBuffer::Create(VkBufferUsageFlags usage, u32 size)
VmaAllocationInfo ai = {};
VkBuffer new_buffer = VK_NULL_HANDLE;
VmaAllocation new_allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(g_vulkan_context->GetAllocator(), &bci, &aci, &new_buffer, &new_allocation, &ai);
VkResult res = vmaCreateBuffer(GSDeviceVK::GetInstance()->GetAllocator(), &bci, &aci, &new_buffer, &new_allocation, &ai);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
@ -105,9 +105,9 @@ void VKStreamBuffer::Destroy(bool defer)
if (m_buffer != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferBufferDestruction(m_buffer, m_allocation);
GSDeviceVK::GetInstance()->DeferBufferDestruction(m_buffer, m_allocation);
else
vmaDestroyBuffer(g_vulkan_context->GetAllocator(), m_buffer, m_allocation);
vmaDestroyBuffer(GSDeviceVK::GetInstance()->GetAllocator(), m_buffer, m_allocation);
}
m_size = 0;
@ -194,7 +194,7 @@ void VKStreamBuffer::CommitMemory(u32 final_num_bytes)
pxAssert(final_num_bytes <= m_current_space);
// For non-coherent mappings, flush the memory range
vmaFlushAllocation(g_vulkan_context->GetAllocator(), m_allocation, m_current_offset, final_num_bytes);
vmaFlushAllocation(GSDeviceVK::GetInstance()->GetAllocator(), m_allocation, m_current_offset, final_num_bytes);
m_current_offset += final_num_bytes;
m_current_space -= final_num_bytes;
@ -204,7 +204,7 @@ void VKStreamBuffer::CommitMemory(u32 final_num_bytes)
void VKStreamBuffer::UpdateCurrentFencePosition()
{
// Has the offset changed since the last fence?
const u64 counter = g_vulkan_context->GetCurrentFenceCounter();
const u64 counter = GSDeviceVK::GetInstance()->GetCurrentFenceCounter();
if (!m_tracked_fences.empty() && m_tracked_fences.back().first == counter)
{
// Still haven't executed a command buffer, so just update the offset.
@ -221,7 +221,7 @@ void VKStreamBuffer::UpdateGPUPosition()
auto start = m_tracked_fences.begin();
auto end = start;
const u64 completed_counter = g_vulkan_context->GetCompletedFenceCounter();
const u64 completed_counter = GSDeviceVK::GetInstance()->GetCompletedFenceCounter();
while (end != m_tracked_fences.end() && completed_counter >= end->first)
{
m_current_gpu_position = end->second;
@ -308,11 +308,11 @@ bool VKStreamBuffer::WaitForClearSpace(u32 num_bytes)
// Did any fences satisfy this condition?
// Has the command buffer been executed yet? If not, the caller should execute it.
if (iter == m_tracked_fences.end() || iter->first == g_vulkan_context->GetCurrentFenceCounter())
if (iter == m_tracked_fences.end() || iter->first == GSDeviceVK::GetInstance()->GetCurrentFenceCounter())
return false;
// Wait until this fence is signaled. This will fire the callback, updating the GPU position.
g_vulkan_context->WaitForFenceCounter(iter->first);
GSDeviceVK::GetInstance()->WaitForFenceCounter(iter->first);
m_tracked_fences.erase(
m_tracked_fences.begin(), m_current_offset == iter->second ? m_tracked_fences.end() : ++iter);
m_current_offset = new_offset;

View File

@ -15,9 +15,9 @@
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/GSDeviceVK.h"
#include "GS/Renderers/Vulkan/VKSwapChain.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "common/Assertions.h"
#include "common/CocoaTools.h"
@ -140,7 +140,7 @@ VkSurfaceKHR VKSwapChain::CreateVulkanSurface(VkInstance instance, VkPhysicalDev
void VKSwapChain::DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface)
{
vkDestroySurfaceKHR(g_vulkan_context->GetVulkanInstance(), surface, nullptr);
vkDestroySurfaceKHR(GSDeviceVK::GetInstance()->GetVulkanInstance(), surface, nullptr);
#if defined(__APPLE__)
if (wi->type == WindowInfo::Type::MacOS && wi->surface_handle)
@ -184,7 +184,7 @@ std::optional<VkSurfaceFormatKHR> VKSwapChain::SelectSurfaceFormat(VkSurfaceKHR
{
u32 format_count;
VkResult res =
vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), surface, &format_count, nullptr);
vkGetPhysicalDeviceSurfaceFormatsKHR(GSDeviceVK::GetInstance()->GetPhysicalDevice(), surface, &format_count, nullptr);
if (res != VK_SUCCESS || format_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
@ -193,7 +193,7 @@ std::optional<VkSurfaceFormatKHR> VKSwapChain::SelectSurfaceFormat(VkSurfaceKHR
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(
g_vulkan_context->GetPhysicalDevice(), surface, &format_count, surface_formats.data());
GSDeviceVK::GetInstance()->GetPhysicalDevice(), surface, &format_count, surface_formats.data());
pxAssert(res == VK_SUCCESS);
// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
@ -255,7 +255,7 @@ std::optional<VkPresentModeKHR> VKSwapChain::SelectPresentMode(VkSurfaceKHR surf
VkResult res;
u32 mode_count;
res =
vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), surface, &mode_count, nullptr);
vkGetPhysicalDeviceSurfacePresentModesKHR(GSDeviceVK::GetInstance()->GetPhysicalDevice(), surface, &mode_count, nullptr);
if (res != VK_SUCCESS || mode_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
@ -264,7 +264,7 @@ std::optional<VkPresentModeKHR> VKSwapChain::SelectPresentMode(VkSurfaceKHR surf
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(
g_vulkan_context->GetPhysicalDevice(), surface, &mode_count, present_modes.data());
GSDeviceVK::GetInstance()->GetPhysicalDevice(), surface, &mode_count, present_modes.data());
pxAssert(res == VK_SUCCESS);
// Checks if a particular mode is supported, if it is, returns that mode.
@ -315,7 +315,7 @@ bool VKSwapChain::CreateSwapChain()
// Look up surface properties to determine image count and dimensions
VkSurfaceCapabilitiesKHR surface_capabilities;
VkResult res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &surface_capabilities);
GSDeviceVK::GetInstance()->GetPhysicalDevice(), m_surface, &surface_capabilities);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: ");
@ -373,10 +373,10 @@ bool VKSwapChain::CreateSwapChain()
image_count, surface_format->format, surface_format->colorSpace, size, 1u, image_usage,
VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, transform, alpha, present_mode.value(), VK_TRUE, old_swap_chain};
std::array<uint32_t, 2> indices = {{
g_vulkan_context->GetGraphicsQueueFamilyIndex(),
g_vulkan_context->GetPresentQueueFamilyIndex(),
GSDeviceVK::GetInstance()->GetGraphicsQueueFamilyIndex(),
GSDeviceVK::GetInstance()->GetPresentQueueFamilyIndex(),
}};
if (g_vulkan_context->GetGraphicsQueueFamilyIndex() != g_vulkan_context->GetPresentQueueFamilyIndex())
if (GSDeviceVK::GetInstance()->GetGraphicsQueueFamilyIndex() != GSDeviceVK::GetInstance()->GetPresentQueueFamilyIndex())
{
swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swap_chain_info.queueFamilyIndexCount = 2;
@ -389,7 +389,7 @@ bool VKSwapChain::CreateSwapChain()
VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT};
if (m_exclusive_fullscreen_control.has_value())
{
if (g_vulkan_context->GetOptionalExtensions().vk_ext_full_screen_exclusive)
if (GSDeviceVK::GetInstance()->GetOptionalExtensions().vk_ext_full_screen_exclusive)
{
exclusive_info.fullScreenExclusive =
(m_exclusive_fullscreen_control.value() ? VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT :
@ -413,7 +413,7 @@ bool VKSwapChain::CreateSwapChain()
Console.Error("Exclusive fullscreen control requested, but is not supported on this platform.");
#endif
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
res = vkCreateSwapchainKHR(GSDeviceVK::GetInstance()->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
@ -423,7 +423,7 @@ bool VKSwapChain::CreateSwapChain()
// Now destroy the old swap chain, since it's been recreated.
// We can do this immediately since all work should have been completed before calling resize.
if (old_swap_chain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
vkDestroySwapchainKHR(GSDeviceVK::GetInstance()->GetDevice(), old_swap_chain, nullptr);
m_window_info.surface_width = std::max(1u, size.width);
m_window_info.surface_height = std::max(1u, size.height);
@ -431,7 +431,7 @@ bool VKSwapChain::CreateSwapChain()
// Get and create images.
pxAssert(m_images.empty());
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
res = vkGetSwapchainImagesKHR(GSDeviceVK::GetInstance()->GetDevice(), m_swap_chain, &image_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
@ -439,7 +439,7 @@ bool VKSwapChain::CreateSwapChain()
}
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data());
res = vkGetSwapchainImagesKHR(GSDeviceVK::GetInstance()->GetDevice(), m_swap_chain, &image_count, images.data());
pxAssert(res == VK_SUCCESS);
m_images.reserve(image_count);
@ -462,7 +462,7 @@ bool VKSwapChain::CreateSwapChain()
ImageSemaphores sema;
const VkSemaphoreCreateInfo semaphore_info = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr, &sema.available_semaphore);
res = vkCreateSemaphore(GSDeviceVK::GetInstance()->GetDevice(), &semaphore_info, nullptr, &sema.available_semaphore);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
@ -470,11 +470,11 @@ bool VKSwapChain::CreateSwapChain()
}
res = vkCreateSemaphore(
g_vulkan_context->GetDevice(), &semaphore_info, nullptr, &sema.rendering_finished_semaphore);
GSDeviceVK::GetInstance()->GetDevice(), &semaphore_info, nullptr, &sema.rendering_finished_semaphore);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
vkDestroySemaphore(g_vulkan_context->GetDevice(), sema.available_semaphore, nullptr);
vkDestroySemaphore(GSDeviceVK::GetInstance()->GetDevice(), sema.available_semaphore, nullptr);
return false;
}
@ -494,8 +494,8 @@ void VKSwapChain::DestroySwapChainImages()
m_images.clear();
for (auto& it : m_semaphores)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), it.rendering_finished_semaphore, nullptr);
vkDestroySemaphore(g_vulkan_context->GetDevice(), it.available_semaphore, nullptr);
vkDestroySemaphore(GSDeviceVK::GetInstance()->GetDevice(), it.rendering_finished_semaphore, nullptr);
vkDestroySemaphore(GSDeviceVK::GetInstance()->GetDevice(), it.available_semaphore, nullptr);
}
m_semaphores.clear();
@ -509,7 +509,7 @@ void VKSwapChain::DestroySwapChain()
if (m_swap_chain == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr);
vkDestroySwapchainKHR(GSDeviceVK::GetInstance()->GetDevice(), m_swap_chain, nullptr);
m_swap_chain = VK_NULL_HANDLE;
m_window_info.surface_width = 0;
m_window_info.surface_height = 0;
@ -534,7 +534,7 @@ VkResult VKSwapChain::AcquireNextImage()
// Use a different semaphore for each image.
m_current_semaphore = (m_current_semaphore + 1) % static_cast<u32>(m_semaphores.size());
const VkResult res = vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX,
const VkResult res = vkAcquireNextImageKHR(GSDeviceVK::GetInstance()->GetDevice(), m_swap_chain, UINT64_MAX,
m_semaphores[m_current_semaphore].available_semaphore, VK_NULL_HANDLE, &m_current_image);
m_image_acquire_result = res;
return res;
@ -594,14 +594,14 @@ bool VKSwapChain::RecreateSurface(const WindowInfo& new_wi)
// Re-create the surface with the new native handle
m_window_info = new_wi;
m_surface = CreateVulkanSurface(
g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &m_window_info);
GSDeviceVK::GetInstance()->GetVulkanInstance(), GSDeviceVK::GetInstance()->GetPhysicalDevice(), &m_window_info);
if (m_surface == VK_NULL_HANDLE)
return false;
// The validation layers get angry at us if we don't call this before creating the swapchain.
VkBool32 present_supported = VK_TRUE;
VkResult res = vkGetPhysicalDeviceSurfaceSupportKHR(g_vulkan_context->GetPhysicalDevice(),
g_vulkan_context->GetPresentQueueFamilyIndex(), m_surface, &present_supported);
VkResult res = vkGetPhysicalDeviceSurfaceSupportKHR(GSDeviceVK::GetInstance()->GetPhysicalDevice(),
GSDeviceVK::GetInstance()->GetPresentQueueFamilyIndex(), m_surface, &present_supported);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: ");
@ -628,6 +628,6 @@ void VKSwapChain::DestroySurface()
if (m_surface == VK_NULL_HANDLE)
return;
DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &m_window_info, m_surface);
DestroyVulkanSurface(GSDeviceVK::GetInstance()->GetVulkanInstance(), &m_window_info, m_surface);
m_surface = VK_NULL_HANDLE;
}

View File

@ -1,239 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include "common/StringUtil.h"
#include <cmath>
void Vulkan::SafeDestroyFramebuffer(VkFramebuffer& fb)
{
	// Destroys the framebuffer on the global device and resets the handle.
	// Safe to call when fb is already VK_NULL_HANDLE (no-op).
	if (fb == VK_NULL_HANDLE)
		return;

	vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr);
	fb = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroyShaderModule(VkShaderModule& sm)
{
	// Destroys the shader module and nulls the handle; no-op on null input.
	if (sm == VK_NULL_HANDLE)
		return;

	vkDestroyShaderModule(g_vulkan_context->GetDevice(), sm, nullptr);
	sm = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroyPipeline(VkPipeline& p)
{
	// Destroys the pipeline and nulls the handle; no-op on null input.
	if (p == VK_NULL_HANDLE)
		return;

	vkDestroyPipeline(g_vulkan_context->GetDevice(), p, nullptr);
	p = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroyPipelineLayout(VkPipelineLayout& pl)
{
	// Destroys the pipeline layout and nulls the handle; no-op on null input.
	if (pl == VK_NULL_HANDLE)
		return;

	vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), pl, nullptr);
	pl = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl)
{
	// Destroys the descriptor set layout and nulls the handle; no-op on null input.
	if (dsl == VK_NULL_HANDLE)
		return;

	vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), dsl, nullptr);
	dsl = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroyBufferView(VkBufferView& bv)
{
	// Destroys the buffer view and nulls the handle; no-op on null input.
	if (bv == VK_NULL_HANDLE)
		return;

	vkDestroyBufferView(g_vulkan_context->GetDevice(), bv, nullptr);
	bv = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroyImageView(VkImageView& iv)
{
	// Destroys the image view and nulls the handle; no-op on null input.
	if (iv == VK_NULL_HANDLE)
		return;

	vkDestroyImageView(g_vulkan_context->GetDevice(), iv, nullptr);
	iv = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroySampler(VkSampler& samp)
{
	// Destroys the sampler and nulls the handle; no-op on null input.
	if (samp == VK_NULL_HANDLE)
		return;

	vkDestroySampler(g_vulkan_context->GetDevice(), samp, nullptr);
	samp = VK_NULL_HANDLE;
}
void Vulkan::SafeDestroySemaphore(VkSemaphore& sem)
{
	// Destroys the semaphore and nulls the handle; no-op on null input.
	if (sem == VK_NULL_HANDLE)
		return;

	vkDestroySemaphore(g_vulkan_context->GetDevice(), sem, nullptr);
	sem = VK_NULL_HANDLE;
}
void Vulkan::SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds)
{
	// Returns the set to the context's global descriptor pool and nulls the
	// handle; no-op on null input. Freeing goes through the context (not
	// vkFreeDescriptorSets directly) so the pool bookkeeping stays correct.
	if (ds == VK_NULL_HANDLE)
		return;

	g_vulkan_context->FreeGlobalDescriptorSet(ds);
	ds = VK_NULL_HANDLE;
}
void Vulkan::BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
	VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, VkPipelineStageFlags src_stage_mask,
	VkPipelineStageFlags dst_stage_mask)
{
	// Records a single VkBufferMemoryBarrier covering [offset, offset + size)
	// into command_buffer. No queue family ownership transfer is performed
	// (both family indices are VK_QUEUE_FAMILY_IGNORED).
	VkBufferMemoryBarrier barrier = {};
	barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
	barrier.pNext = nullptr;
	barrier.srcAccessMask = src_access_mask;
	barrier.dstAccessMask = dst_access_mask;
	barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.buffer = buffer;
	barrier.offset = offset;
	barrier.size = size;

	vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &barrier, 0, nullptr);
}
void Vulkan::AddPointerToChain(void* head, const void* ptr)
{
VkBaseInStructure* last_st = static_cast<VkBaseInStructure*>(head);
while (last_st->pNext)
{
if (last_st->pNext == ptr)
return;
last_st = const_cast<VkBaseInStructure*>(last_st->pNext);
}
last_st->pNext = static_cast<const VkBaseInStructure*>(ptr);
}
const char* Vulkan::VkResultToString(VkResult res)
{
	// Maps a VkResult to its enumerator name for logging.
	// Stringizing the enumerator guarantees the returned text always
	// matches the identifier exactly. Unrecognized values (newer
	// extensions, corrupt data) fall through to a sentinel string.
	switch (res)
	{
#define VK_RESULT_CASE(name) \
	case name: \
		return #name

		VK_RESULT_CASE(VK_SUCCESS);
		VK_RESULT_CASE(VK_NOT_READY);
		VK_RESULT_CASE(VK_TIMEOUT);
		VK_RESULT_CASE(VK_EVENT_SET);
		VK_RESULT_CASE(VK_EVENT_RESET);
		VK_RESULT_CASE(VK_INCOMPLETE);
		VK_RESULT_CASE(VK_ERROR_OUT_OF_HOST_MEMORY);
		VK_RESULT_CASE(VK_ERROR_OUT_OF_DEVICE_MEMORY);
		VK_RESULT_CASE(VK_ERROR_INITIALIZATION_FAILED);
		VK_RESULT_CASE(VK_ERROR_DEVICE_LOST);
		VK_RESULT_CASE(VK_ERROR_MEMORY_MAP_FAILED);
		VK_RESULT_CASE(VK_ERROR_LAYER_NOT_PRESENT);
		VK_RESULT_CASE(VK_ERROR_EXTENSION_NOT_PRESENT);
		VK_RESULT_CASE(VK_ERROR_FEATURE_NOT_PRESENT);
		VK_RESULT_CASE(VK_ERROR_INCOMPATIBLE_DRIVER);
		VK_RESULT_CASE(VK_ERROR_TOO_MANY_OBJECTS);
		VK_RESULT_CASE(VK_ERROR_FORMAT_NOT_SUPPORTED);
		VK_RESULT_CASE(VK_ERROR_SURFACE_LOST_KHR);
		VK_RESULT_CASE(VK_ERROR_NATIVE_WINDOW_IN_USE_KHR);
		VK_RESULT_CASE(VK_SUBOPTIMAL_KHR);
		VK_RESULT_CASE(VK_ERROR_OUT_OF_DATE_KHR);
		VK_RESULT_CASE(VK_ERROR_INCOMPATIBLE_DISPLAY_KHR);
		VK_RESULT_CASE(VK_ERROR_VALIDATION_FAILED_EXT);
		VK_RESULT_CASE(VK_ERROR_INVALID_SHADER_NV);

#undef VK_RESULT_CASE

		default:
			return "UNKNOWN_VK_RESULT";
	}
}
void Vulkan::LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap);
va_end(ap);
Console.Error("(%s) %s (%d: %s)", func_name, real_msg.c_str(), static_cast<int>(res), VkResultToString(res));
}

View File

@ -1,131 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "common/StringUtil.h"
#include <algorithm>
#include <array>
#include <cstdarg>
#include <string_view>
namespace Vulkan
{
// Safe destroy helpers
void SafeDestroyFramebuffer(VkFramebuffer& fb);
void SafeDestroyShaderModule(VkShaderModule& sm);
void SafeDestroyPipeline(VkPipeline& p);
void SafeDestroyPipelineLayout(VkPipelineLayout& pl);
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl);
void SafeDestroyBufferView(VkBufferView& bv);
void SafeDestroyImageView(VkImageView& iv);
void SafeDestroySampler(VkSampler& samp);
void SafeDestroySemaphore(VkSemaphore& sem);
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds);
// Wrapper for creating an barrier on a buffer
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask);
// Adds a structure to a chain.
void AddPointerToChain(void* head, const void* ptr);
const char* VkResultToString(VkResult res);
void LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...) /*printflike(4, 5)*/;
#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::LogVulkanResult(__func__, res, __VA_ARGS__)
#if defined(_DEBUG)
// We can't use the templates below because they're all the same type on 32-bit.
#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || \
defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
#define ENABLE_VULKAN_DEBUG_OBJECTS 1
#endif
#endif
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
// Provides a compile-time mapping between a Vulkan-type into its matching VkObjectType
template <typename T>
struct VkObjectTypeMap;
// clang-format off
template<> struct VkObjectTypeMap<VkInstance > { using type = VkInstance ; static constexpr VkObjectType value = VK_OBJECT_TYPE_INSTANCE; };
template<> struct VkObjectTypeMap<VkPhysicalDevice > { using type = VkPhysicalDevice ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PHYSICAL_DEVICE; };
template<> struct VkObjectTypeMap<VkDevice > { using type = VkDevice ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE; };
template<> struct VkObjectTypeMap<VkQueue > { using type = VkQueue ; static constexpr VkObjectType value = VK_OBJECT_TYPE_QUEUE; };
template<> struct VkObjectTypeMap<VkSemaphore > { using type = VkSemaphore ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SEMAPHORE; };
template<> struct VkObjectTypeMap<VkCommandBuffer > { using type = VkCommandBuffer ; static constexpr VkObjectType value = VK_OBJECT_TYPE_COMMAND_BUFFER; };
template<> struct VkObjectTypeMap<VkFence > { using type = VkFence ; static constexpr VkObjectType value = VK_OBJECT_TYPE_FENCE; };
template<> struct VkObjectTypeMap<VkDeviceMemory > { using type = VkDeviceMemory ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE_MEMORY; };
template<> struct VkObjectTypeMap<VkBuffer > { using type = VkBuffer ; static constexpr VkObjectType value = VK_OBJECT_TYPE_BUFFER; };
template<> struct VkObjectTypeMap<VkImage > { using type = VkImage ; static constexpr VkObjectType value = VK_OBJECT_TYPE_IMAGE; };
template<> struct VkObjectTypeMap<VkEvent > { using type = VkEvent ; static constexpr VkObjectType value = VK_OBJECT_TYPE_EVENT; };
template<> struct VkObjectTypeMap<VkQueryPool > { using type = VkQueryPool ; static constexpr VkObjectType value = VK_OBJECT_TYPE_QUERY_POOL; };
template<> struct VkObjectTypeMap<VkBufferView > { using type = VkBufferView ; static constexpr VkObjectType value = VK_OBJECT_TYPE_BUFFER_VIEW; };
template<> struct VkObjectTypeMap<VkImageView > { using type = VkImageView ; static constexpr VkObjectType value = VK_OBJECT_TYPE_IMAGE_VIEW; };
template<> struct VkObjectTypeMap<VkShaderModule > { using type = VkShaderModule ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SHADER_MODULE; };
template<> struct VkObjectTypeMap<VkPipelineCache > { using type = VkPipelineCache ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE_CACHE; };
template<> struct VkObjectTypeMap<VkPipelineLayout > { using type = VkPipelineLayout ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE_LAYOUT; };
template<> struct VkObjectTypeMap<VkRenderPass > { using type = VkRenderPass ; static constexpr VkObjectType value = VK_OBJECT_TYPE_RENDER_PASS; };
template<> struct VkObjectTypeMap<VkPipeline > { using type = VkPipeline ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE; };
template<> struct VkObjectTypeMap<VkDescriptorSetLayout > { using type = VkDescriptorSetLayout ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT; };
template<> struct VkObjectTypeMap<VkSampler > { using type = VkSampler ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SAMPLER; };
template<> struct VkObjectTypeMap<VkDescriptorPool > { using type = VkDescriptorPool ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_POOL; };
template<> struct VkObjectTypeMap<VkDescriptorSet > { using type = VkDescriptorSet ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_SET; };
template<> struct VkObjectTypeMap<VkFramebuffer > { using type = VkFramebuffer ; static constexpr VkObjectType value = VK_OBJECT_TYPE_FRAMEBUFFER; };
template<> struct VkObjectTypeMap<VkCommandPool > { using type = VkCommandPool ; static constexpr VkObjectType value = VK_OBJECT_TYPE_COMMAND_POOL; };
template<> struct VkObjectTypeMap<VkDescriptorUpdateTemplate> { using type = VkDescriptorUpdateTemplate; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE; };
template<> struct VkObjectTypeMap<VkSurfaceKHR > { using type = VkSurfaceKHR ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SURFACE_KHR; };
template<> struct VkObjectTypeMap<VkSwapchainKHR > { using type = VkSwapchainKHR ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SWAPCHAIN_KHR; };
template<> struct VkObjectTypeMap<VkDebugUtilsMessengerEXT > { using type = VkDebugUtilsMessengerEXT ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT; };
// clang-format on
#endif
// Attaches a human-readable debug name to a Vulkan object handle via
// VK_EXT_debug_utils, for display in validation messages and debuggers.
// Compiles to a no-op unless ENABLE_VULKAN_DEBUG_OBJECTS is defined
// (64-bit debug builds only — see the #ifdef block above).
static inline void SetObjectName(
	VkDevice device, void* object_handle, VkObjectType object_type, const char* format, va_list ap)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
	// The entry point is only non-null when the debug utils extension was
	// loaded; silently skip naming otherwise.
	if (!vkSetDebugUtilsObjectNameEXT)
	{
		return;
	}
	const std::string str(StringUtil::StdStringFromFormatV(format, ap));
	const VkDebugUtilsObjectNameInfoEXT nameInfo{VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, nullptr,
		object_type, reinterpret_cast<uint64_t>(object_handle), str.c_str()};
	vkSetDebugUtilsObjectNameEXT(device, &nameInfo);
#endif
}
// Typed convenience wrapper: deduces the correct VkObjectType for the
// handle type T via the VkObjectTypeMap trait table, then forwards to the
// va_list overload above. No-op unless ENABLE_VULKAN_DEBUG_OBJECTS.
template <typename T>
static inline void SetObjectName(VkDevice device, T object_handle, const char* format, ...)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
	std::va_list ap;
	va_start(ap, format);
	// Cast through the trait's handle type so dispatch-handle and
	// non-dispatch-handle types both convert cleanly to void*.
	SetObjectName(device, reinterpret_cast<void*>((typename VkObjectTypeMap<T>::type)object_handle),
		VkObjectTypeMap<T>::value, format, ap);
	va_end(ap);
#endif
}
} // namespace Vulkan

View File

@ -192,12 +192,10 @@
<ClCompile Include="GS\Renderers\OpenGL\GLShaderCache.cpp" />
<ClCompile Include="GS\Renderers\OpenGL\GLStreamBuffer.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKBuilders.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKContext.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKLoader.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKShaderCache.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKStreamBuffer.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKSwapChain.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKUtil.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\vk_mem_alloc.cpp" />
<ClCompile Include="Hotkeys.cpp" />
<ClCompile Include="ImGui\FullscreenUI.cpp" />
@ -546,13 +544,11 @@
<ClInclude Include="GS\Renderers\OpenGL\GLShaderCache.h" />
<ClInclude Include="GS\Renderers\OpenGL\GLStreamBuffer.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKBuilders.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKContext.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKEntryPoints.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKLoader.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKShaderCache.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKStreamBuffer.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKSwapChain.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKUtil.h" />
<ClInclude Include="ImGui\FullscreenUI.h" />
<ClInclude Include="ImGui\ImGuiFullscreen.h" />
<ClInclude Include="ImGui\ImGuiManager.h" />

View File

@ -1310,9 +1310,6 @@
<ClCompile Include="GS\Renderers\Vulkan\VKBuilders.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKContext.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKLoader.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
@ -1325,9 +1322,6 @@
<ClCompile Include="GS\Renderers\Vulkan\VKSwapChain.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKUtil.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Dmac.cpp">
<Filter>System\Ps2\EmotionEngine\DMAC</Filter>
</ClCompile>
@ -2256,9 +2250,6 @@
<ClInclude Include="GS\Renderers\Vulkan\VKBuilders.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKContext.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKEntryPoints.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
@ -2274,9 +2265,6 @@
<ClInclude Include="GS\Renderers\Vulkan\VKSwapChain.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKUtil.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="ps2\pgif.h">
<Filter>System\Ps2\Iop</Filter>
</ClInclude>