Merge pull request #4270 from stenzek/vulkan-defer-fix

Vulkan: Miscellaneous minor fixes
This commit is contained in:
shuffle2 2016-10-04 01:47:59 -07:00 committed by GitHub
commit ea33405feb
24 changed files with 273 additions and 299 deletions

View File

@ -40,7 +40,7 @@ private:
void Readback(StateTracker* state_tracker);
VkBuffer m_gpu_buffer = VK_NULL_HANDLE;
VkDeviceMemory m_gpu_memory = nullptr;
VkDeviceMemory m_gpu_memory = VK_NULL_HANDLE;
static const size_t NUM_VALUES = 4;
static const size_t BUFFER_SIZE = sizeof(u32) * NUM_VALUES;

View File

@ -73,7 +73,7 @@ void CommandBufferManager::DestroyCommandPool()
if (m_command_pool)
{
vkDestroyCommandPool(g_vulkan_context->GetDevice(), m_command_pool, nullptr);
m_command_pool = nullptr;
m_command_pool = VK_NULL_HANDLE;
}
}
@ -141,8 +141,8 @@ void CommandBufferManager::DestroyCommandBuffers()
for (FrameResources& resources : m_frame_resources)
{
for (const auto& it : resources.cleanup_resources)
it.destroy_callback(device, it.object);
for (auto& it : resources.cleanup_resources)
it();
resources.cleanup_resources.clear();
if (resources.fence != VK_NULL_HANDLE)
@ -385,8 +385,8 @@ void CommandBufferManager::OnCommandBufferExecuted(size_t index)
iter.second.second(resources.fence);
// Clean up all objects pending destruction on this command buffer
for (const auto& it : resources.cleanup_resources)
it.destroy_callback(g_vulkan_context->GetDevice(), it.object);
for (auto& it : resources.cleanup_resources)
it();
resources.cleanup_resources.clear();
}
@ -446,6 +446,48 @@ void CommandBufferManager::ExecuteCommandBuffer(bool submit_off_thread, bool wai
WaitForFence(pending_fence);
}
void CommandBufferManager::DeferBufferDestruction(VkBuffer object)
{
FrameResources& resources = m_frame_resources[m_current_frame];
resources.cleanup_resources.push_back(
[object]() { vkDestroyBuffer(g_vulkan_context->GetDevice(), object, nullptr); });
}
void CommandBufferManager::DeferBufferViewDestruction(VkBufferView object)
{
FrameResources& resources = m_frame_resources[m_current_frame];
resources.cleanup_resources.push_back(
[object]() { vkDestroyBufferView(g_vulkan_context->GetDevice(), object, nullptr); });
}
void CommandBufferManager::DeferDeviceMemoryDestruction(VkDeviceMemory object)
{
FrameResources& resources = m_frame_resources[m_current_frame];
resources.cleanup_resources.push_back(
[object]() { vkFreeMemory(g_vulkan_context->GetDevice(), object, nullptr); });
}
void CommandBufferManager::DeferFramebufferDestruction(VkFramebuffer object)
{
FrameResources& resources = m_frame_resources[m_current_frame];
resources.cleanup_resources.push_back(
[object]() { vkDestroyFramebuffer(g_vulkan_context->GetDevice(), object, nullptr); });
}
void CommandBufferManager::DeferImageDestruction(VkImage object)
{
FrameResources& resources = m_frame_resources[m_current_frame];
resources.cleanup_resources.push_back(
[object]() { vkDestroyImage(g_vulkan_context->GetDevice(), object, nullptr); });
}
void CommandBufferManager::DeferImageViewDestruction(VkImageView object)
{
FrameResources& resources = m_frame_resources[m_current_frame];
resources.cleanup_resources.push_back(
[object]() { vkDestroyImageView(g_vulkan_context->GetDevice(), object, nullptr); });
}
void CommandBufferManager::AddFencePointCallback(
const void* key, const CommandBufferQueuedCallback& queued_callback,
const CommandBufferExecutedCallback& executed_callback)

View File

@ -81,12 +81,12 @@ public:
// Schedule a vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
template <typename T>
void DeferResourceDestruction(T object)
{
DeferredResourceDestruction wrapper = DeferredResourceDestruction::Wrapper<T>(object);
m_frame_resources[m_current_frame].cleanup_resources.push_back(wrapper);
}
void DeferBufferDestruction(VkBuffer object);
void DeferBufferViewDestruction(VkBufferView object);
void DeferDeviceMemoryDestruction(VkDeviceMemory object);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object);
void DeferImageViewDestruction(VkImageView object);
// Instruct the manager to fire the specified callback when a fence is flagged to be signaled.
// This happens when command buffers are executed, and can be tested if signaled, which means
@ -124,7 +124,7 @@ private:
bool init_command_buffer_used;
bool needs_fence_wait;
std::vector<DeferredResourceDestruction> cleanup_resources;
std::vector<std::function<void()>> cleanup_resources;
};
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources = {};

View File

@ -130,8 +130,8 @@ union SamplerState {
BitField<5, 2, VkSamplerAddressMode> wrap_v;
BitField<7, 8, u32> min_lod;
BitField<15, 8, u32> max_lod;
BitField<23, 6, s32> lod_bias; // tm0.lod_bias (8 bits) / 32 gives us 0-7.
BitField<29, 3, u32> anisotropy; // max_anisotropy = 1 << anisotropy, max of 16, so range 0-4.
BitField<23, 8, s32> lod_bias;
BitField<31, 1, u32> enable_anisotropic_filtering;
u32 bits;
};

View File

@ -46,6 +46,7 @@ FramebufferManager::~FramebufferManager()
DestroyReadbackRenderPasses();
DestroyPokeVertexBuffer();
DestroyPokeShaders();
}
bool FramebufferManager::Initialize()
@ -207,6 +208,12 @@ void FramebufferManager::DestroyEFBRenderPass()
m_efb_load_render_pass = VK_NULL_HANDLE;
}
if (m_efb_clear_render_pass != VK_NULL_HANDLE)
{
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_efb_clear_render_pass, nullptr);
m_efb_clear_render_pass = VK_NULL_HANDLE;
}
if (m_depth_resolve_render_pass != VK_NULL_HANDLE)
{
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_depth_resolve_render_pass, nullptr);
@ -352,6 +359,18 @@ void FramebufferManager::DestroyEFBFramebuffer()
m_efb_framebuffer = VK_NULL_HANDLE;
}
if (m_efb_convert_framebuffer != VK_NULL_HANDLE)
{
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), m_efb_convert_framebuffer, nullptr);
m_efb_convert_framebuffer = VK_NULL_HANDLE;
}
if (m_depth_resolve_framebuffer != VK_NULL_HANDLE)
{
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), m_depth_resolve_framebuffer, nullptr);
m_depth_resolve_framebuffer = VK_NULL_HANDLE;
}
m_efb_color_texture.reset();
m_efb_convert_color_texture.reset();
m_efb_depth_texture.reset();
@ -899,9 +918,15 @@ bool FramebufferManager::CreateReadbackRenderPasses()
void FramebufferManager::DestroyReadbackRenderPasses()
{
if (m_copy_color_render_pass != VK_NULL_HANDLE)
{
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_copy_color_render_pass, nullptr);
m_copy_color_render_pass = VK_NULL_HANDLE;
}
if (m_copy_depth_render_pass != VK_NULL_HANDLE)
{
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_copy_depth_render_pass, nullptr);
m_copy_depth_render_pass = VK_NULL_HANDLE;
}
}
bool FramebufferManager::CompileReadbackShaders()
@ -1238,6 +1263,7 @@ bool FramebufferManager::CreatePokeVertexBuffer()
void FramebufferManager::DestroyPokeVertexBuffer()
{
m_poke_vertex_stream_buffer.reset();
}
bool FramebufferManager::CompilePokeShaders()

View File

@ -784,11 +784,6 @@ VkSampler ObjectCache::GetSampler(const SamplerState& info)
if (iter != m_sampler_cache.end())
return iter->second;
// Cap anisotropy to device limits.
VkBool32 anisotropy_enable = (info.anisotropy != 0) ? VK_TRUE : VK_FALSE;
float max_anisotropy = std::min(static_cast<float>(1 << info.anisotropy),
g_vulkan_context->GetMaxSaxmplerAnisotropy());
VkSamplerCreateInfo create_info = {
VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
@ -799,17 +794,26 @@ VkSampler ObjectCache::GetSampler(const SamplerState& info)
info.wrap_u, // VkSamplerAddressMode addressModeU
info.wrap_v, // VkSamplerAddressMode addressModeV
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // VkSamplerAddressMode addressModeW
static_cast<float>(info.lod_bias.Value()), // float mipLodBias
anisotropy_enable, // VkBool32 anisotropyEnable
max_anisotropy, // float maxAnisotropy
static_cast<float>(info.lod_bias / 32.0f), // float mipLodBias
VK_FALSE, // VkBool32 anisotropyEnable
0.0f, // float maxAnisotropy
VK_FALSE, // VkBool32 compareEnable
VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp
static_cast<float>(info.min_lod.Value()), // float minLod
static_cast<float>(info.max_lod.Value()), // float maxLod
static_cast<float>(info.min_lod / 16.0f), // float minLod
static_cast<float>(info.max_lod / 16.0f), // float maxLod
VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, // VkBorderColor borderColor
VK_FALSE // VkBool32 unnormalizedCoordinates
};
// Can we use anisotropic filtering with this sampler?
if (info.enable_anisotropic_filtering && g_vulkan_context->SupportsAnisotropicFiltering())
{
// Cap anisotropy to device limits.
create_info.anisotropyEnable = VK_TRUE;
create_info.maxAnisotropy = std::min(static_cast<float>(1 << g_ActiveConfig.iMaxAnisotropy),
g_vulkan_context->GetMaxSamplerAnisotropy());
}
VkSampler sampler = VK_NULL_HANDLE;
VkResult res = vkCreateSampler(g_vulkan_context->GetDevice(), &create_info, nullptr, &sampler);
if (res != VK_SUCCESS)

View File

@ -41,6 +41,9 @@ PaletteTextureConverter::~PaletteTextureConverter()
if (m_palette_buffer_view != VK_NULL_HANDLE)
vkDestroyBufferView(g_vulkan_context->GetDevice(), m_palette_buffer_view, nullptr);
if (m_pipeline_layout != VK_NULL_HANDLE)
vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), m_pipeline_layout, nullptr);
if (m_palette_set_layout != VK_NULL_HANDLE)
vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), m_palette_set_layout, nullptr);
}

View File

@ -170,9 +170,9 @@ RasterFont::RasterFont()
RasterFont::~RasterFont()
{
if (m_vertex_shader != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_vertex_shader);
vkDestroyShaderModule(g_vulkan_context->GetDevice(), m_vertex_shader, nullptr);
if (m_fragment_shader != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_fragment_shader);
vkDestroyShaderModule(g_vulkan_context->GetDevice(), m_fragment_shader, nullptr);
}
bool RasterFont::Initialize()
@ -279,8 +279,8 @@ bool RasterFont::CreateTexture()
// Free temp buffers after command buffer executes
m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
g_command_buffer_mgr->DeferResourceDestruction(temp_buffer);
g_command_buffer_mgr->DeferResourceDestruction(temp_buffer_memory);
g_command_buffer_mgr->DeferBufferDestruction(temp_buffer);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(temp_buffer_memory);
return true;
}

View File

@ -22,6 +22,7 @@
#include "VideoBackends/Vulkan/StagingTexture2D.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/SwapChain.h"
#include "VideoBackends/Vulkan/TextureCache.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
@ -41,37 +42,39 @@
namespace Vulkan
{
Renderer::Renderer()
Renderer::Renderer(std::unique_ptr<SwapChain> swap_chain) : m_swap_chain(std::move(swap_chain))
{
g_Config.bRunning = true;
UpdateActiveConfig();
// Set to something invalid, forcing all states to be re-initialized.
for (size_t i = 0; i < m_sampler_states.size(); i++)
m_sampler_states[i].bits = std::numeric_limits<decltype(m_sampler_states[i].bits)>::max();
// Set default size so a decent EFB is created initially.
s_backbuffer_width = MAX_XFB_WIDTH;
s_backbuffer_height = MAX_XFB_HEIGHT;
// These have to be initialized before FramebufferManager is created.
// If running surfaceless, assume a window size of MAX_XFB_{WIDTH,HEIGHT}.
FramebufferManagerBase::SetLastXfbWidth(MAX_XFB_WIDTH);
FramebufferManagerBase::SetLastXfbHeight(MAX_XFB_HEIGHT);
PixelShaderManager::SetEfbScaleChanged();
s_backbuffer_width = m_swap_chain ? m_swap_chain->GetWidth() : MAX_XFB_WIDTH;
s_backbuffer_height = m_swap_chain ? m_swap_chain->GetHeight() : MAX_XFB_HEIGHT;
s_last_efb_scale = g_ActiveConfig.iEFBScale;
UpdateDrawRectangle(s_backbuffer_width, s_backbuffer_height);
CalculateTargetSize(s_backbuffer_width, s_backbuffer_height);
PixelShaderManager::SetEfbScaleChanged();
}
Renderer::~Renderer()
{
g_Config.bRunning = false;
UpdateActiveConfig();
DestroyScreenshotResources();
DestroyShaders();
DestroySemaphores();
}
bool Renderer::Initialize(FramebufferManager* framebuffer_mgr, void* window_handle,
VkSurfaceKHR surface)
bool Renderer::Initialize(FramebufferManager* framebuffer_mgr)
{
m_framebuffer_mgr = framebuffer_mgr;
g_Config.bRunning = true;
UpdateActiveConfig();
// Create state tracker, doesn't require any resources
m_state_tracker = std::make_unique<StateTracker>();
BindEFBToStateTracker();
@ -109,24 +112,6 @@ bool Renderer::Initialize(FramebufferManager* framebuffer_mgr, void* window_hand
m_bounding_box->GetGPUBufferSize());
}
// Initialize annoying statics
s_last_efb_scale = g_ActiveConfig.iEFBScale;
// Create swap chain
if (surface)
{
// Update backbuffer dimensions
m_swap_chain = SwapChain::Create(window_handle, surface);
if (!m_swap_chain)
{
PanicAlert("Failed to create swap chain.");
return false;
}
// Update render rectangle etc.
OnSwapChainResized();
}
// Various initialization routines will have executed commands on the command buffer.
// Execute what we have done before beginning the first frame.
g_command_buffer_mgr->PrepareToSubmitCommandBuffer();
@ -164,13 +149,13 @@ void Renderer::DestroySemaphores()
if (m_image_available_semaphore)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), m_image_available_semaphore, nullptr);
m_image_available_semaphore = nullptr;
m_image_available_semaphore = VK_NULL_HANDLE;
}
if (m_rendering_finished_semaphore)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), m_rendering_finished_semaphore, nullptr);
m_rendering_finished_semaphore = nullptr;
m_rendering_finished_semaphore = VK_NULL_HANDLE;
}
}
@ -491,9 +476,6 @@ void Renderer::SwapImpl(u32 xfb_addr, u32 fb_width, u32 fb_stride, u32 fb_height
// Scale the source rectangle to the selected internal resolution.
TargetRectangle source_rc = Renderer::ConvertEFBRectangle(rc);
// Target rectangle can change if the VI aspect ratio changes.
UpdateDrawRectangle(s_backbuffer_width, s_backbuffer_height);
// Transition the EFB render target to a shader resource.
VkRect2D src_region = {{0, 0},
{m_framebuffer_mgr->GetEFBWidth(), m_framebuffer_mgr->GetEFBHeight()}};
@ -519,6 +501,10 @@ void Renderer::SwapImpl(u32 xfb_addr, u32 fb_width, u32 fb_stride, u32 fb_height
StopFrameDump();
}
// Restore the EFB color texture to color attachment ready for rendering the next frame.
m_framebuffer_mgr->GetEFBColorTexture()->TransitionToLayout(
g_command_buffer_mgr->GetCurrentCommandBuffer(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
// Ensure the worker thread is not still submitting a previous command buffer.
// In other words, the last frame has been submitted (otherwise the next call would
// be a race, as the image may not have been consumed yet).
@ -543,22 +529,28 @@ void Renderer::SwapImpl(u32 xfb_addr, u32 fb_width, u32 fb_stride, u32 fb_height
g_command_buffer_mgr->SubmitCommandBuffer(true);
}
// NOTE: It is important that no rendering calls are made to the EFB between submitting the
// (now-previous) frame and after the below config checks are completed. If the target size
changes, as the resize methods do not defer the destruction of the framebuffer, the current
// command buffer will contain references to a now non-existent framebuffer.
// Prep for the next frame (get command buffer ready) before doing anything else.
BeginFrame();
// Restore the EFB color texture to color attachment ready for rendering.
m_framebuffer_mgr->GetEFBColorTexture()->TransitionToLayout(
g_command_buffer_mgr->GetCurrentCommandBuffer(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
// Determine what (if anything) has changed in the config.
CheckForConfigChanges();
// Clean up stale textures
TextureCacheBase::Cleanup(frameCount);
// Handle window resizes.
CheckForTargetResize(fb_width, fb_stride, fb_height);
// Handle host window resizes.
CheckForSurfaceChange();
// Determine what has changed in the config.
CheckForConfigChanges();
// Handle output size changes from the guest.
// The downside to doing this here is that if the game changes its XFB source area,
// the changes will be delayed by one frame. For the moment it has to be done here because
// this can cause a target size change, which would result in a black frame if done earlier.
CheckForTargetResize(fb_width, fb_stride, fb_height);
// Clean up stale textures.
TextureCacheBase::Cleanup(frameCount);
}
void Renderer::DrawScreen(const TargetRectangle& src_rect, const Texture2D* src_tex)
@ -829,13 +821,23 @@ void Renderer::StopFrameDump()
void Renderer::CheckForTargetResize(u32 fb_width, u32 fb_stride, u32 fb_height)
{
if (FramebufferManagerBase::LastXfbWidth() != fb_stride ||
FramebufferManagerBase::LastXfbHeight() != fb_height)
if (FramebufferManagerBase::LastXfbWidth() == fb_stride &&
FramebufferManagerBase::LastXfbHeight() == fb_height)
{
u32 last_w = (fb_stride < 1 || fb_stride > MAX_XFB_WIDTH) ? MAX_XFB_WIDTH : fb_stride;
u32 last_h = (fb_height < 1 || fb_height > MAX_XFB_HEIGHT) ? MAX_XFB_HEIGHT : fb_height;
FramebufferManagerBase::SetLastXfbWidth(last_w);
FramebufferManagerBase::SetLastXfbHeight(last_h);
return;
}
u32 new_width = (fb_stride < 1 || fb_stride > MAX_XFB_WIDTH) ? MAX_XFB_WIDTH : fb_stride;
u32 new_height = (fb_height < 1 || fb_height > MAX_XFB_HEIGHT) ? MAX_XFB_HEIGHT : fb_height;
FramebufferManagerBase::SetLastXfbWidth(new_width);
FramebufferManagerBase::SetLastXfbHeight(new_height);
// Changing the XFB source area will likely change the final drawing rectangle.
UpdateDrawRectangle(s_backbuffer_width, s_backbuffer_height);
if (CalculateTargetSize(s_backbuffer_width, s_backbuffer_height))
{
PixelShaderManager::SetEfbScaleChanged();
ResizeEFBTextures();
}
// This call is needed for auto-resizing to work.
@ -850,9 +852,6 @@ void Renderer::CheckForSurfaceChange()
u32 old_width = m_swap_chain ? m_swap_chain->GetWidth() : 0;
u32 old_height = m_swap_chain ? m_swap_chain->GetHeight() : 0;
// Wait for the GPU to catch up since we're going to destroy the swap chain.
g_command_buffer_mgr->WaitForGPUIdle();
// Fast path, if the surface handle is the same, the window has just been resized.
if (m_swap_chain && s_new_surface_handle == m_swap_chain->GetNativeHandle())
{
@ -866,6 +865,9 @@ void Renderer::CheckForSurfaceChange()
}
else
{
// Wait for the GPU to catch up since we're going to destroy the swap chain.
g_command_buffer_mgr->WaitForGPUIdle();
// Did we previously have a swap chain?
if (m_swap_chain)
{
@ -888,7 +890,7 @@ void Renderer::CheckForSurfaceChange()
s_new_surface_handle);
if (surface != VK_NULL_HANDLE)
{
m_swap_chain = SwapChain::Create(s_new_surface_handle, surface);
m_swap_chain = SwapChain::Create(s_new_surface_handle, surface, g_ActiveConfig.IsVSync());
if (!m_swap_chain)
PanicAlert("Failed to create swap chain.");
}
@ -915,7 +917,6 @@ void Renderer::CheckForSurfaceChange()
void Renderer::CheckForConfigChanges()
{
// Compare g_Config to g_ActiveConfig to determine what has changed before copying.
bool vsync_changed = (g_Config.bVSync != g_ActiveConfig.bVSync);
bool msaa_changed = (g_Config.iMultisamples != g_ActiveConfig.iMultisamples);
bool ssaa_changed = (g_Config.bSSAA != g_ActiveConfig.bSSAA);
bool anisotropy_changed = (g_Config.iMaxAnisotropy != g_ActiveConfig.iMaxAnisotropy);
@ -926,6 +927,9 @@ void Renderer::CheckForConfigChanges()
// Copy g_Config to g_ActiveConfig.
UpdateActiveConfig();
// Update texture cache settings with any changed options.
TextureCache::OnConfigChanged(g_ActiveConfig);
// MSAA samples changed, we need to recreate the EFB render pass, and all shaders.
if (msaa_changed)
{
@ -958,8 +962,11 @@ void Renderer::CheckForConfigChanges()
}
// For vsync, we need to change the present mode, which means recreating the swap chain.
if (vsync_changed)
ResizeSwapChain();
if (m_swap_chain && g_ActiveConfig.IsVSync() != m_swap_chain->IsVSyncEnabled())
{
g_command_buffer_mgr->WaitForGPUIdle();
m_swap_chain->SetVSync(g_ActiveConfig.IsVSync());
}
// Wipe sampler cache if force texture filtering or anisotropy changes.
if (anisotropy_changed || force_texture_filtering_changed)
@ -970,13 +977,12 @@ void Renderer::OnSwapChainResized()
{
s_backbuffer_width = m_swap_chain->GetWidth();
s_backbuffer_height = m_swap_chain->GetHeight();
FramebufferManagerBase::SetLastXfbWidth(MAX_XFB_WIDTH);
FramebufferManagerBase::SetLastXfbHeight(MAX_XFB_HEIGHT);
UpdateDrawRectangle(s_backbuffer_width, s_backbuffer_height);
if (CalculateTargetSize(s_backbuffer_width, s_backbuffer_height))
{
PixelShaderManager::SetEfbScaleChanged();
ResizeEFBTextures();
PixelShaderManager::SetEfbScaleChanged();
}
}
void Renderer::BindEFBToStateTracker()
@ -1361,13 +1367,9 @@ void Renderer::SetSamplerState(int stage, int texindex, bool custom_tex)
}
// If mipmaps are disabled, clamp min/max lod
new_state.max_lod = (SamplerCommon::AreBpTexMode0MipmapsEnabled(tm0)) ?
static_cast<u32>(MathUtil::Clamp(tm1.max_lod / 16.0f, 0.0f, 255.0f)) :
0;
new_state.min_lod =
std::min(new_state.max_lod.Value(),
static_cast<u32>(MathUtil::Clamp(tm1.min_lod / 16.0f, 0.0f, 255.0f)));
new_state.lod_bias = static_cast<s32>(tm0.lod_bias / 32.0f);
new_state.max_lod = SamplerCommon::AreBpTexMode0MipmapsEnabled(tm0) ? tm1.max_lod : 0;
new_state.min_lod = std::min(new_state.max_lod.Value(), tm1.min_lod);
new_state.lod_bias = SamplerCommon::AreBpTexMode0MipmapsEnabled(tm0) ? tm0.lod_bias : 0;
// Custom textures may have a greater number of mips
if (custom_tex)
@ -1381,15 +1383,7 @@ void Renderer::SetSamplerState(int stage, int texindex, bool custom_tex)
new_state.wrap_v = address_modes[tm0.wrap_t];
// Only use anisotropic filtering for textures that would be linearly filtered.
if (g_vulkan_context->SupportsAnisotropicFiltering() && g_ActiveConfig.iMaxAnisotropy > 0 &&
!SamplerCommon::IsBpTexMode0PointFiltering(tm0))
{
new_state.anisotropy = g_ActiveConfig.iMaxAnisotropy;
}
else
{
new_state.anisotropy = 0;
}
new_state.enable_anisotropic_filtering = SamplerCommon::IsBpTexMode0PointFiltering(tm0) ? 0 : 1;
// Skip lookup if the state hasn't changed.
size_t bind_index = (texindex * 4) + stage;
@ -1411,6 +1405,7 @@ void Renderer::SetSamplerState(int stage, int texindex, bool custom_tex)
void Renderer::ResetSamplerStates()
{
// Ensure none of the sampler objects are in use.
// This assumes that none of the samplers are in use on the command list currently being recorded.
g_command_buffer_mgr->WaitForGPUIdle();
// Invalidate all sampler states, next draw will re-initialize them.

View File

@ -25,13 +25,13 @@ class RasterFont;
class Renderer : public ::Renderer
{
public:
Renderer();
Renderer(std::unique_ptr<SwapChain> swap_chain);
~Renderer();
SwapChain* GetSwapChain() const { return m_swap_chain.get(); }
StateTracker* GetStateTracker() const { return m_state_tracker.get(); }
BoundingBox* GetBoundingBox() const { return m_bounding_box.get(); }
bool Initialize(FramebufferManager* framebuffer_mgr, void* window_handle, VkSurfaceKHR surface);
bool Initialize(FramebufferManager* framebuffer_mgr);
void RenderText(const std::string& pstr, int left, int top, u32 color) override;
u32 AccessEFB(EFBAccessType type, u32 x, u32 y, u32 poke_data) override;
@ -105,8 +105,8 @@ private:
FramebufferManager* m_framebuffer_mgr = nullptr;
VkSemaphore m_image_available_semaphore = nullptr;
VkSemaphore m_rendering_finished_semaphore = nullptr;
VkSemaphore m_image_available_semaphore = VK_NULL_HANDLE;
VkSemaphore m_rendering_finished_semaphore = VK_NULL_HANDLE;
std::unique_ptr<SwapChain> m_swap_chain;
std::unique_ptr<StateTracker> m_state_tracker;

View File

@ -26,8 +26,8 @@ StagingBuffer::~StagingBuffer()
if (m_map_pointer)
Unmap();
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory);
g_command_buffer_mgr->DeferBufferDestruction(m_buffer);
}
bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)

View File

@ -133,8 +133,8 @@ StagingTexture2DLinear::~StagingTexture2DLinear()
if (m_map_pointer)
Unmap();
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferResourceDestruction(m_image);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory);
g_command_buffer_mgr->DeferImageDestruction(m_image);
}
void StagingTexture2DLinear::CopyFromImage(VkCommandBuffer command_buffer, VkImage image,
@ -362,8 +362,8 @@ StagingTexture2DBuffer::~StagingTexture2DBuffer()
if (m_map_pointer)
Unmap();
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory);
g_command_buffer_mgr->DeferBufferDestruction(m_buffer);
}
void StagingTexture2DBuffer::CopyFromImage(VkCommandBuffer command_buffer, VkImage image,

View File

@ -539,7 +539,7 @@ void StateTracker::EndRenderPass()
return;
vkCmdEndRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer());
m_current_render_pass = nullptr;
m_current_render_pass = VK_NULL_HANDLE;
}
void StateTracker::BeginClearRenderPass(const VkRect2D& area, const VkClearValue clear_values[2])

View File

@ -35,9 +35,9 @@ StreamBuffer::~StreamBuffer()
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
if (m_buffer != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
g_command_buffer_mgr->DeferBufferDestruction(m_buffer);
if (m_memory != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory);
}
std::unique_ptr<StreamBuffer> StreamBuffer::Create(VkBufferUsageFlags usage, size_t initial_size,
@ -124,9 +124,9 @@ bool StreamBuffer::ResizeBuffer(size_t size)
// Destroy the backings for the buffer after the command buffer executes
if (m_buffer != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
g_command_buffer_mgr->DeferBufferDestruction(m_buffer);
if (m_memory != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory);
// Replace with the new buffer
m_buffer = buffer;

View File

@ -24,8 +24,8 @@
namespace Vulkan
{
SwapChain::SwapChain(void* native_handle, VkSurfaceKHR surface)
: m_native_handle(native_handle), m_surface(surface)
SwapChain::SwapChain(void* native_handle, VkSurfaceKHR surface, bool vsync)
: m_native_handle(native_handle), m_surface(surface), m_vsync_enabled(vsync)
{
}
@ -127,9 +127,10 @@ VkSurfaceKHR SwapChain::CreateVulkanSurface(VkInstance instance, void* hwnd)
#endif
}
std::unique_ptr<SwapChain> SwapChain::Create(void* native_handle, VkSurfaceKHR surface)
std::unique_ptr<SwapChain> SwapChain::Create(void* native_handle, VkSurfaceKHR surface, bool vsync)
{
std::unique_ptr<SwapChain> swap_chain = std::make_unique<SwapChain>(native_handle, surface);
std::unique_ptr<SwapChain> swap_chain =
std::make_unique<SwapChain>(native_handle, surface, vsync);
if (!swap_chain->CreateSwapChain() || !swap_chain->CreateRenderPass() ||
!swap_chain->SetupSwapChainImages())
@ -198,7 +199,7 @@ bool SwapChain::SelectPresentMode()
};
// If vsync is enabled, prefer VK_PRESENT_MODE_FIFO_KHR.
if (g_ActiveConfig.IsVSync())
if (m_vsync_enabled)
{
// Try for relaxed vsync first, since it's likely the VI won't line up with
// the refresh rate of the system exactly, so tearing once is better than
@ -280,8 +281,8 @@ void SwapChain::DestroyRenderPass()
if (!m_render_pass)
return;
g_command_buffer_mgr->DeferResourceDestruction(m_render_pass);
m_render_pass = nullptr;
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_render_pass, nullptr);
m_render_pass = VK_NULL_HANDLE;
}
bool SwapChain::CreateSwapChain()
@ -456,11 +457,8 @@ VkResult SwapChain::AcquireNextImage(VkSemaphore available_semaphore)
bool SwapChain::ResizeSwapChain()
{
if (!CreateSwapChain())
return false;
DestroySwapChainImages();
if (!SetupSwapChainImages())
if (!CreateSwapChain() || !SetupSwapChainImages())
{
PanicAlert("Failed to re-configure swap chain images, this is fatal (for now)");
return false;
@ -469,6 +467,16 @@ bool SwapChain::ResizeSwapChain()
return true;
}
bool SwapChain::SetVSync(bool enabled)
{
if (m_vsync_enabled == enabled)
return true;
// Resizing recreates the swap chain with the new present mode.
m_vsync_enabled = enabled;
return ResizeSwapChain();
}
bool SwapChain::RecreateSurface(void* native_handle)
{
// Destroy the old swap chain, images, and surface.

View File

@ -19,18 +19,19 @@ class ObjectCache;
class SwapChain
{
public:
SwapChain(void* native_handle, VkSurfaceKHR surface);
SwapChain(void* native_handle, VkSurfaceKHR surface, bool vsync);
~SwapChain();
// Creates a vulkan-renderable surface for the specified window handle.
static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, void* hwnd);
// Create a new swap chain from a pre-existing surface.
static std::unique_ptr<SwapChain> Create(void* native_handle, VkSurfaceKHR surface);
static std::unique_ptr<SwapChain> Create(void* native_handle, VkSurfaceKHR surface, bool vsync);
void* GetNativeHandle() const { return m_native_handle; }
VkSurfaceKHR GetSurface() const { return m_surface; }
VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; }
bool IsVSyncEnabled() const { return m_vsync_enabled; }
VkSwapchainKHR GetSwapChain() const { return m_swap_chain; }
VkRenderPass GetRenderPass() const { return m_render_pass; }
u32 GetWidth() const { return m_width; }
@ -54,6 +55,9 @@ public:
bool RecreateSurface(void* native_handle);
bool ResizeSwapChain();
// Change vsync enabled state. This may fail as it causes a swapchain recreation.
bool SetVSync(bool enabled);
private:
bool SelectSurfaceFormat();
bool SelectPresentMode();
@ -76,16 +80,17 @@ private:
VkFramebuffer framebuffer;
};
void* m_native_handle = nullptr;
VkSurfaceKHR m_surface = nullptr;
void* m_native_handle;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR m_surface_format = {};
VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_RANGE_SIZE_KHR;
bool m_vsync_enabled;
VkSwapchainKHR m_swap_chain = nullptr;
VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE;
std::vector<SwapChainImage> m_swap_chain_images;
u32 m_current_swap_chain_image_index = 0;
VkRenderPass m_render_pass = nullptr;
VkRenderPass m_render_pass = VK_NULL_HANDLE;
u32 m_width = 0;
u32 m_height = 0;

View File

@ -21,13 +21,13 @@ Texture2D::Texture2D(u32 width, u32 height, u32 levels, u32 layers, VkFormat for
Texture2D::~Texture2D()
{
g_command_buffer_mgr->DeferResourceDestruction(m_view);
g_command_buffer_mgr->DeferImageViewDestruction(m_view);
// If we don't have device memory allocated, the image is not owned by us (e.g. swapchain)
if (m_device_memory != VK_NULL_HANDLE)
{
g_command_buffer_mgr->DeferResourceDestruction(m_image);
g_command_buffer_mgr->DeferResourceDestruction(m_device_memory);
g_command_buffer_mgr->DeferImageDestruction(m_image);
g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_device_memory);
}
}
@ -134,6 +134,8 @@ std::unique_ptr<Texture2D> Texture2D::CreateFromExistingImage(u32 width, u32 hei
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
// Memory is managed by the owner of the image.
VkDeviceMemory memory = VK_NULL_HANDLE;
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
@ -143,7 +145,7 @@ std::unique_ptr<Texture2D> Texture2D::CreateFromExistingImage(u32 width, u32 hei
}
return std::make_unique<Texture2D>(width, height, levels, layers, format, samples, view_type,
existing_image, nullptr, view);
existing_image, memory, view);
}
void Texture2D::OverrideImageLayout(VkImageLayout new_layout)

View File

@ -41,6 +41,7 @@ TextureCache::~TextureCache()
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_initialize_render_pass, nullptr);
if (m_update_render_pass != VK_NULL_HANDLE)
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_update_render_pass, nullptr);
TextureCache::DeleteShaders();
}
bool TextureCache::Initialize(StateTracker* state_tracker)
@ -316,7 +317,7 @@ TextureCache::TCacheEntry::~TCacheEntry()
m_parent->m_state_tracker->UnbindTexture(m_texture->GetView());
if (m_framebuffer != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_framebuffer);
g_command_buffer_mgr->DeferFramebufferDestruction(m_framebuffer);
}
void TextureCache::TCacheEntry::Load(unsigned int width, unsigned int height,
@ -725,20 +726,25 @@ bool TextureCache::CompileShaders()
void TextureCache::DeleteShaders()
{
auto DestroyShader = [this](VkShaderModule& shader) {
if (shader != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), shader, nullptr);
shader = VK_NULL_HANDLE;
}
};
// Since this can be called by the base class we need to wait for idle.
g_command_buffer_mgr->WaitForGPUIdle();
DestroyShader(m_copy_shader);
DestroyShader(m_efb_color_to_tex_shader);
DestroyShader(m_efb_depth_to_tex_shader);
// It is safe to destroy shader modules after they are consumed by creating a pipeline.
// Therefore, no matter where this function is called from, it won't cause an issue due to
// pending commands, although at the time of writing should only be called at the end of
// a frame. See Vulkan spec, section 2.3.1. Object Lifetime.
if (m_copy_shader != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), m_copy_shader, nullptr);
m_copy_shader = VK_NULL_HANDLE;
}
if (m_efb_color_to_tex_shader != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), m_efb_color_to_tex_shader, nullptr);
m_efb_color_to_tex_shader = VK_NULL_HANDLE;
}
if (m_efb_depth_to_tex_shader != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), m_efb_depth_to_tex_shader, nullptr);
m_efb_depth_to_tex_shader = VK_NULL_HANDLE;
}
}
} // namespace Vulkan

View File

@ -260,120 +260,6 @@ VkShaderModule CompileAndCreateFragmentShader(const std::string& source_code, bo
} // namespace Util
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkCommandPool>(VkCommandPool object)
{
DeferredResourceDestruction ret;
ret.object.command_pool = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyCommandPool(device, obj.command_pool, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkDeviceMemory>(VkDeviceMemory object)
{
DeferredResourceDestruction ret;
ret.object.device_memory = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkFreeMemory(device, obj.device_memory, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkBuffer>(VkBuffer object)
{
DeferredResourceDestruction ret;
ret.object.buffer = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyBuffer(device, obj.buffer, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkBufferView>(VkBufferView object)
{
DeferredResourceDestruction ret;
ret.object.buffer_view = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyBufferView(device, obj.buffer_view, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkImage>(VkImage object)
{
DeferredResourceDestruction ret;
ret.object.image = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyImage(device, obj.image, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkImageView>(VkImageView object)
{
DeferredResourceDestruction ret;
ret.object.image_view = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyImageView(device, obj.image_view, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkRenderPass>(VkRenderPass object)
{
DeferredResourceDestruction ret;
ret.object.render_pass = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyRenderPass(device, obj.render_pass, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkFramebuffer>(VkFramebuffer object)
{
DeferredResourceDestruction ret;
ret.object.framebuffer = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyFramebuffer(device, obj.framebuffer, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkShaderModule>(VkShaderModule object)
{
DeferredResourceDestruction ret;
ret.object.shader_module = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyShaderModule(device, obj.shader_module, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkPipeline>(VkPipeline object)
{
DeferredResourceDestruction ret;
ret.object.pipeline = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyPipeline(device, obj.pipeline, nullptr);
};
return ret;
}
UtilityShaderDraw::UtilityShaderDraw(VkCommandBuffer command_buffer,
VkPipelineLayout pipeline_layout, VkRenderPass render_pass,
VkShaderModule vertex_shader, VkShaderModule geometry_shader,

View File

@ -66,28 +66,6 @@ VkShaderModule CompileAndCreateFragmentShader(const std::string& source_code,
bool prepend_header = true);
}
// Helper methods for cleaning up device objects, used by deferred destruction
struct DeferredResourceDestruction
{
union Object {
VkCommandPool command_pool;
VkDeviceMemory device_memory;
VkBuffer buffer;
VkBufferView buffer_view;
VkImage image;
VkImageView image_view;
VkRenderPass render_pass;
VkFramebuffer framebuffer;
VkShaderModule shader_module;
VkPipeline pipeline;
} object;
void (*destroy_callback)(VkDevice device, const Object& object);
template <typename T>
static DeferredResourceDestruction Wrapper(T object);
};
// Utility shader vertex format
#pragma pack(push, 1)
struct UtilityShaderVertex

View File

@ -198,8 +198,14 @@ void VertexManager::vFlush(bool use_dst_alpha)
vkCmdDrawIndexed(g_command_buffer_mgr->GetCurrentCommandBuffer(), index_count, 1,
m_current_draw_base_index, m_current_draw_base_vertex, 0);
// If we can't do single pass dst alpha, we now need to draw the alpha pass.
if (use_dst_alpha && !g_vulkan_context->SupportsDualSourceBlend())
// If the GPU does not support dual-source blending, we can approximate the effect by drawing
// the object a second time, with the write mask set to alpha only using a shader that outputs
// the destination/constant alpha value (which would normally be SRC_COLOR.a).
//
// This is also used when logic ops and destination alpha is enabled, since we can't enable
// blending and logic ops concurrently (and the logical operation applies to all channels).
bool logic_op_enabled = bpmem.blendmode.logicopenable && !bpmem.blendmode.blendenable;
if (use_dst_alpha && (!g_vulkan_context->SupportsDualSourceBlend() || logic_op_enabled))
{
m_state_tracker->CheckForShaderChanges(m_current_primitive_type, DSTALPHA_ALPHA_PASS);
if (!m_state_tracker->Bind())

View File

@ -526,7 +526,8 @@ bool VulkanContext::CreateDevice(VkSurfaceKHR surface, bool enable_validation_la
device_info.pQueueCreateInfos = &queue_info;
ExtensionList enabled_extensions;
if (!SelectDeviceExtensions(&enabled_extensions, (surface != nullptr), enable_validation_layer))
if (!SelectDeviceExtensions(&enabled_extensions, (surface != VK_NULL_HANDLE),
enable_validation_layer))
return false;
device_info.enabledLayerCount = 0;

View File

@ -93,7 +93,7 @@ public:
{
return m_device_properties.limits.bufferImageGranularity;
}
float GetMaxSaxmplerAnisotropy() const { return m_device_properties.limits.maxSamplerAnisotropy; }
float GetMaxSamplerAnisotropy() const { return m_device_properties.limits.maxSamplerAnisotropy; }
// Finds a memory type index for the specified memory properties and the bits returned by
// vkGetImageMemoryRequirements
bool GetMemoryType(u32 bits, VkMemoryPropertyFlags properties, u32* out_type_index);

View File

@ -165,6 +165,18 @@ bool VideoBackend::Initialize(void* window_handle)
return false;
}
// Create swap chain. This has to be done early so that the target size is correct for auto-scale.
std::unique_ptr<SwapChain> swap_chain;
if (surface != VK_NULL_HANDLE)
{
swap_chain = SwapChain::Create(window_handle, surface, g_Config.IsVSync());
if (!swap_chain)
{
PanicAlert("Failed to create Vulkan swap chain.");
return false;
}
}
// Create command buffers. We do this separately because the other classes depend on it.
g_command_buffer_mgr = std::make_unique<CommandBufferManager>(g_Config.bBackendMultithreading);
if (!g_command_buffer_mgr->Initialize())
@ -180,7 +192,7 @@ bool VideoBackend::Initialize(void* window_handle)
// Create main wrapper instances.
g_object_cache = std::make_unique<ObjectCache>();
g_framebuffer_manager = std::make_unique<FramebufferManager>();
g_renderer = std::make_unique<Renderer>();
g_renderer = std::make_unique<Renderer>(std::move(swap_chain));
// Cast to our wrapper classes, so we can call the init methods.
Renderer* renderer = static_cast<Renderer*>(g_renderer.get());
@ -191,7 +203,7 @@ bool VideoBackend::Initialize(void* window_handle)
// These have to be done before the others because the destructors
// for the remaining classes may call methods on these.
if (!g_object_cache->Initialize() || !framebuffer_mgr->Initialize() ||
!renderer->Initialize(framebuffer_mgr, window_handle, surface))
!renderer->Initialize(framebuffer_mgr))
{
PanicAlert("Failed to initialize Vulkan classes.");
g_renderer.reset();