GS: Move Vulkan helpers from common to GS

Stenzek 2023-05-05 21:47:41 +10:00 committed by refractionpcsx2
parent 0ed418834a
commit c57d8980a1
44 changed files with 6769 additions and 7200 deletions

common/CMakeLists.txt

@@ -128,34 +128,6 @@ target_sources(common PRIVATE
emitter/x86types.h
)
if(USE_VULKAN)
target_link_libraries(common PUBLIC
Vulkan-Headers glslang
)
target_sources(common PRIVATE
Vulkan/ShaderCache.cpp
Vulkan/Texture.cpp
Vulkan/Loader.cpp
Vulkan/ShaderCompiler.cpp
Vulkan/Util.cpp
Vulkan/SwapChain.cpp
Vulkan/StreamBuffer.cpp
Vulkan/Context.cpp
Vulkan/Builders.cpp
Vulkan/vk_mem_alloc.cpp
Vulkan/Context.h
Vulkan/Texture.h
Vulkan/ShaderCompiler.h
Vulkan/SwapChain.h
Vulkan/Builders.h
Vulkan/StreamBuffer.h
Vulkan/ShaderCache.h
Vulkan/EntryPoints.h
Vulkan/Loader.h
Vulkan/Util.h
)
endif()
if(USE_VTUNE)
target_link_libraries(common PUBLIC Vtune::Vtune)
endif()
@@ -197,21 +169,6 @@ if(X11_API AND TARGET PkgConfig::XRANDR)
target_compile_definitions(common PRIVATE "HAS_XRANDR=1")
endif()
if(USE_VULKAN)
if(APPLE)
# Needed for Metal surface creation.
target_compile_options(common PRIVATE -fobjc-arc)
target_link_options(common PRIVATE -fobjc-link-runtime)
elseif(NOT WIN32)
if(X11_API)
target_compile_definitions(common PUBLIC "VULKAN_USE_X11=1")
endif()
if(WAYLAND_API)
target_compile_definitions(common PUBLIC "VULKAN_USE_WAYLAND=1")
endif()
endif()
endif()
if (USE_GCC AND CMAKE_INTERPROCEDURAL_OPTIMIZATION)
# GCC LTO doesn't work with asm statements
set_source_files_properties(FastJmp.cpp PROPERTIES COMPILE_FLAGS -fno-lto)

common/Vulkan/Builders.cpp (deleted)

@@ -1,951 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/Builders.h"
#include "common/Vulkan/Util.h"
#include "common/Assertions.h"
#include <cstring>
#include <limits>
namespace Vulkan
{
DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder() { Clear(); }
void DescriptorSetLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pBindings = nullptr;
m_ci.bindingCount = 0;
}
VkDescriptorSetLayout DescriptorSetLayoutBuilder::Create(VkDevice device)
{
VkDescriptorSetLayout layout;
VkResult res = vkCreateDescriptorSetLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDescriptorSetLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void DescriptorSetLayoutBuilder::AddBinding(
u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages)
{
pxAssert(m_ci.bindingCount < MAX_BINDINGS);
VkDescriptorSetLayoutBinding& b = m_bindings[m_ci.bindingCount];
b.binding = binding;
b.descriptorType = dtype;
b.descriptorCount = dcount;
b.stageFlags = stages;
b.pImmutableSamplers = nullptr;
m_ci.pBindings = m_bindings.data();
m_ci.bindingCount++;
}
PipelineLayoutBuilder::PipelineLayoutBuilder() { Clear(); }
void PipelineLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pSetLayouts = nullptr;
m_ci.setLayoutCount = 0;
m_ci.pPushConstantRanges = nullptr;
m_ci.pushConstantRangeCount = 0;
}
VkPipelineLayout PipelineLayoutBuilder::Create(VkDevice device)
{
VkPipelineLayout layout;
VkResult res = vkCreatePipelineLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void PipelineLayoutBuilder::AddDescriptorSet(VkDescriptorSetLayout layout)
{
pxAssert(m_ci.setLayoutCount < MAX_SETS);
m_sets[m_ci.setLayoutCount] = layout;
m_ci.setLayoutCount++;
m_ci.pSetLayouts = m_sets.data();
}
void PipelineLayoutBuilder::AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size)
{
pxAssert(m_ci.pushConstantRangeCount < MAX_PUSH_CONSTANTS);
VkPushConstantRange& r = m_push_constants[m_ci.pushConstantRangeCount];
r.stageFlags = stages;
r.offset = offset;
r.size = size;
m_ci.pushConstantRangeCount++;
m_ci.pPushConstantRanges = m_push_constants.data();
}
GraphicsPipelineBuilder::GraphicsPipelineBuilder() { Clear(); }
void GraphicsPipelineBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
m_shader_stages = {};
m_vertex_input_state = {};
m_vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
m_ci.pVertexInputState = &m_vertex_input_state;
m_vertex_attributes = {};
m_vertex_buffers = {};
m_input_assembly = {};
m_input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
m_rasterization_state = {};
m_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
m_rasterization_state.lineWidth = 1.0f;
m_depth_state = {};
m_depth_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
m_blend_state = {};
m_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
m_blend_attachments = {};
m_viewport_state = {};
m_viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
m_viewport = {};
m_scissor = {};
m_dynamic_state = {};
m_dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
m_dynamic_state_values = {};
m_multisample_state = {};
m_multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
m_provoking_vertex = {};
m_provoking_vertex.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT;
m_line_rasterization_state = {};
m_line_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT;
// set defaults
SetNoCullRasterizationState();
SetNoDepthTestState();
SetNoBlendingState();
SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
// have to be specified even if dynamic
SetViewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
SetScissorRect(0, 0, 1, 1);
SetMultisamples(VK_SAMPLE_COUNT_1_BIT);
}
VkPipeline GraphicsPipelineBuilder::Create(VkDevice device, VkPipelineCache pipeline_cache, bool clear /* = true */)
{
VkPipeline pipeline;
VkResult res = vkCreateGraphicsPipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateGraphicsPipelines() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return pipeline;
}
void GraphicsPipelineBuilder::SetShaderStage(
VkShaderStageFlagBits stage, VkShaderModule module, const char* entry_point)
{
pxAssert(m_ci.stageCount < MAX_SHADER_STAGES);
u32 index = 0;
for (; index < m_ci.stageCount; index++)
{
if (m_shader_stages[index].stage == stage)
break;
}
if (index == m_ci.stageCount)
{
m_ci.stageCount++;
m_ci.pStages = m_shader_stages.data();
}
VkPipelineShaderStageCreateInfo& s = m_shader_stages[index];
s.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
s.stage = stage;
s.module = module;
s.pName = entry_point;
}
void GraphicsPipelineBuilder::AddVertexBuffer(
u32 binding, u32 stride, VkVertexInputRate input_rate /*= VK_VERTEX_INPUT_RATE_VERTEX*/)
{
pxAssert(m_vertex_input_state.vertexBindingDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputBindingDescription& b = m_vertex_buffers[m_vertex_input_state.vertexBindingDescriptionCount];
b.binding = binding;
b.stride = stride;
b.inputRate = input_rate;
m_vertex_input_state.vertexBindingDescriptionCount++;
m_vertex_input_state.pVertexBindingDescriptions = m_vertex_buffers.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void GraphicsPipelineBuilder::AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset)
{
pxAssert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputAttributeDescription& a =
m_vertex_attributes[m_vertex_input_state.vertexAttributeDescriptionCount];
a.location = location;
a.binding = binding;
a.format = format;
a.offset = offset;
m_vertex_input_state.vertexAttributeDescriptionCount++;
m_vertex_input_state.pVertexAttributeDescriptions = m_vertex_attributes.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void GraphicsPipelineBuilder::SetPrimitiveTopology(
VkPrimitiveTopology topology, bool enable_primitive_restart /*= false*/)
{
m_input_assembly.topology = topology;
m_input_assembly.primitiveRestartEnable = enable_primitive_restart;
m_ci.pInputAssemblyState = &m_input_assembly;
}
void GraphicsPipelineBuilder::SetRasterizationState(
VkPolygonMode polygon_mode, VkCullModeFlags cull_mode, VkFrontFace front_face)
{
m_rasterization_state.polygonMode = polygon_mode;
m_rasterization_state.cullMode = cull_mode;
m_rasterization_state.frontFace = front_face;
m_ci.pRasterizationState = &m_rasterization_state;
}
void GraphicsPipelineBuilder::SetLineWidth(float width)
{
m_rasterization_state.lineWidth = width;
}
void GraphicsPipelineBuilder::SetLineRasterizationMode(VkLineRasterizationModeEXT mode)
{
Util::AddPointerToChain(&m_rasterization_state, &m_line_rasterization_state);
m_line_rasterization_state.lineRasterizationMode = mode;
}
void GraphicsPipelineBuilder::SetMultisamples(u32 multisamples, bool per_sample_shading)
{
m_multisample_state.rasterizationSamples = static_cast<VkSampleCountFlagBits>(multisamples);
m_multisample_state.sampleShadingEnable = per_sample_shading;
m_multisample_state.minSampleShading = (multisamples > 1) ? 1.0f : 0.0f;
}
void GraphicsPipelineBuilder::SetNoCullRasterizationState()
{
SetRasterizationState(VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE);
}
void GraphicsPipelineBuilder::SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op)
{
m_depth_state.depthTestEnable = depth_test;
m_depth_state.depthWriteEnable = depth_write;
m_depth_state.depthCompareOp = compare_op;
m_ci.pDepthStencilState = &m_depth_state;
}
void GraphicsPipelineBuilder::SetStencilState(
bool stencil_test, const VkStencilOpState& front, const VkStencilOpState& back)
{
m_depth_state.stencilTestEnable = stencil_test;
m_depth_state.front = front;
m_depth_state.back = back;
}
void GraphicsPipelineBuilder::SetNoStencilState()
{
m_depth_state.stencilTestEnable = VK_FALSE;
m_depth_state.front = {};
m_depth_state.back = {};
}
void GraphicsPipelineBuilder::SetNoDepthTestState() { SetDepthState(false, false, VK_COMPARE_OP_ALWAYS); }
void GraphicsPipelineBuilder::SetBlendConstants(float r, float g, float b, float a)
{
m_blend_state.blendConstants[0] = r;
m_blend_state.blendConstants[1] = g;
m_blend_state.blendConstants[2] = b;
m_blend_state.blendConstants[3] = a;
m_ci.pColorBlendState = &m_blend_state;
}
void GraphicsPipelineBuilder::AddBlendAttachment(bool blend_enable, VkBlendFactor src_factor,
VkBlendFactor dst_factor, VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor,
VkBlendOp alpha_op,
VkColorComponentFlags
write_mask /* = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT */)
{
pxAssert(m_blend_state.attachmentCount < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[m_blend_state.attachmentCount];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
m_blend_state.attachmentCount++;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
void GraphicsPipelineBuilder::SetBlendAttachment(u32 attachment, bool blend_enable, VkBlendFactor src_factor,
VkBlendFactor dst_factor, VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor,
VkBlendOp alpha_op,
VkColorComponentFlags
write_mask /*= VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT*/)
{
pxAssert(attachment < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[attachment];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
if (attachment >= m_blend_state.attachmentCount)
{
m_blend_state.attachmentCount = attachment + 1u;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
}
void GraphicsPipelineBuilder::AddBlendFlags(u32 flags)
{
m_blend_state.flags |= flags;
}
void GraphicsPipelineBuilder::ClearBlendAttachments()
{
m_blend_attachments = {};
m_blend_state.attachmentCount = 0;
}
void GraphicsPipelineBuilder::SetNoBlendingState()
{
ClearBlendAttachments();
SetBlendAttachment(0, false, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, VK_BLEND_FACTOR_ONE,
VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD,
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
}
void GraphicsPipelineBuilder::AddDynamicState(VkDynamicState state)
{
pxAssert(m_dynamic_state.dynamicStateCount < MAX_DYNAMIC_STATE);
m_dynamic_state_values[m_dynamic_state.dynamicStateCount] = state;
m_dynamic_state.dynamicStateCount++;
m_dynamic_state.pDynamicStates = m_dynamic_state_values.data();
m_ci.pDynamicState = &m_dynamic_state;
}
void GraphicsPipelineBuilder::SetDynamicViewportAndScissorState()
{
AddDynamicState(VK_DYNAMIC_STATE_VIEWPORT);
AddDynamicState(VK_DYNAMIC_STATE_SCISSOR);
}
void GraphicsPipelineBuilder::SetViewport(
float x, float y, float width, float height, float min_depth, float max_depth)
{
m_viewport.x = x;
m_viewport.y = y;
m_viewport.width = width;
m_viewport.height = height;
m_viewport.minDepth = min_depth;
m_viewport.maxDepth = max_depth;
m_viewport_state.pViewports = &m_viewport;
m_viewport_state.viewportCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void GraphicsPipelineBuilder::SetScissorRect(s32 x, s32 y, u32 width, u32 height)
{
m_scissor.offset.x = x;
m_scissor.offset.y = y;
m_scissor.extent.width = width;
m_scissor.extent.height = height;
m_viewport_state.pScissors = &m_scissor;
m_viewport_state.scissorCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void GraphicsPipelineBuilder::SetMultisamples(VkSampleCountFlagBits samples)
{
m_multisample_state.rasterizationSamples = samples;
m_ci.pMultisampleState = &m_multisample_state;
}
void GraphicsPipelineBuilder::SetPipelineLayout(VkPipelineLayout layout) { m_ci.layout = layout; }
void GraphicsPipelineBuilder::SetRenderPass(VkRenderPass render_pass, u32 subpass)
{
m_ci.renderPass = render_pass;
m_ci.subpass = subpass;
}
void GraphicsPipelineBuilder::SetProvokingVertex(VkProvokingVertexModeEXT mode)
{
Util::AddPointerToChain(&m_rasterization_state, &m_provoking_vertex);
m_provoking_vertex.provokingVertexMode = mode;
}
ComputePipelineBuilder::ComputePipelineBuilder() { Clear(); }
void ComputePipelineBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
m_si = {};
m_smap_entries = {};
m_smap_constants = {};
}
VkPipeline ComputePipelineBuilder::Create(VkDevice device, VkPipelineCache pipeline_cache /*= VK_NULL_HANDLE*/, bool clear /*= true*/)
{
VkPipeline pipeline;
VkResult res = vkCreateComputePipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateComputePipelines() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return pipeline;
}
void ComputePipelineBuilder::SetShader(VkShaderModule module, const char* entry_point)
{
m_ci.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
m_ci.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
m_ci.stage.module = module;
m_ci.stage.pName = entry_point;
}
void ComputePipelineBuilder::SetPipelineLayout(VkPipelineLayout layout)
{
m_ci.layout = layout;
}
void ComputePipelineBuilder::SetSpecializationBool(u32 index, bool value)
{
const u32 u32_value = static_cast<u32>(value);
SetSpecializationValue(index, u32_value);
}
void ComputePipelineBuilder::SetSpecializationValue(u32 index, u32 value)
{
if (m_si.mapEntryCount == 0)
{
m_si.pMapEntries = m_smap_entries.data();
m_si.pData = m_smap_constants.data();
m_ci.stage.pSpecializationInfo = &m_si;
}
m_smap_entries[m_si.mapEntryCount++] = {index, index * SPECIALIZATION_CONSTANT_SIZE, SPECIALIZATION_CONSTANT_SIZE};
// Copy the constant's value into the data blob at the byte offset referenced by the map entry.
std::memcpy(reinterpret_cast<u8*>(m_smap_constants.data()) + index * SPECIALIZATION_CONSTANT_SIZE, &value, sizeof(value));
m_si.dataSize += SPECIALIZATION_CONSTANT_SIZE;
}
SamplerBuilder::SamplerBuilder() { Clear(); }
void SamplerBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
}
VkSampler SamplerBuilder::Create(VkDevice device, bool clear /* = true */)
{
VkSampler sampler;
VkResult res = vkCreateSampler(device, &m_ci, nullptr, &sampler);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSampler() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return sampler;
}
void SamplerBuilder::SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter)
{
m_ci.magFilter = mag_filter;
m_ci.minFilter = min_filter;
m_ci.mipmapMode = mip_filter;
}
void SamplerBuilder::SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w)
{
m_ci.addressModeU = u;
m_ci.addressModeV = v;
m_ci.addressModeW = w;
}
void SamplerBuilder::SetPointSampler(
VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
{
Clear();
SetFilter(VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
}
void SamplerBuilder::SetLinearSampler(
bool mipmaps, VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
{
Clear();
SetFilter(VK_FILTER_LINEAR, VK_FILTER_LINEAR,
mipmaps ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
if (mipmaps)
{
m_ci.minLod = std::numeric_limits<float>::min();
m_ci.maxLod = std::numeric_limits<float>::max();
}
}
DescriptorSetUpdateBuilder::DescriptorSetUpdateBuilder() { Clear(); }
void DescriptorSetUpdateBuilder::Clear()
{
m_writes = {};
m_num_writes = 0;
}
void DescriptorSetUpdateBuilder::Update(VkDevice device, bool clear /*= true*/)
{
pxAssert(m_num_writes > 0);
vkUpdateDescriptorSets(device, m_num_writes, (m_num_writes > 0) ? m_writes.data() : nullptr, 0, nullptr);
if (clear)
Clear();
}
void DescriptorSetUpdateBuilder::AddImageDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddImageDescriptorWrites(VkDescriptorSet set, u32 binding,
const VkImageView* views, u32 num_views, VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && (m_num_image_infos + num_views) < MAX_IMAGE_INFOS);
#if 1
// NOTE: This is deliberately split up - updating multiple descriptors in one write is broken on Adreno.
for (u32 i = 0; i < num_views; i++)
AddImageDescriptorWrite(set, binding + i, views[i], layout);
#else
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = num_views;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
for (u32 i = 0; i < num_views; i++)
{
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = views[i];
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
}
#endif
}
void DescriptorSetUpdateBuilder::AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = VK_NULL_HANDLE;
ii.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddSamplerDescriptorWrites(
VkDescriptorSet set, u32 binding, const VkSampler* samplers, u32 num_samplers)
{
pxAssert(m_num_writes < MAX_WRITES && (m_num_image_infos + num_samplers) < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = num_samplers;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
for (u32 i = 0; i < num_samplers; i++)
{
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = VK_NULL_HANDLE;
ii.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
ii.sampler = samplers[i];
}
}
void DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrite(VkDescriptorSet set, u32 binding,
VkImageView view, VkSampler sampler, VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrites(VkDescriptorSet set, u32 binding,
const VkImageView* views, const VkSampler* samplers, u32 num_views,
VkImageLayout layout /* = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL */)
{
pxAssert(m_num_writes < MAX_WRITES && (m_num_image_infos + num_views) < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = num_views;
dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
for (u32 i = 0; i < num_views; i++)
{
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = views[i];
ii.sampler = samplers[i];
ii.imageLayout = layout;
}
}
void DescriptorSetUpdateBuilder::AddBufferDescriptorWrite(
VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBuffer buffer, u32 offset, u32 size)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_buffer_infos < MAX_BUFFER_INFOS);
VkDescriptorBufferInfo& bi = m_buffer_infos[m_num_buffer_infos++];
bi.buffer = buffer;
bi.offset = offset;
bi.range = size;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pBufferInfo = &bi;
}
void DescriptorSetUpdateBuilder::AddBufferViewDescriptorWrite(
VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBufferView view)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_views < MAX_VIEWS);
VkBufferView& bi = m_views[m_num_views++];
bi = view;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pTexelBufferView = &bi;
}
void DescriptorSetUpdateBuilder::AddInputAttachmentDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view, VkImageLayout layout /*= VK_IMAGE_LAYOUT_GENERAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
}
void DescriptorSetUpdateBuilder::AddStorageImageDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view, VkImageLayout layout /*= VK_IMAGE_LAYOUT_GENERAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
}
FramebufferBuilder::FramebufferBuilder() { Clear(); }
void FramebufferBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
m_images = {};
}
VkFramebuffer FramebufferBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkFramebuffer fb;
VkResult res = vkCreateFramebuffer(device, &m_ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return fb;
}
void FramebufferBuilder::AddAttachment(VkImageView image)
{
pxAssert(m_ci.attachmentCount < MAX_ATTACHMENTS);
m_images[m_ci.attachmentCount] = image;
m_ci.attachmentCount++;
m_ci.pAttachments = m_images.data();
}
void FramebufferBuilder::SetSize(u32 width, u32 height, u32 layers)
{
m_ci.width = width;
m_ci.height = height;
m_ci.layers = layers;
}
void FramebufferBuilder::SetRenderPass(VkRenderPass render_pass) { m_ci.renderPass = render_pass; }
RenderPassBuilder::RenderPassBuilder() { Clear(); }
void RenderPassBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
m_attachments = {};
m_attachment_references = {};
m_num_attachment_references = 0;
m_subpasses = {};
}
VkRenderPass RenderPassBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkRenderPass rp;
VkResult res = vkCreateRenderPass(device, &m_ci, nullptr, &rp);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return rp;
}
u32 RenderPassBuilder::AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
VkAttachmentStoreOp store_op, VkImageLayout initial_layout, VkImageLayout final_layout)
{
pxAssert(m_ci.attachmentCount < MAX_ATTACHMENTS);
const u32 index = m_ci.attachmentCount;
VkAttachmentDescription& ad = m_attachments[index];
ad.format = format;
ad.samples = samples;
ad.loadOp = load_op;
ad.storeOp = store_op;
ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
ad.initialLayout = initial_layout;
ad.finalLayout = final_layout;
m_ci.attachmentCount++;
m_ci.pAttachments = m_attachments.data();
return index;
}
u32 RenderPassBuilder::AddSubpass()
{
pxAssert(m_ci.subpassCount < MAX_SUBPASSES);
const u32 index = m_ci.subpassCount;
VkSubpassDescription& sp = m_subpasses[index];
sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
m_ci.subpassCount++;
m_ci.pSubpasses = m_subpasses.data();
return index;
}
void RenderPassBuilder::AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
pxAssert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
if (sp.colorAttachmentCount == 0)
sp.pColorAttachments = &ar;
sp.colorAttachmentCount++;
}
void RenderPassBuilder::AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
pxAssert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
sp.pDepthStencilAttachment = &ar;
}
BufferViewBuilder::BufferViewBuilder() { Clear(); }
void BufferViewBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
}
VkBufferView BufferViewBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkBufferView bv;
VkResult res = vkCreateBufferView(device, &m_ci, nullptr, &bv);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBufferView() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return bv;
}
void BufferViewBuilder::Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size)
{
m_ci.buffer = buffer;
m_ci.format = format;
m_ci.offset = offset;
m_ci.range = size;
}
} // namespace Vulkan
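For reference, a minimal usage sketch of the builder helpers deleted above (illustrative only, not part of this commit; assumes the pre-move common/Vulkan/Builders.h header and valid VkDevice, VkPipelineLayout, VkRenderPass and VkShaderModule handles):

#include "common/Vulkan/Builders.h"

VkSampler CreateNearestSampler(VkDevice device)
{
    Vulkan::SamplerBuilder sb;
    sb.SetPointSampler(VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE);
    return sb.Create(device); // VK_NULL_HANDLE on failure
}

VkPipeline CreateSimplePipeline(VkDevice device, VkPipelineLayout layout,
    VkRenderPass render_pass, VkShaderModule vs, VkShaderModule fs)
{
    Vulkan::GraphicsPipelineBuilder gpb; // Clear() in the constructor sets sane defaults
    gpb.SetPipelineLayout(layout);
    gpb.SetRenderPass(render_pass, 0);
    gpb.SetShaderStage(VK_SHADER_STAGE_VERTEX_BIT, vs, "main");
    gpb.SetShaderStage(VK_SHADER_STAGE_FRAGMENT_BIT, fs, "main");
    gpb.SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
    gpb.SetDynamicViewportAndScissorState();
    return gpb.Create(device, VK_NULL_HANDLE); // no pipeline cache in this sketch
}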

File diff suppressed because it is too large.

common/Vulkan/Context.h (deleted)

@@ -1,415 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "common/Pcsx2Defs.h"
#include "common/ReadbackSpinManager.h"
#include "common/Vulkan/Loader.h"
#include "common/Vulkan/StreamBuffer.h"
#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
struct WindowInfo;
namespace Vulkan
{
class SwapChain;
class Context
{
public:
enum : u32
{
NUM_COMMAND_BUFFERS = 3,
TEXTURE_BUFFER_SIZE = 64 * 1024 * 1024,
};
struct OptionalExtensions
{
bool vk_ext_provoking_vertex : 1;
bool vk_ext_memory_budget : 1;
bool vk_ext_calibrated_timestamps : 1;
bool vk_ext_line_rasterization : 1;
bool vk_ext_rasterization_order_attachment_access : 1;
bool vk_ext_full_screen_exclusive : 1;
bool vk_khr_driver_properties : 1;
bool vk_khr_fragment_shader_barycentric : 1;
bool vk_khr_shader_draw_parameters : 1;
};
~Context();
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(
const WindowInfo& wi, bool enable_debug_utils, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<std::pair<VkPhysicalDevice, std::string>>;
static GPUList EnumerateGPUs(VkInstance instance);
// Creates a new context and sets it up as global.
static bool Create(VkInstance instance, VkSurfaceKHR surface, VkPhysicalDevice physical_device,
bool threaded_presentation, bool enable_debug_utils, bool enable_validation_layer);
// Destroys context.
static void Destroy();
// Enable/disable debug messages at runtime.
bool EnableDebugUtils();
void DisableDebugUtils();
// Global state accessors
__fi VkInstance GetVulkanInstance() const { return m_instance; }
__fi VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
__fi VkDevice GetDevice() const { return m_device; }
__fi VmaAllocator GetAllocator() const { return m_allocator; }
__fi VkQueue GetGraphicsQueue() const { return m_graphics_queue; }
__fi u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
__fi VkQueue GetPresentQueue() const { return m_present_queue; }
__fi u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
__fi const VkQueueFamilyProperties& GetGraphicsQueueProperties() const { return m_graphics_queue_properties; }
__fi const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const
{
return m_device_memory_properties;
}
__fi const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
__fi const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; }
__fi const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; }
__fi const VkPhysicalDeviceDriverProperties& GetDeviceDriverProperties() const { return m_device_driver_properties; }
__fi const OptionalExtensions& GetOptionalExtensions() const { return m_optional_extensions; }
// Helpers for getting constants
__fi u32 GetUniformBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minUniformBufferOffsetAlignment);
}
__fi u32 GetTexelBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minTexelBufferOffsetAlignment);
}
__fi u32 GetStorageBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minStorageBufferOffsetAlignment);
}
__fi u32 GetBufferImageGranularity() const
{
return static_cast<u32>(m_device_properties.limits.bufferImageGranularity);
}
__fi u32 GetBufferCopyOffsetAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyOffsetAlignment);
}
__fi u32 GetBufferCopyRowPitchAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyRowPitchAlignment);
}
__fi u32 GetMaxImageDimension2D() const
{
return m_device_properties.limits.maxImageDimension2D;
}
// Gets a cached render pass for the given formats/ops, creating it on first use.
__ri VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format,
VkAttachmentLoadOp color_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp color_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp depth_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp depth_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp stencil_load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VkAttachmentStoreOp stencil_store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE,
bool color_feedback_loop = false, bool depth_sampling = false)
{
RenderPassCacheKey key = {};
key.color_format = color_format;
key.depth_format = depth_format;
key.color_load_op = color_load_op;
key.color_store_op = color_store_op;
key.depth_load_op = depth_load_op;
key.depth_store_op = depth_store_op;
key.stencil_load_op = stencil_load_op;
key.stencil_store_op = stencil_store_op;
key.color_feedback_loop = color_feedback_loop;
key.depth_sampling = depth_sampling;
auto it = m_render_pass_cache.find(key.key);
if (it != m_render_pass_cache.end())
return it->second;
return CreateCachedRenderPass(key);
}
// These command buffers are allocated per-frame. They are only valid until the command buffer
// is submitted; after that, call these functions again to get the new frame's handles.
__fi u32 GetCurrentCommandBufferIndex() const { return m_current_frame; }
__fi VkDescriptorPool GetGlobalDescriptorPool() const { return m_global_descriptor_pool; }
__fi VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
__fi StreamBuffer& GetTextureUploadBuffer() { return m_texture_upload_buffer; }
__fi VkDescriptorPool GetCurrentDescriptorPool() const
{
return m_frame_resources[m_current_frame].descriptor_pool;
}
VkCommandBuffer GetCurrentInitCommandBuffer();
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
/// Allocates a persistent descriptor set from the global pool; release it with FreeGlobalDescriptorSet().
VkDescriptorSet AllocatePersistentDescriptorSet(VkDescriptorSetLayout set_layout);
/// Frees a descriptor set allocated from the global pool.
void FreeGlobalDescriptorSet(VkDescriptorSet set);
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
__fi VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Fence "counters" are used to track which commands have been completed by the GPU.
// If the last completed fence counter is greater than or equal to N, the work associated
// with counter N has been completed by the GPU. The value of N to associate with commands
// can be retrieved by calling GetCurrentFenceCounter().
u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }
// Gets the fence counter associated with the current command buffer. Work recorded in it
// is considered complete once GetCompletedFenceCounter() reaches this value.
u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
void SubmitCommandBuffer(SwapChain* present_swap_chain = nullptr, bool submit_on_thread = false);
void MoveToNextCommandBuffer();
enum class WaitType
{
None,
Sleep,
Spin,
};
void ExecuteCommandBuffer(WaitType wait_for_completion);
void WaitForPresentComplete();
// Was the last present submitted to the queue a failure? If so, we must recreate our swapchain.
bool CheckLastPresentFail();
bool CheckLastSubmitFail();
// Schedule a vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
void DeferBufferDestruction(VkBuffer object);
void DeferBufferDestruction(VkBuffer object, VmaAllocation allocation);
void DeferBufferViewDestruction(VkBufferView object);
void DeferDeviceMemoryDestruction(VkDeviceMemory object);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object);
void DeferImageDestruction(VkImage object, VmaAllocation allocation);
void DeferImageViewDestruction(VkImageView object);
void DeferPipelineDestruction(VkPipeline pipeline);
void DeferSamplerDestruction(VkSampler sampler);
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFenceCounter(u64 fence_counter);
void WaitForGPUIdle();
float GetAndResetAccumulatedGPUTime();
bool SetEnableGPUTiming(bool enabled);
void CountRenderPass() { m_command_buffer_render_passes++; }
void NotifyOfReadback();
// Allocates a temporary CPU staging buffer, fires the callback with it to populate, then copies to a GPU buffer.
bool AllocatePreinitializedGPUBuffer(u32 size, VkBuffer* gpu_buffer, VmaAllocation* gpu_allocation,
VkBufferUsageFlags gpu_usage, const std::function<void(void*)>& fill_callback);
private:
Context(VkInstance instance, VkPhysicalDevice physical_device);
union RenderPassCacheKey
{
struct
{
u32 color_format : 8;
u32 depth_format : 8;
u32 color_load_op : 2;
u32 color_store_op : 1;
u32 depth_load_op : 2;
u32 depth_store_op : 1;
u32 stencil_load_op : 2;
u32 stencil_store_op : 1;
u32 color_feedback_loop : 1;
u32 depth_sampling : 1;
};
u32 key;
};
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(
ExtensionList* extension_list, const WindowInfo& wi, bool enable_debug_utils);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface);
bool SelectDeviceFeatures(const VkPhysicalDeviceFeatures* required_features);
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer, const char** required_device_extensions,
u32 num_required_device_extensions, const char** required_device_layers, u32 num_required_device_layers,
const VkPhysicalDeviceFeatures* required_features);
void ProcessDeviceExtensions();
bool CreateAllocator();
void DestroyAllocator();
bool CreateCommandBuffers();
void DestroyCommandBuffers();
bool CreateGlobalDescriptorPool();
void DestroyGlobalDescriptorPool();
bool CreateTextureStreamBuffer();
VkRenderPass CreateCachedRenderPass(RenderPassCacheKey key);
void DestroyRenderPassCache();
void CommandBufferCompleted(u32 index);
void ActivateCommandBuffer(u32 index);
void ScanForCommandBufferCompletion();
void WaitForCommandBufferCompletion(u32 index);
void DoSubmitCommandBuffer(u32 index, SwapChain* present_swap_chain, u32 spin_cycles);
void DoPresent(SwapChain* present_swap_chain);
void WaitForPresentComplete(std::unique_lock<std::mutex>& lock);
void PresentThread();
void StartPresentThread();
void StopPresentThread();
bool InitSpinResources();
void DestroySpinResources();
void WaitForSpinCompletion(u32 index);
void SpinCommandCompleted(u32 index);
void SubmitSpinCommand(u32 index, u32 cycles);
void CalibrateSpinTimestamp();
u64 GetCPUTimestamp();
struct FrameResources
{
// [0] - Init (upload) command buffer, [1] - draw command buffer
VkCommandPool command_pool = VK_NULL_HANDLE;
std::array<VkCommandBuffer, 2> command_buffers{VK_NULL_HANDLE, VK_NULL_HANDLE};
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u64 fence_counter = 0;
s32 spin_id = -1;
u32 submit_timestamp = 0;
bool init_buffer_used = false;
bool needs_fence_wait = false;
bool timestamp_written = false;
std::vector<std::function<void()>> cleanup_resources;
};
struct SpinResources
{
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkSemaphore semaphore = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u32 cycles = 0;
bool in_progress = false;
};
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VmaAllocator m_allocator = VK_NULL_HANDLE;
VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;
VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
VkQueue m_present_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
u32 m_present_queue_family_index = 0;
ReadbackSpinManager m_spin_manager;
VkQueue m_spin_queue = VK_NULL_HANDLE;
VkDescriptorSetLayout m_spin_descriptor_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_spin_pipeline_layout = VK_NULL_HANDLE;
VkPipeline m_spin_pipeline = VK_NULL_HANDLE;
VkBuffer m_spin_buffer = VK_NULL_HANDLE;
VmaAllocation m_spin_buffer_allocation = VK_NULL_HANDLE;
VkDescriptorSet m_spin_descriptor_set = VK_NULL_HANDLE;
std::array<SpinResources, NUM_COMMAND_BUFFERS> m_spin_resources;
#ifdef _WIN32
double m_queryperfcounter_to_ns = 0;
#endif
double m_spin_timestamp_scale = 0;
double m_spin_timestamp_offset = 0;
u32 m_spin_queue_family_index = 0;
u32 m_command_buffer_render_passes = 0;
u32 m_spin_timer = 0;
bool m_spinning_supported = false;
bool m_spin_queue_is_graphics_queue = false;
bool m_spin_buffer_initialized = false;
VkQueryPool m_timestamp_query_pool = VK_NULL_HANDLE;
float m_accumulated_gpu_time = 0.0f;
bool m_gpu_timing_enabled = false;
bool m_gpu_timing_supported = false;
bool m_wants_new_timestamp_calibration = false;
VkTimeDomainEXT m_calibrated_timestamp_type = VK_TIME_DOMAIN_DEVICE_EXT;
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources;
u64 m_next_fence_counter = 1;
u64 m_completed_fence_counter = 0;
u32 m_current_frame = 0;
StreamBuffer m_texture_upload_buffer;
std::atomic_bool m_last_submit_failed{false};
std::atomic_bool m_last_present_failed{false};
std::atomic_bool m_present_done{true};
std::mutex m_present_mutex;
std::condition_variable m_present_queued_cv;
std::condition_variable m_present_done_cv;
std::thread m_present_thread;
std::atomic_bool m_present_thread_done{false};
struct QueuedPresent
{
SwapChain* swap_chain;
u32 command_buffer_index;
u32 spin_cycles;
};
QueuedPresent m_queued_present = {};
std::map<u32, VkRenderPass> m_render_pass_cache;
VkDebugUtilsMessengerEXT m_debug_messenger_callback = VK_NULL_HANDLE;
VkQueueFamilyProperties m_graphics_queue_properties = {};
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceMemoryProperties m_device_memory_properties = {};
VkPhysicalDeviceDriverPropertiesKHR m_device_driver_properties = {};
OptionalExtensions m_optional_extensions = {};
};
} // namespace Vulkan
extern std::unique_ptr<Vulkan::Context> g_vulkan_context;
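For reference, a hedged sketch of how the fence-counter and deferred-destruction interfaces declared above are typically driven (illustrative only, not part of this commit; assumes g_vulkan_context has already been created):

#include "common/Vulkan/Context.h"

void UploadThenRetireStagingBuffer(VkBuffer staging, VmaAllocation staging_alloc)
{
    Vulkan::Context& ctx = *g_vulkan_context;

    // Record the upload into the current command buffer (vkCmdCopyBuffer() etc. would go
    // here, targeting ctx.GetCurrentCommandBuffer()), and remember the fence counter that
    // this work will complete under.
    const u64 fence_counter = ctx.GetCurrentFenceCounter();

    // The staging buffer is no longer needed on the CPU side; the context frees it
    // once the GPU has finished with the current command buffer.
    ctx.DeferBufferDestruction(staging, staging_alloc);

    // Submit and switch to the next per-frame command buffer.
    ctx.SubmitCommandBuffer();
    ctx.MoveToNextCommandBuffer();

    // Later: block only if the GPU has not yet reached that work.
    if (ctx.GetCompletedFenceCounter() < fence_counter)
        ctx.WaitForFenceCounter(fence_counter);
}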

common/Vulkan/Loader.cpp (deleted)

@@ -1,260 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include <atomic>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include "Loader.h"
#ifndef _WIN32
#include <dlfcn.h>
#endif
#ifdef __APPLE__
#include <mach-o/dyld.h>
#endif
extern "C" {
#define VULKAN_MODULE_ENTRY_POINT(name, required) PFN_##name pcsx2_##name;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) PFN_##name pcsx2_##name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) PFN_##name pcsx2_##name;
#include "EntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
}
namespace Vulkan
{
void ResetVulkanLibraryFunctionPointers()
{
#define VULKAN_MODULE_ENTRY_POINT(name, required) pcsx2_##name = nullptr;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) pcsx2_##name = nullptr;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) pcsx2_##name = nullptr;
#include "EntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
}
#if defined(_WIN32)
static HMODULE vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
bool LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
vulkan_module = LoadLibraryA("vulkan-1.dll");
if (!vulkan_module)
{
std::fprintf(stderr, "Failed to load vulkan-1.dll\n");
return false;
}
bool required_functions_missing = false;
auto LoadFunction = [&](FARPROC* func_ptr, const char* name, bool is_required) {
*func_ptr = GetProcAddress(vulkan_module, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_MODULE_ENTRY_POINT(name, required) LoadFunction(reinterpret_cast<FARPROC*>(&name), #name, required);
#include "EntryPoints.inl"
#undef VULKAN_MODULE_ENTRY_POINT
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
}
#else
static void* vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
bool LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
#if defined(__APPLE__)
// Check if a path to a specific Vulkan library has been specified.
char* libvulkan_env = getenv("LIBVULKAN_PATH");
if (libvulkan_env)
vulkan_module = dlopen(libvulkan_env, RTLD_NOW);
if (!vulkan_module)
{
unsigned path_size = 0;
_NSGetExecutablePath(nullptr, &path_size);
std::string path;
path.resize(path_size);
if (_NSGetExecutablePath(path.data(), &path_size) == 0)
{
path[path_size] = 0;
size_t pos = path.rfind('/');
if (pos != std::string::npos)
{
path.erase(pos);
path += "/../Frameworks/libvulkan.dylib";
vulkan_module = dlopen(path.c_str(), RTLD_NOW);
if (!vulkan_module)
{
path.erase(pos);
path += "/../Frameworks/libMoltenVK.dylib";
vulkan_module = dlopen(path.c_str(), RTLD_NOW);
}
}
}
}
if (!vulkan_module)
{
vulkan_module = dlopen("libvulkan.dylib", RTLD_NOW);
if (!vulkan_module)
vulkan_module = dlopen("libMoltenVK.dylib", RTLD_NOW);
}
#else
// Names of libraries to search. Desktop should use libvulkan.so.1 or libvulkan.so.
static const char* search_lib_names[] = {"libvulkan.so.1", "libvulkan.so"};
for (size_t i = 0; i < sizeof(search_lib_names) / sizeof(search_lib_names[0]); i++)
{
vulkan_module = dlopen(search_lib_names[i], RTLD_NOW);
if (vulkan_module)
break;
}
#endif
if (!vulkan_module)
{
std::fprintf(stderr, "Failed to load or locate libvulkan.so\n");
return false;
}
bool required_functions_missing = false;
auto LoadFunction = [&](void** func_ptr, const char* name, bool is_required) {
*func_ptr = dlsym(vulkan_module, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_MODULE_ENTRY_POINT(name, required) LoadFunction(reinterpret_cast<void**>(&name), #name, required);
#include "EntryPoints.inl"
#undef VULKAN_MODULE_ENTRY_POINT
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
}
#endif
bool LoadVulkanInstanceFunctions(VkInstance instance)
{
bool required_functions_missing = false;
auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
*func_ptr = vkGetInstanceProcAddr(instance, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required instance function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
#include "EntryPoints.inl"
#undef VULKAN_INSTANCE_ENTRY_POINT
return !required_functions_missing;
}
bool LoadVulkanDeviceFunctions(VkDevice device)
{
bool required_functions_missing = false;
auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
*func_ptr = vkGetDeviceProcAddr(device, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required device function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_DEVICE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
#include "EntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
return !required_functions_missing;
}
} // namespace Vulkan
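For reference, a sketch of the load order these helpers imply (illustrative only, not part of this commit; instance and device creation are elided):

#include "common/Vulkan/Loader.h"

bool InitVulkanEntryPoints(VkInstance instance, VkDevice device)
{
    // 1. dlopen()/LoadLibrary() the Vulkan loader and resolve the module-level
    //    entry points listed in EntryPoints.inl.
    if (!Vulkan::LoadVulkanLibrary())
        return false;

    // 2. After vkCreateInstance(), resolve instance-level entry points.
    if (!Vulkan::LoadVulkanInstanceFunctions(instance))
    {
        Vulkan::UnloadVulkanLibrary();
        return false;
    }

    // 3. After vkCreateDevice(), resolve device-level entry points.
    return Vulkan::LoadVulkanDeviceFunctions(device);
}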

common/Vulkan/ShaderCache.cpp (deleted)

@@ -1,539 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/ShaderCache.h"
#include "common/Vulkan/ShaderCompiler.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/Util.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include "common/FileSystem.h"
#include "common/MD5Digest.h"
// TODO: store the driver version and stuff in the shader header
std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
namespace Vulkan
{
using ShaderCompiler::SPIRVCodeType;
using ShaderCompiler::SPIRVCodeVector;
#pragma pack(push, 4)
struct VK_PIPELINE_CACHE_HEADER
{
u32 header_length;
u32 header_version;
u32 vendor_id;
u32 device_id;
u8 uuid[VK_UUID_SIZE];
};
struct CacheIndexEntry
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
u32 file_offset;
u32 blob_size;
};
#pragma pack(pop)
static bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header)
{
if (header.header_length < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Console.Error("Pipeline cache failed validation: Invalid header length");
return false;
}
if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
{
Console.Error("Pipeline cache failed validation: Invalid header version");
return false;
}
if (header.vendor_id != g_vulkan_context->GetDeviceProperties().vendorID)
{
Console.Error("Pipeline cache failed validation: Incorrect vendor ID (file: 0x%X, device: 0x%X)",
header.vendor_id, g_vulkan_context->GetDeviceProperties().vendorID);
return false;
}
if (header.device_id != g_vulkan_context->GetDeviceProperties().deviceID)
{
Console.Error("Pipeline cache failed validation: Incorrect device ID (file: 0x%X, device: 0x%X)",
header.device_id, g_vulkan_context->GetDeviceProperties().deviceID);
return false;
}
if (std::memcmp(header.uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0)
{
Console.Error("Pipeline cache failed validation: Incorrect UUID");
return false;
}
return true;
}
static void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header)
{
header->header_length = sizeof(VK_PIPELINE_CACHE_HEADER);
header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
header->vendor_id = g_vulkan_context->GetDeviceProperties().vendorID;
header->device_id = g_vulkan_context->GetDeviceProperties().deviceID;
std::memcpy(header->uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE);
}
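// Illustrative sketch (not from the original file): how the two header helpers above pair
// with vkGetPipelineCacheData() when a serialized VkPipelineCache blob is reused -- the first
// sizeof(VK_PIPELINE_CACHE_HEADER) bytes of the blob are the driver header that
// ValidatePipelineCacheHeader() checks. Assumes <vector> is available.
static bool IsPipelineCacheBlobUsable(VkDevice device, VkPipelineCache cache)
{
    size_t data_size = 0;
    if (vkGetPipelineCacheData(device, cache, &data_size, nullptr) != VK_SUCCESS ||
        data_size < sizeof(VK_PIPELINE_CACHE_HEADER))
    {
        return false;
    }
    std::vector<u8> data(data_size);
    if (vkGetPipelineCacheData(device, cache, &data_size, data.data()) != VK_SUCCESS)
        return false;
    VK_PIPELINE_CACHE_HEADER header;
    std::memcpy(&header, data.data(), sizeof(header));
    return ValidatePipelineCacheHeader(header);
}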
ShaderCache::ShaderCache() = default;
ShaderCache::~ShaderCache()
{
CloseShaderCache();
FlushPipelineCache();
ClosePipelineCache();
}
bool ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high &&
source_length == key.source_length && shader_type == key.shader_type);
}
bool ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high ||
source_length != key.source_length || shader_type != key.shader_type);
}
void ShaderCache::Create(std::string_view base_path, u32 version, bool debug)
{
pxAssert(!g_vulkan_shader_cache);
g_vulkan_shader_cache.reset(new ShaderCache());
g_vulkan_shader_cache->Open(base_path, version, debug);
}
void ShaderCache::Destroy() { g_vulkan_shader_cache.reset(); }
void ShaderCache::Open(std::string_view directory, u32 version, bool debug)
{
m_version = version;
m_debug = debug;
if (!directory.empty())
{
m_pipeline_cache_filename = GetPipelineCacheBaseFileName(directory, debug);
const std::string base_filename = GetShaderCacheBaseFileName(directory, debug);
const std::string index_filename = base_filename + ".idx";
const std::string blob_filename = base_filename + ".bin";
if (!ReadExistingShaderCache(index_filename, blob_filename))
CreateNewShaderCache(index_filename, blob_filename);
if (!ReadExistingPipelineCache())
CreateNewPipelineCache();
}
else
{
CreateNewPipelineCache();
}
}
VkPipelineCache ShaderCache::GetPipelineCache(bool set_dirty /*= true*/)
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return VK_NULL_HANDLE;
m_pipeline_cache_dirty |= set_dirty;
return m_pipeline_cache;
}
bool ShaderCache::CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Console.Warning("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFilePath(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Console.Warning("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFilePath(blob_filename.c_str());
}
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!m_index_file)
{
Console.Error("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 index_version = FILE_VERSION;
VK_PIPELINE_CACHE_HEADER header;
FillPipelineCacheHeader(&header);
if (std::fwrite(&index_version, sizeof(index_version), 1, m_index_file) != 1 ||
std::fwrite(&m_version, sizeof(m_version), 1, m_index_file) != 1 ||
std::fwrite(&header, sizeof(header), 1, m_index_file) != 1)
{
Console.Error("Failed to write header to index file '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFilePath(index_filename.c_str());
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!m_blob_file)
{
Console.Error("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFilePath(index_filename.c_str());
return false;
}
return true;
}
bool ShaderCache::ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!m_index_file)
{
// special case here: when there's a sharing violation (i.e. two instances running),
// we don't want to blow away the cache. so just continue without a cache.
if (errno == EACCES)
{
Console.WriteLn("Failed to open shader cache index with EACCES, are you running two instances?");
return true;
}
return false;
}
u32 file_version = 0;
u32 data_version = 0;
if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != FILE_VERSION ||
std::fread(&data_version, sizeof(data_version), 1, m_index_file) != 1 || data_version != m_version)
{
Console.Error("Bad file/data version in '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
VK_PIPELINE_CACHE_HEADER header;
if (std::fread(&header, sizeof(header), 1, m_index_file) != 1 || !ValidatePipelineCacheHeader(header))
{
Console.Error("Mismatched pipeline cache header in '%s' (GPU/driver changed?)", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!m_blob_file)
{
Console.Error("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
std::fseek(m_blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
for (;;)
{
CacheIndexEntry entry;
if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
(entry.file_offset + entry.blob_size) > blob_file_size)
{
if (std::feof(m_index_file))
break;
Console.Error("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
m_index.clear();
std::fclose(m_blob_file);
m_blob_file = nullptr;
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length,
static_cast<ShaderCompiler::Type>(entry.shader_type)};
const CacheIndexData data{entry.file_offset, entry.blob_size};
m_index.emplace(key, data);
}
// ensure we don't write before seeking
std::fseek(m_index_file, 0, SEEK_END);
Console.WriteLn("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
return true;
}
void ShaderCache::CloseShaderCache()
{
if (m_index_file)
{
std::fclose(m_index_file);
m_index_file = nullptr;
}
if (m_blob_file)
{
std::fclose(m_blob_file);
m_blob_file = nullptr;
}
}
bool ShaderCache::CreateNewPipelineCache()
{
if (!m_pipeline_cache_filename.empty() && FileSystem::FileExists(m_pipeline_cache_filename.c_str()))
{
Console.Warning("Removing existing pipeline cache '%s'", m_pipeline_cache_filename.c_str());
FileSystem::DeleteFilePath(m_pipeline_cache_filename.c_str());
}
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
m_pipeline_cache_dirty = true;
return true;
}
bool ShaderCache::ReadExistingPipelineCache()
{
std::optional<std::vector<u8>> data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
if (!data.has_value())
return false;
if (data->size() < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Console.Error("Pipeline cache at '%s' is too small", m_pipeline_cache_filename.c_str());
return false;
}
VK_PIPELINE_CACHE_HEADER header;
std::memcpy(&header, data->data(), sizeof(header));
if (!ValidatePipelineCacheHeader(header))
return false;
const VkPipelineCacheCreateInfo ci{
VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data->size(), data->data()};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
return true;
}
bool ShaderCache::FlushPipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE || !m_pipeline_cache_dirty || m_pipeline_cache_filename.empty())
return false;
size_t data_size;
VkResult res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
return false;
}
std::vector<u8> data(data_size);
res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, data.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
return false;
}
data.resize(data_size);
// Save disk writes if it hasn't changed, think of the poor SSDs.
FILESYSTEM_STAT_DATA sd;
if (!FileSystem::StatFile(m_pipeline_cache_filename.c_str(), &sd) || sd.Size != static_cast<s64>(data_size))
{
Console.WriteLn("Writing %zu bytes to '%s'", data_size, m_pipeline_cache_filename.c_str());
if (!FileSystem::WriteBinaryFile(m_pipeline_cache_filename.c_str(), data.data(), data.size()))
{
Console.Error("Failed to write pipeline cache to '%s'", m_pipeline_cache_filename.c_str());
return false;
}
}
else
{
Console.WriteLn(
"Skipping updating pipeline cache '%s' due to no changes.", m_pipeline_cache_filename.c_str());
}
m_pipeline_cache_dirty = false;
return true;
}
void ShaderCache::ClosePipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return;
vkDestroyPipelineCache(g_vulkan_context->GetDevice(), m_pipeline_cache, nullptr);
m_pipeline_cache = VK_NULL_HANDLE;
}
std::string ShaderCache::GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPARATOR_STR "vulkan_shaders";
if (debug)
base_filename += "_debug";
return base_filename;
}
std::string ShaderCache::GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPARATOR_STR "vulkan_pipelines";
if (debug)
base_filename += "_debug";
base_filename += ".bin";
return base_filename;
}
ShaderCache::CacheIndexKey ShaderCache::GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code)
{
union HashParts
{
struct
{
u64 hash_low;
u64 hash_high;
};
u8 hash[16];
};
HashParts h;
MD5Digest digest;
digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
digest.Final(h.hash);
return CacheIndexKey{h.hash_low, h.hash_high, static_cast<u32>(shader_code.length()), type};
}
std::optional<ShaderCompiler::SPIRVCodeVector> ShaderCache::GetShaderSPV(
ShaderCompiler::Type type, std::string_view shader_code)
{
const auto key = GetCacheKey(type, shader_code);
auto iter = m_index.find(key);
if (iter == m_index.end())
return CompileAndAddShaderSPV(key, shader_code);
SPIRVCodeVector spv(iter->second.blob_size);
if (std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(spv.data(), sizeof(SPIRVCodeType), iter->second.blob_size, m_blob_file) !=
iter->second.blob_size)
{
Console.Error("Read blob from file failed, recompiling");
return ShaderCompiler::CompileShader(type, shader_code, m_debug);
}
return spv;
}
VkShaderModule ShaderCache::GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = GetShaderSPV(type, shader_code);
if (!spv.has_value())
return VK_NULL_HANDLE;
const VkShaderModuleCreateInfo ci{
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0, spv->size() * sizeof(SPIRVCodeType), spv->data()};
VkShaderModule mod;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &ci, nullptr, &mod);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule() failed: ");
return VK_NULL_HANDLE;
}
return mod;
}
VkShaderModule ShaderCache::GetVertexShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Vertex, shader_code);
}
VkShaderModule ShaderCache::GetFragmentShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Fragment, shader_code);
}
VkShaderModule ShaderCache::GetComputeShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Compute, shader_code);
}
std::optional<ShaderCompiler::SPIRVCodeVector> ShaderCache::CompileAndAddShaderSPV(
const CacheIndexKey& key, std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = ShaderCompiler::CompileShader(key.shader_type, shader_code, m_debug);
if (!spv.has_value())
return {};
if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
return spv;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
data.blob_size = static_cast<u32>(spv->size());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.shader_type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(spv->data(), sizeof(SPIRVCodeType), entry.blob_size, m_blob_file) != entry.blob_size ||
std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
std::fflush(m_index_file) != 0)
{
Console.Error("Failed to write shader blob to file");
return spv;
}
m_index.emplace(key, data);
return spv;
}
} // namespace Vulkan
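// Illustrative sketch of the on-disk layout read and written above; the exact
// CacheIndexEntry field order/packing is assumed from its usage here, not copied
// from the original definition.
//
//   vulkan_shaders[_debug].idx : u32 FILE_VERSION | u32 data version |
//                                VK_PIPELINE_CACHE_HEADER | zero or more CacheIndexEntry records
//   vulkan_shaders[_debug].bin : raw SPIR-V; each entry's blob is blob_size
//                                u32 words starting at byte offset file_offset
//
// struct CacheIndexEntry // illustrative only
// {
//     u64 source_hash_low;  // low 8 bytes of the MD5 of the GLSL source
//     u64 source_hash_high; // high 8 bytes of the MD5 of the GLSL source
//     u32 source_length;    // length of the source string, in bytes
//     u32 shader_type;      // ShaderCompiler::Type
//     u32 file_offset;      // byte offset of the SPIR-V blob in the .bin file
//     u32 blob_size;        // number of SPIRVCodeType (u32) words in the blob
// };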

View File

@ -1,117 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "common/Vulkan/ShaderCompiler.h"
#include "common/Pcsx2Defs.h"
#include "common/HashCombine.h"
#include "common/Vulkan/Loader.h"
#include <cstdio>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
namespace Vulkan
{
class ShaderCache
{
public:
~ShaderCache();
static void Create(std::string_view directory, u32 version, bool debug);
static void Destroy();
/// Returns a handle to the pipeline cache. Set set_dirty to true if you are planning on writing to it externally.
VkPipelineCache GetPipelineCache(bool set_dirty = true);
/// Writes pipeline cache to file, saving all newly compiled pipelines.
bool FlushPipelineCache();
std::optional<ShaderCompiler::SPIRVCodeVector> GetShaderSPV(
ShaderCompiler::Type type, std::string_view shader_code);
VkShaderModule GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code);
VkShaderModule GetVertexShader(std::string_view shader_code);
VkShaderModule GetFragmentShader(std::string_view shader_code);
VkShaderModule GetComputeShader(std::string_view shader_code);
private:
static constexpr u32 FILE_VERSION = 2;
struct CacheIndexKey
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
ShaderCompiler::Type shader_type;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
HashCombine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.shader_type);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
ShaderCache();
static std::string GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug);
static std::string GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug);
static CacheIndexKey GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code);
void Open(std::string_view base_path, u32 version, bool debug);
bool CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename);
bool ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename);
void CloseShaderCache();
bool CreateNewPipelineCache();
bool ReadExistingPipelineCache();
void ClosePipelineCache();
std::optional<ShaderCompiler::SPIRVCodeVector> CompileAndAddShaderSPV(
const CacheIndexKey& key, std::string_view shader_code);
std::FILE* m_index_file = nullptr;
std::FILE* m_blob_file = nullptr;
std::string m_pipeline_cache_filename;
CacheIndex m_index;
VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;
u32 m_version = 0;
bool m_debug = false;
bool m_pipeline_cache_dirty = false;
};
} // namespace Vulkan
extern std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
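// Minimal usage sketch of the interface above, assuming a valid g_vulkan_context;
// the cache directory and GLSL source below are hypothetical placeholders.
#if 0
static void ExampleShaderCacheUsage()
{
	Vulkan::ShaderCache::Create("cache_dir", /*version=*/1, /*debug=*/false);

	// Hashes the source, returns a cached SPIR-V module if present, otherwise
	// compiles it and appends it to the index/blob files.
	const VkShaderModule vs = g_vulkan_shader_cache->GetVertexShader(
		"#version 450\nvoid main() { gl_Position = vec4(0.0, 0.0, 0.0, 1.0); }");
	if (vs != VK_NULL_HANDLE)
		vkDestroyShaderModule(g_vulkan_context->GetDevice(), vs, nullptr);

	// Persist any pipelines compiled against GetPipelineCache(), then tear down.
	g_vulkan_shader_cache->FlushPipelineCache();
	Vulkan::ShaderCache::Destroy();
}
#endif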

View File

@ -1,184 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/ShaderCompiler.h"
#include "common/Vulkan/Util.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include "common/StringUtil.h"
#include <cstring>
#include <fstream>
#include <memory>
// glslang includes
#include "SPIRV/GlslangToSpv.h"
#include "StandAlone/ResourceLimits.h"
#include "glslang/Public/ShaderLang.h"
namespace Vulkan::ShaderCompiler
{
// Registers itself for cleanup via atexit
bool InitializeGlslang();
static unsigned s_next_bad_shader_id = 1;
static bool glslang_initialized = false;
static std::optional<SPIRVCodeVector> CompileShaderToSPV(
EShLanguage stage, const char* stage_filename, std::string_view source, bool debug)
{
if (!InitializeGlslang())
return std::nullopt;
std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage);
std::unique_ptr<glslang::TProgram> program;
glslang::TShader::ForbidIncluder includer;
const EProfile profile = ECoreProfile;
const EShMessages messages = static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgVulkanRules | (debug ? EShMsgDebugInfo : 0));
const int default_version = 450;
std::string full_source_code;
const char* pass_source_code = source.data();
int pass_source_code_length = static_cast<int>(source.size());
shader->setStringsWithLengths(&pass_source_code, &pass_source_code_length, 1);
auto DumpBadShader = [&](const char* msg) {
std::string filename = StringUtil::StdStringFromFormat("pcsx2_bad_shader_%u.txt", s_next_bad_shader_id++);
Console.Error("CompileShaderToSPV: %s, writing to %s", msg, filename.c_str());
std::ofstream ofs(filename.c_str(), std::ofstream::out | std::ofstream::binary);
if (ofs.is_open())
{
ofs << source;
ofs << "\n";
ofs << msg << std::endl;
ofs << "Shader Info Log:" << std::endl;
ofs << shader->getInfoLog() << std::endl;
ofs << shader->getInfoDebugLog() << std::endl;
if (program)
{
ofs << "Program Info Log:" << std::endl;
ofs << program->getInfoLog() << std::endl;
ofs << program->getInfoDebugLog() << std::endl;
}
ofs.close();
}
};
if (!shader->parse(
&glslang::DefaultTBuiltInResource, default_version, profile, false, true, messages, includer))
{
DumpBadShader("Failed to parse shader");
return std::nullopt;
}
// Even though there's only a single shader, we still need to link it to generate SPV
program = std::make_unique<glslang::TProgram>();
program->addShader(shader.get());
if (!program->link(messages))
{
DumpBadShader("Failed to link program");
return std::nullopt;
}
glslang::TIntermediate* intermediate = program->getIntermediate(stage);
if (!intermediate)
{
DumpBadShader("Failed to generate SPIR-V");
return std::nullopt;
}
SPIRVCodeVector out_code;
spv::SpvBuildLogger logger;
glslang::SpvOptions options;
options.generateDebugInfo = debug;
glslang::GlslangToSpv(*intermediate, out_code, &logger, &options);
// Write out messages
if (std::strlen(shader->getInfoLog()) > 0)
Console.Warning("Shader info log: %s", shader->getInfoLog());
if (std::strlen(shader->getInfoDebugLog()) > 0)
Console.Warning("Shader debug info log: %s", shader->getInfoDebugLog());
if (std::strlen(program->getInfoLog()) > 0)
Console.Warning("Program info log: %s", program->getInfoLog());
if (std::strlen(program->getInfoDebugLog()) > 0)
Console.Warning("Program debug info log: %s", program->getInfoDebugLog());
std::string spv_messages = logger.getAllMessages();
if (!spv_messages.empty())
Console.Warning("SPIR-V conversion messages: %s", spv_messages.c_str());
return out_code;
}
bool InitializeGlslang()
{
if (glslang_initialized)
return true;
if (!glslang::InitializeProcess())
{
pxFailRel("Failed to initialize glslang shader compiler");
return false;
}
std::atexit(DeinitializeGlslang);
glslang_initialized = true;
return true;
}
void DeinitializeGlslang()
{
if (!glslang_initialized)
return;
glslang::FinalizeProcess();
glslang_initialized = false;
}
std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code, bool debug)
{
return CompileShaderToSPV(EShLangVertex, "vs", source_code, debug);
}
std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code, bool debug)
{
return CompileShaderToSPV(EShLangFragment, "ps", source_code, debug);
}
std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code, bool debug)
{
return CompileShaderToSPV(EShLangCompute, "cs", source_code, debug);
}
std::optional<ShaderCompiler::SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug)
{
switch (type)
{
case Type::Vertex:
return CompileShaderToSPV(EShLangVertex, "vs", source_code, debug);
case Type::Fragment:
return CompileShaderToSPV(EShLangFragment, "ps", source_code, debug);
case Type::Compute:
return CompileShaderToSPV(EShLangCompute, "cs", source_code, debug);
default:
return std::nullopt;
}
}
} // namespace Vulkan::ShaderCompiler

View File

@ -1,49 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "common/Pcsx2Defs.h"
#include <optional>
#include <string_view>
#include <vector>
namespace Vulkan::ShaderCompiler
{
// Shader types
enum class Type
{
Vertex,
Fragment,
Compute
};
void DeinitializeGlslang();
// SPIR-V compiled code type
using SPIRVCodeType = u32;
using SPIRVCodeVector = std::vector<SPIRVCodeType>;
// Compile a vertex shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code, bool debug);
// Compile a fragment shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code, bool debug);
// Compile a compute shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code, bool debug);
std::optional<SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug);
} // namespace Vulkan::ShaderCompiler
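// Minimal sketch of using the compiler directly, without going through ShaderCache.
// Assumes an already-created VkDevice; the GLSL source is a hypothetical placeholder.
#if 0
static VkShaderModule ExampleCompileFragmentShader(VkDevice device)
{
	const std::optional<Vulkan::ShaderCompiler::SPIRVCodeVector> spv =
		Vulkan::ShaderCompiler::CompileFragmentShader(
			"#version 450\nlayout(location = 0) out vec4 o_col;\nvoid main() { o_col = vec4(1.0); }",
			/*debug=*/false);
	if (!spv.has_value())
		return VK_NULL_HANDLE;

	// codeSize is in bytes, while the vector holds 32-bit SPIR-V words.
	const VkShaderModuleCreateInfo ci = {VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0,
		spv->size() * sizeof(Vulkan::ShaderCompiler::SPIRVCodeType), spv->data()};
	VkShaderModule mod = VK_NULL_HANDLE;
	if (vkCreateShaderModule(device, &ci, nullptr, &mod) != VK_SUCCESS)
		return VK_NULL_HANDLE;
	return mod;
}
#endif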

View File

@ -1,323 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/StreamBuffer.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/Util.h"
#include "common/Align.h"
#include "common/Assertions.h"
#include "common/Console.h"
namespace Vulkan
{
StreamBuffer::StreamBuffer() = default;
StreamBuffer::StreamBuffer(StreamBuffer&& move)
: m_size(move.m_size)
, m_current_offset(move.m_current_offset)
, m_current_space(move.m_current_space)
, m_current_gpu_position(move.m_current_gpu_position)
, m_allocation(move.m_allocation)
, m_buffer(move.m_buffer)
, m_host_pointer(move.m_host_pointer)
, m_tracked_fences(std::move(move.m_tracked_fences))
{
move.m_size = 0;
move.m_current_offset = 0;
move.m_current_space = 0;
move.m_current_gpu_position = 0;
move.m_allocation = VK_NULL_HANDLE;
move.m_buffer = VK_NULL_HANDLE;
move.m_host_pointer = nullptr;
}
StreamBuffer::~StreamBuffer()
{
if (IsValid())
Destroy(true);
}
StreamBuffer& StreamBuffer::operator=(StreamBuffer&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_size, move.m_size);
std::swap(m_current_offset, move.m_current_offset);
std::swap(m_current_space, move.m_current_space);
std::swap(m_current_gpu_position, move.m_current_gpu_position);
std::swap(m_allocation, move.m_allocation);
std::swap(m_buffer, move.m_buffer);
std::swap(m_host_pointer, move.m_host_pointer);
std::swap(m_tracked_fences, move.m_tracked_fences);
return *this;
}
bool StreamBuffer::Create(VkBufferUsageFlags usage, u32 size)
{
const VkBufferCreateInfo bci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, nullptr, 0,
static_cast<VkDeviceSize>(size), usage, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr};
VmaAllocationCreateInfo aci = {};
aci.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
aci.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
aci.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
VmaAllocationInfo ai = {};
VkBuffer new_buffer = VK_NULL_HANDLE;
VmaAllocation new_allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(g_vulkan_context->GetAllocator(), &bci, &aci, &new_buffer, &new_allocation, &ai);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
if (IsValid())
Destroy(true);
// Replace with the new buffer
m_size = size;
m_current_offset = 0;
m_current_gpu_position = 0;
m_tracked_fences.clear();
m_allocation = new_allocation;
m_buffer = new_buffer;
m_host_pointer = static_cast<u8*>(ai.pMappedData);
return true;
}
void StreamBuffer::Destroy(bool defer)
{
if (m_buffer != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferBufferDestruction(m_buffer, m_allocation);
else
vmaDestroyBuffer(g_vulkan_context->GetAllocator(), m_buffer, m_allocation);
}
m_size = 0;
m_current_offset = 0;
m_current_gpu_position = 0;
m_tracked_fences.clear();
m_buffer = VK_NULL_HANDLE;
m_allocation = VK_NULL_HANDLE;
m_host_pointer = nullptr;
}
bool StreamBuffer::ReserveMemory(u32 num_bytes, u32 alignment)
{
const u32 required_bytes = num_bytes + alignment;
// Check for sane allocations
if (required_bytes > m_size)
{
Console.Error("Attempting to allocate %u bytes from a %u byte stream buffer", static_cast<u32>(num_bytes),
static_cast<u32>(m_size));
pxFailRel("Stream buffer overflow");
return false;
}
UpdateGPUPosition();
// Is the GPU behind or up to date with our current offset?
if (m_current_offset >= m_current_gpu_position)
{
const u32 remaining_bytes = m_size - m_current_offset;
if (required_bytes <= remaining_bytes)
{
// Place at the current position, after the GPU position.
m_current_offset = Common::AlignUp(m_current_offset, alignment);
m_current_space = m_size - m_current_offset;
return true;
}
// Check for space at the start of the buffer
// We use < here because we don't want to have the case of m_current_offset ==
// m_current_gpu_position. That would mean the code above would assume the
// GPU has caught up to us, which it hasn't.
if (required_bytes < m_current_gpu_position)
{
// Reset offset to zero, since we're allocating behind the gpu now
m_current_offset = 0;
m_current_space = m_current_gpu_position - 1;
return true;
}
}
// Is the GPU ahead of our current offset?
if (m_current_offset < m_current_gpu_position)
{
// We have from m_current_offset..m_current_gpu_position space to use.
const u32 remaining_bytes = m_current_gpu_position - m_current_offset;
if (required_bytes < remaining_bytes)
{
// Place at the current position, since this is still behind the GPU.
m_current_offset = Common::AlignUp(m_current_offset, alignment);
m_current_space = m_current_gpu_position - m_current_offset - 1;
return true;
}
}
// Can we find a fence to wait on that will give us enough memory?
if (WaitForClearSpace(required_bytes))
{
const u32 align_diff = Common::AlignUp(m_current_offset, alignment) - m_current_offset;
m_current_offset += align_diff;
m_current_space -= align_diff;
return true;
}
// We tried everything we could, and still couldn't get anything. This means that too much space
// in the buffer is being used by the command buffer currently being recorded. Therefore, the
// only option is to execute it, and wait until it's done.
return false;
}
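// Illustrative walk-through of the cases above (numbers are examples only),
// assuming a 4096-byte buffer and ignoring alignment padding for simplicity:
//  - offset=1024, gpu=1024 (GPU caught up): 3072 bytes remain at the tail, so a
//    2048-byte request is placed at 1024 and the offset advances past it.
//  - offset=3584, gpu=2048: only 512 bytes remain at the tail, so a 1024-byte
//    request wraps to offset 0 and may use up to gpu-1 = 2047 bytes.
//  - offset=512, gpu=2048 (allocating behind the GPU): a 1024-byte request fits
//    in the 1536-byte gap; a 2048-byte request does not, and falls through to
//    WaitForClearSpace() and, failing that, a command buffer submission by the caller.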
void StreamBuffer::CommitMemory(u32 final_num_bytes)
{
pxAssert((m_current_offset + final_num_bytes) <= m_size);
pxAssert(final_num_bytes <= m_current_space);
// For non-coherent mappings, flush the memory range
vmaFlushAllocation(g_vulkan_context->GetAllocator(), m_allocation, m_current_offset, final_num_bytes);
m_current_offset += final_num_bytes;
m_current_space -= final_num_bytes;
UpdateCurrentFencePosition();
}
void StreamBuffer::UpdateCurrentFencePosition()
{
// Has the offset changed since the last fence?
const u64 counter = g_vulkan_context->GetCurrentFenceCounter();
if (!m_tracked_fences.empty() && m_tracked_fences.back().first == counter)
{
// Still haven't executed a command buffer, so just update the offset.
m_tracked_fences.back().second = m_current_offset;
return;
}
// New command buffer since the last fence, so add a new tracking entry.
m_tracked_fences.emplace_back(counter, m_current_offset);
}
void StreamBuffer::UpdateGPUPosition()
{
auto start = m_tracked_fences.begin();
auto end = start;
const u64 completed_counter = g_vulkan_context->GetCompletedFenceCounter();
while (end != m_tracked_fences.end() && completed_counter >= end->first)
{
m_current_gpu_position = end->second;
++end;
}
if (start != end)
{
m_tracked_fences.erase(start, end);
if (m_current_offset == m_current_gpu_position)
{
// GPU is all caught up now.
m_current_offset = 0;
m_current_gpu_position = 0;
m_current_space = m_size;
}
}
}
bool StreamBuffer::WaitForClearSpace(u32 num_bytes)
{
u32 new_offset = 0;
u32 new_space = 0;
u32 new_gpu_position = 0;
auto iter = m_tracked_fences.begin();
for (; iter != m_tracked_fences.end(); ++iter)
{
// Would this fence bring us in line with the GPU?
// This is the "last resort" case, where a command buffer execution has been forced
// after no additional data has been written to it, so we can assume that after the
// fence has been signaled the entire buffer is now consumed.
u32 gpu_position = iter->second;
if (m_current_offset == gpu_position)
{
new_offset = 0;
new_space = m_size;
new_gpu_position = 0;
break;
}
// Assuming that we wait for this fence, are we allocating in front of the GPU?
if (m_current_offset > gpu_position)
{
// This would suggest the GPU has now followed us and wrapped around, so we have from
// m_current_offset..m_size free, as well as 0..gpu_position.
const u32 remaining_space_after_offset = m_size - m_current_offset;
if (remaining_space_after_offset >= num_bytes)
{
// Switch to allocating in front of the GPU, using the remainder of the buffer.
new_offset = m_current_offset;
new_space = m_size - m_current_offset;
new_gpu_position = gpu_position;
break;
}
// We can wrap around to the start, behind the GPU, if there is enough space.
// We use > here because otherwise we'd end up lining up with the GPU, and then the
// allocator would assume that the GPU has consumed what we just wrote.
if (gpu_position > num_bytes)
{
new_offset = 0;
new_space = gpu_position - 1;
new_gpu_position = gpu_position;
break;
}
}
else
{
// We're currently allocating behind the GPU. This would give us between the current
// offset and the GPU position worth of space to work with. Again, > because we can't
// align the GPU position with the buffer offset.
u32 available_space_inbetween = gpu_position - m_current_offset;
if (available_space_inbetween > num_bytes)
{
// Leave the offset as-is, but update the GPU position.
new_offset = m_current_offset;
new_space = available_space_inbetween - 1;
new_gpu_position = gpu_position;
break;
}
}
}
// Did any fences satisfy this condition?
// Has the command buffer been executed yet? If not, the caller should execute it.
if (iter == m_tracked_fences.end() || iter->first == g_vulkan_context->GetCurrentFenceCounter())
return false;
// Wait until this fence is signaled. This will fire the callback, updating the GPU position.
g_vulkan_context->WaitForFenceCounter(iter->first);
m_tracked_fences.erase(
m_tracked_fences.begin(), m_current_offset == iter->second ? m_tracked_fences.end() : ++iter);
m_current_offset = new_offset;
m_current_space = new_space;
m_current_gpu_position = new_gpu_position;
return true;
}
} // namespace Vulkan

View File

@ -1,71 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "common/Pcsx2Defs.h"
#include "common/Vulkan/Loader.h"
#include "vk_mem_alloc.h"
#include <deque>
#include <memory>
namespace Vulkan
{
class StreamBuffer
{
public:
StreamBuffer();
StreamBuffer(StreamBuffer&& move);
StreamBuffer(const StreamBuffer&) = delete;
~StreamBuffer();
StreamBuffer& operator=(StreamBuffer&& move);
StreamBuffer& operator=(const StreamBuffer&) = delete;
__fi bool IsValid() const { return (m_buffer != VK_NULL_HANDLE); }
__fi VkBuffer GetBuffer() const { return m_buffer; }
__fi u8* GetHostPointer() const { return m_host_pointer; }
__fi u8* GetCurrentHostPointer() const { return m_host_pointer + m_current_offset; }
__fi u32 GetCurrentSize() const { return m_size; }
__fi u32 GetCurrentSpace() const { return m_current_space; }
__fi u32 GetCurrentOffset() const { return m_current_offset; }
bool Create(VkBufferUsageFlags usage, u32 size);
void Destroy(bool defer);
bool ReserveMemory(u32 num_bytes, u32 alignment);
void CommitMemory(u32 final_num_bytes);
private:
bool AllocateBuffer(VkBufferUsageFlags usage, u32 size);
void UpdateCurrentFencePosition();
void UpdateGPUPosition();
// Waits for as many fences as needed to allocate num_bytes bytes from the buffer.
bool WaitForClearSpace(u32 num_bytes);
u32 m_size = 0;
u32 m_current_offset = 0;
u32 m_current_space = 0;
u32 m_current_gpu_position = 0;
VmaAllocation m_allocation = VK_NULL_HANDLE;
VkBuffer m_buffer = VK_NULL_HANDLE;
u8* m_host_pointer = nullptr;
// List of fences and the corresponding positions in the buffer
std::deque<std::pair<u64, u32>> m_tracked_fences;
};
} // namespace Vulkan
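// Minimal sketch of the intended upload pattern, assuming a valid g_vulkan_context,
// an arbitrary vertex type, and <cstring> for std::memcpy; names here are placeholders.
#if 0
struct ExampleVertex { float pos[3]; u32 color; };

static void ExampleStreamBufferUpload(Vulkan::StreamBuffer& buffer, const ExampleVertex* verts, u32 count)
{
	const u32 size = count * sizeof(ExampleVertex);
	if (!buffer.ReserveMemory(size, sizeof(ExampleVertex)))
	{
		// Not enough space even after waiting on fences; the caller is expected
		// to submit the current command buffer and try again.
		return;
	}

	const u32 offset = buffer.GetCurrentOffset();
	std::memcpy(buffer.GetCurrentHostPointer(), verts, size);
	buffer.CommitMemory(size);

	// 'offset' is then used when binding, e.g. passing it as the vertex buffer
	// offset for vkCmdBindVertexBuffers() together with buffer.GetBuffer().
}
#endif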

View File

@ -1,862 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/SwapChain.h"
#include "common/Assertions.h"
#include "common/CocoaTools.h"
#include "common/Console.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/Util.h"
#include <algorithm>
#include <array>
#include <cmath>
#if defined(VK_USE_PLATFORM_XLIB_KHR)
#include <X11/Xlib.h>
#endif
namespace Vulkan
{
SwapChain::SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, VkPresentModeKHR preferred_present_mode,
std::optional<bool> exclusive_fullscreen_control)
: m_window_info(wi)
, m_surface(surface)
, m_preferred_present_mode(preferred_present_mode)
, m_exclusive_fullscreen_control(exclusive_fullscreen_control)
{
}
SwapChain::~SwapChain()
{
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
}
#if 0
// Unused for now, can be used for nogui
static VkSurfaceKHR CreateDisplaySurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi)
{
Console.WriteLn("Trying to create a VK_KHR_display surface of %ux%u", wi->surface_width, wi->surface_height);
u32 num_displays;
VkResult res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, nullptr);
if (res != VK_SUCCESS || num_displays == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
std::vector<VkDisplayPropertiesKHR> displays(num_displays);
res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, displays.data());
if (res != VK_SUCCESS || num_displays != displays.size())
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
for (u32 display_index = 0; display_index < num_displays; display_index++)
{
const VkDisplayPropertiesKHR& props = displays[display_index];
DevCon.WriteLn("Testing display '%s'", props.displayName);
u32 num_modes;
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, nullptr);
if (res != VK_SUCCESS || num_modes == 0)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
std::vector<VkDisplayModePropertiesKHR> modes(num_modes);
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, modes.data());
if (res != VK_SUCCESS || num_modes != modes.size())
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
const VkDisplayModePropertiesKHR* matched_mode = nullptr;
for (const VkDisplayModePropertiesKHR& mode : modes)
{
const float refresh_rate = static_cast<float>(mode.parameters.refreshRate) / 1000.0f;
DevCon.WriteLn(" Mode %ux%u @ %f", mode.parameters.visibleRegion.width,
mode.parameters.visibleRegion.height, refresh_rate);
if (!matched_mode && ((wi->surface_width == 0 && wi->surface_height == 0) ||
(mode.parameters.visibleRegion.width == wi->surface_width &&
mode.parameters.visibleRegion.height == wi->surface_height &&
(wi->surface_refresh_rate == 0.0f ||
std::abs(refresh_rate - wi->surface_refresh_rate) < 0.1f))))
{
matched_mode = &mode;
}
}
if (!matched_mode)
{
DevCon.WriteLn("No modes matched on '%s'", props.displayName);
continue;
}
u32 num_planes;
res = vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physical_device, &num_planes, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR() failed:");
continue;
}
if (num_planes == 0)
continue;
std::vector<VkDisplayPlanePropertiesKHR> planes(num_planes);
res = vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physical_device, &num_planes, planes.data());
if (res != VK_SUCCESS || num_planes != planes.size())
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR() failed:");
continue;
}
u32 plane_index = 0;
for (; plane_index < num_planes; plane_index++)
{
u32 supported_display_count;
res = vkGetDisplayPlaneSupportedDisplaysKHR(
physical_device, plane_index, &supported_display_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayPlaneSupportedDisplaysKHR() failed:");
continue;
}
if (supported_display_count == 0)
continue;
std::vector<VkDisplayKHR> supported_displays(supported_display_count);
res = vkGetDisplayPlaneSupportedDisplaysKHR(
physical_device, plane_index, &supported_display_count, supported_displays.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayPlaneSupportedDisplaysKHR() failed:");
continue;
}
const bool is_supported = std::find(supported_displays.begin(), supported_displays.end(),
props.display) != supported_displays.end();
if (!is_supported)
continue;
break;
}
if (plane_index == num_planes)
{
DevCon.WriteLn("No planes matched on '%s'", props.displayName);
continue;
}
VkDisplaySurfaceCreateInfoKHR info = {};
info.sType = VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR;
info.displayMode = matched_mode->displayMode;
info.planeIndex = plane_index;
info.planeStackIndex = planes[plane_index].currentStackIndex;
info.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
info.globalAlpha = 1.0f;
info.alphaMode = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR;
info.imageExtent = matched_mode->parameters.visibleRegion;
VkSurfaceKHR surface;
res = vkCreateDisplayPlaneSurfaceKHR(instance, &info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDisplayPlaneSurfaceKHR() failed: ");
continue;
}
wi->surface_refresh_rate = static_cast<float>(matched_mode->parameters.refreshRate) / 1000.0f;
return surface;
}
return VK_NULL_HANDLE;
}
static std::vector<SwapChain::FullscreenModeInfo> GetDisplayModes(
VkInstance instance, VkPhysicalDevice physical_device, const WindowInfo& wi)
{
u32 num_displays;
VkResult res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
if (num_displays == 0)
{
Console.Error("No displays were returned");
return {};
}
std::vector<VkDisplayPropertiesKHR> displays(num_displays);
res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, displays.data());
if (res != VK_SUCCESS || num_displays != displays.size())
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
std::vector<SwapChain::FullscreenModeInfo> result;
for (u32 display_index = 0; display_index < num_displays; display_index++)
{
const VkDisplayPropertiesKHR& props = displays[display_index];
u32 num_modes;
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, nullptr);
if (res != VK_SUCCESS || num_modes == 0)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
std::vector<VkDisplayModePropertiesKHR> modes(num_modes);
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, modes.data());
if (res != VK_SUCCESS || num_modes != modes.size())
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
for (const VkDisplayModePropertiesKHR& mode : modes)
{
const float refresh_rate = static_cast<float>(mode.parameters.refreshRate) / 1000.0f;
if (std::find_if(
result.begin(), result.end(), [&mode, refresh_rate](const SwapChain::FullscreenModeInfo& mi) {
return (mi.width == mode.parameters.visibleRegion.width &&
mi.height == mode.parameters.visibleRegion.height &&
mi.refresh_rate == refresh_rate);
}) != result.end())
{
continue;
}
result.push_back(SwapChain::FullscreenModeInfo{static_cast<u32>(mode.parameters.visibleRegion.width),
static_cast<u32>(mode.parameters.visibleRegion.height), refresh_rate});
}
}
return result;
}
#endif
VkSurfaceKHR SwapChain::CreateVulkanSurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi)
{
#if defined(VK_USE_PLATFORM_WIN32_KHR)
if (wi->type == WindowInfo::Type::Win32)
{
VkWin32SurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkWin32SurfaceCreateFlagsKHR flags
nullptr, // HINSTANCE hinstance
reinterpret_cast<HWND>(wi->window_handle) // HWND hwnd
};
VkSurfaceKHR surface;
VkResult res = vkCreateWin32SurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWin32SurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_XLIB_KHR)
if (wi->type == WindowInfo::Type::X11)
{
VkXlibSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkXlibSurfaceCreateFlagsKHR flags
static_cast<Display*>(wi->display_connection), // Display* dpy
reinterpret_cast<Window>(wi->window_handle) // Window window
};
VkSurfaceKHR surface;
VkResult res = vkCreateXlibSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateXlibSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
if (wi->type == WindowInfo::Type::Wayland)
{
VkWaylandSurfaceCreateInfoKHR surface_create_info = {VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
nullptr, 0, static_cast<struct wl_display*>(wi->display_connection),
static_cast<struct wl_surface*>(wi->window_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateWaylandSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWaylandSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
if (wi->type == WindowInfo::Type::Android)
{
VkAndroidSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAndroidSurfaceCreateFlagsKHR flags
reinterpret_cast<ANativeWindow*>(wi->window_handle) // ANativeWindow* window
};
VkSurfaceKHR surface;
VkResult res = vkCreateAndroidSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateAndroidSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
if (wi->type == WindowInfo::Type::MacOS)
{
if (!wi->surface_handle && !CocoaTools::CreateMetalLayer(wi))
return VK_NULL_HANDLE;
VkMetalSurfaceCreateInfoEXT surface_create_info = {VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, nullptr,
0, static_cast<const CAMetalLayer*>(wi->surface_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateMetalSurfaceEXT(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMetalSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#elif defined(VK_USE_PLATFORM_MACOS_MVK)
if (wi->type == WindowInfo::Type::MacOS)
{
VkMacOSSurfaceCreateInfoMVK surface_create_info = {
VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK, nullptr, 0, wi->window_handle};
VkSurfaceKHR surface;
VkResult res = vkCreateMacOSSurfaceMVK(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMacOSSurfaceMVK failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if 0
if (wi->type == WindowInfo::Type::Display)
return CreateDisplaySurface(instance, physical_device, wi);
#endif
return VK_NULL_HANDLE;
}
void SwapChain::DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface)
{
vkDestroySurfaceKHR(instance, surface, nullptr);
#if defined(__APPLE__)
if (wi->type == WindowInfo::Type::MacOS && wi->surface_handle)
CocoaTools::DestroyMetalLayer(wi);
#endif
}
std::vector<SwapChain::FullscreenModeInfo> SwapChain::GetSurfaceFullscreenModes(
VkInstance instance, VkPhysicalDevice physical_device, const WindowInfo& wi)
{
#if 0
if (wi.type == WindowInfo::Type::Display)
return GetDisplayModes(instance, physical_device, wi);
#endif
return {};
}
std::unique_ptr<SwapChain> SwapChain::Create(const WindowInfo& wi, VkSurfaceKHR surface,
VkPresentModeKHR preferred_present_mode, std::optional<bool> exclusive_fullscreen_control)
{
std::unique_ptr<SwapChain> swap_chain = std::unique_ptr<SwapChain>(
new SwapChain(wi, surface, preferred_present_mode, exclusive_fullscreen_control));
if (!swap_chain->CreateSwapChain() || !swap_chain->SetupSwapChainImages())
return nullptr;
return swap_chain;
}
bool SwapChain::SelectSurfaceFormat()
{
u32 format_count;
VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, nullptr);
if (res != VK_SUCCESS || format_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, surface_formats.data());
pxAssert(res == VK_SUCCESS);
// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
{
m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM;
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
// Try to find a suitable format.
for (const VkSurfaceFormatKHR& surface_format : surface_formats)
{
// Some drivers seem to return a SRGB format here (Intel Mesa).
// This results in gamma correction when presenting to the screen, which we don't want.
// Use a linear format instead, if this is the case.
m_surface_format.format = Util::GetLinearFormat(surface_format.format);
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
pxFailRel("Failed to find a suitable format for swap chain buffers.");
return false;
}
bool SwapChain::SelectPresentMode()
{
VkResult res;
u32 mode_count;
res = vkGetPhysicalDeviceSurfacePresentModesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, nullptr);
if (res != VK_SUCCESS || mode_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, present_modes.data());
pxAssert(res == VK_SUCCESS);
// Checks if a particular mode is supported, if it is, returns that mode.
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
auto it = std::find_if(present_modes.begin(), present_modes.end(),
[check_mode](VkPresentModeKHR mode) { return check_mode == mode; });
return it != present_modes.end();
};
// Use preferred mode if available.
if (CheckForMode(m_preferred_present_mode))
{
m_present_mode = m_preferred_present_mode;
return true;
}
// Prefer mailbox over fifo for adaptive vsync/no-vsync.
if ((m_preferred_present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
m_preferred_present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) &&
CheckForMode(VK_PRESENT_MODE_MAILBOX_KHR))
{
m_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
return true;
}
// Fallback to FIFO if we're using any kind of vsync.
if (m_preferred_present_mode == VK_PRESENT_MODE_FIFO_KHR || m_preferred_present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
{
// This should never fail, FIFO is mandated.
if (CheckForMode(VK_PRESENT_MODE_FIFO_KHR))
{
m_present_mode = VK_PRESENT_MODE_FIFO_KHR;
return true;
}
}
// Fall back to whatever is available.
m_present_mode = present_modes[0];
return true;
}
bool SwapChain::CreateSwapChain()
{
// Look up surface properties to determine image count and dimensions
VkSurfaceCapabilitiesKHR surface_capabilities;
VkResult res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &surface_capabilities);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: ");
return false;
}
// Select swap chain format and present mode
if (!SelectSurfaceFormat() || !SelectPresentMode())
return false;
DevCon.WriteLn("(SwapChain) Preferred present mode: %s, selected: %s",
Util::PresentModeToString(m_preferred_present_mode), Util::PresentModeToString(m_present_mode));
// Select number of images in swap chain, we prefer one buffer in the background to work on
u32 image_count = std::max(surface_capabilities.minImageCount + 1u, 2u);
// maxImageCount can be zero, in which case there isn't an upper limit on the number of buffers.
if (surface_capabilities.maxImageCount > 0)
image_count = std::min(image_count, surface_capabilities.maxImageCount);
// Determine the dimensions of the swap chain. A currentExtent of 0xFFFFFFFF (-1) means the
// surface size is determined by the swap chain extent we specify, so fall back to the window size.
VkExtent2D size = surface_capabilities.currentExtent;
if (size.width == UINT32_MAX)
{
size.width = m_window_info.surface_width;
size.height = m_window_info.surface_height;
}
size.width = std::clamp(
size.width, surface_capabilities.minImageExtent.width, surface_capabilities.maxImageExtent.width);
size.height = std::clamp(
size.height, surface_capabilities.minImageExtent.height, surface_capabilities.maxImageExtent.height);
// Prefer identity transform if possible
VkSurfaceTransformFlagBitsKHR transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
if (!(surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR))
transform = surface_capabilities.currentTransform;
VkCompositeAlphaFlagBitsKHR alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
if (!(surface_capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR))
{
// If we only support pre-multiplied/post-multiplied... :/
if (surface_capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
alpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
}
// Select swap chain flags, we only need a colour attachment
VkImageUsageFlags image_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if ((surface_capabilities.supportedUsageFlags & image_usage) != image_usage)
{
Console.Error("Vulkan: Swap chain does not support usage as color attachment");
return false;
}
// Store the old/current swap chain when recreating for resize
// Old swap chain is destroyed regardless of whether the create call succeeds
VkSwapchainKHR old_swap_chain = m_swap_chain;
m_swap_chain = VK_NULL_HANDLE;
// Now we can actually create the swap chain
VkSwapchainCreateInfoKHR swap_chain_info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, nullptr, 0, m_surface,
image_count, m_surface_format.format, m_surface_format.colorSpace, size, 1u, image_usage,
VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, transform, alpha, m_present_mode,
VK_TRUE, old_swap_chain};
std::array<uint32_t, 2> indices = {{
g_vulkan_context->GetGraphicsQueueFamilyIndex(),
g_vulkan_context->GetPresentQueueFamilyIndex(),
}};
if (g_vulkan_context->GetGraphicsQueueFamilyIndex() != g_vulkan_context->GetPresentQueueFamilyIndex())
{
swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swap_chain_info.queueFamilyIndexCount = 2;
swap_chain_info.pQueueFamilyIndices = indices.data();
}
#ifdef _WIN32
VkSurfaceFullScreenExclusiveInfoEXT exclusive_info = {VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT};
if (g_vulkan_context->GetOptionalExtensions().vk_ext_full_screen_exclusive)
{
exclusive_info.fullScreenExclusive = m_exclusive_fullscreen_control.has_value() ?
(m_exclusive_fullscreen_control.value() ?
VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT :
VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT) :
VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT;
Util::AddPointerToChain(&swap_chain_info, &exclusive_info);
}
else if (m_exclusive_fullscreen_control.has_value())
{
Console.Error("Exclusive fullscreen control requested, but VK_EXT_full_screen_exclusive is not supported.");
}
#else
if (m_exclusive_fullscreen_control.has_value())
Console.Error("Exclusive fullscreen control requested, but is not supported on this platform.");
#endif
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
return false;
}
// Now destroy the old swap chain, since it's been recreated.
// We can do this immediately since all work should have been completed before calling resize.
if (old_swap_chain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
m_window_info.surface_width = std::max(1u, size.width);
m_window_info.surface_height = std::max(1u, size.height);
return true;
}
bool SwapChain::SetupSwapChainImages()
{
pxAssert(m_images.empty());
u32 image_count;
VkResult res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
return false;
}
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data());
pxAssert(res == VK_SUCCESS);
m_load_render_pass =
g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED, VK_ATTACHMENT_LOAD_OP_LOAD);
m_clear_render_pass =
g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED, VK_ATTACHMENT_LOAD_OP_CLEAR);
if (m_load_render_pass == VK_NULL_HANDLE || m_clear_render_pass == VK_NULL_HANDLE)
{
pxFailRel("Failed to get swap chain render passes.");
return false;
}
m_images.reserve(image_count);
m_current_image = 0;
for (u32 i = 0; i < image_count; i++)
{
SwapChainImage image;
image.image = images[i];
// Create texture object, which creates a view of the backbuffer
if (!image.texture.Adopt(image.image, VK_IMAGE_VIEW_TYPE_2D, m_window_info.surface_width,
m_window_info.surface_height, 1, 1, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT))
{
return false;
}
image.framebuffer = image.texture.CreateFramebuffer(m_load_render_pass);
if (image.framebuffer == VK_NULL_HANDLE)
return false;
m_images.emplace_back(std::move(image));
}
m_semaphores.reserve(image_count);
m_current_semaphore = (image_count - 1);
for (u32 i = 0; i < image_count; i++)
{
ImageSemaphores sema;
const VkSemaphoreCreateInfo semaphore_info = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr, &sema.available_semaphore);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
return false;
}
res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr, &sema.rendering_finished_semaphore);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
vkDestroySemaphore(g_vulkan_context->GetDevice(), sema.available_semaphore, nullptr);
return false;
}
m_semaphores.push_back(sema);
}
return true;
}
void SwapChain::DestroySwapChainImages()
{
for (auto& it : m_images)
{
// Images themselves are cleaned up by the swap chain object
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), it.framebuffer, nullptr);
}
m_images.clear();
for (auto& it : m_semaphores)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), it.rendering_finished_semaphore, nullptr);
vkDestroySemaphore(g_vulkan_context->GetDevice(), it.available_semaphore, nullptr);
}
m_semaphores.clear();
m_image_acquire_result.reset();
}
void SwapChain::DestroySwapChain()
{
if (m_swap_chain == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr);
m_swap_chain = VK_NULL_HANDLE;
m_window_info.surface_width = 0;
m_window_info.surface_height = 0;
}
VkResult SwapChain::AcquireNextImage()
{
if (m_image_acquire_result.has_value())
return m_image_acquire_result.value();
if (!m_swap_chain)
return VK_ERROR_SURFACE_LOST_KHR;
// Use a different semaphore for each image.
m_current_semaphore = (m_current_semaphore + 1) % static_cast<u32>(m_semaphores.size());
const VkResult res = vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX,
m_semaphores[m_current_semaphore].available_semaphore, VK_NULL_HANDLE, &m_current_image);
m_image_acquire_result = res;
return res;
}
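// Illustrative usage sketch (hypothetical caller, not part of this file): when acquisition
// reports that the surface has changed, callers generally recreate the swap chain and try once
// more before giving up on the frame, e.g.:
//
//   VkResult res = swap_chain->AcquireNextImage();
//   if (res == VK_ERROR_OUT_OF_DATE_KHR || res == VK_SUBOPTIMAL_KHR)
//   {
//       swap_chain->ReleaseCurrentImage();
//       if (swap_chain->ResizeSwapChain())
//           res = swap_chain->AcquireNextImage();
//   }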
void SwapChain::ReleaseCurrentImage()
{
m_image_acquire_result.reset();
}
bool SwapChain::ResizeSwapChain(u32 new_width, u32 new_height, float new_scale)
{
DestroySwapChainImages();
if (new_width != 0 && new_height != 0)
{
m_window_info.surface_width = new_width;
m_window_info.surface_height = new_height;
}
m_window_info.surface_scale = new_scale;
if (!CreateSwapChain() || !SetupSwapChainImages())
{
DestroySwapChainImages();
DestroySwapChain();
return false;
}
return true;
}
bool SwapChain::RecreateSwapChain()
{
DestroySwapChainImages();
if (!CreateSwapChain() || !SetupSwapChainImages())
{
DestroySwapChainImages();
DestroySwapChain();
return false;
}
return true;
}
bool SwapChain::SetVSync(VkPresentModeKHR preferred_mode)
{
if (m_preferred_present_mode == preferred_mode)
return true;
// Recreate the swap chain with the new present mode.
m_preferred_present_mode = preferred_mode;
return RecreateSwapChain();
}
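// Illustrative sketch (assumed caller-side mapping, not part of this file): a frontend would
// typically translate its vsync setting into a preferred present mode before calling SetVSync():
//
//   const VkPresentModeKHR mode =
//       vsync_enabled ? VK_PRESENT_MODE_FIFO_KHR : VK_PRESENT_MODE_IMMEDIATE_KHR;
//   swap_chain->SetVSync(mode);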
bool SwapChain::RecreateSurface(const WindowInfo& new_wi)
{
// Destroy the old swap chain, images, and surface.
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
// Re-create the surface with the new native handle
m_window_info = new_wi;
m_surface = CreateVulkanSurface(
g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &m_window_info);
if (m_surface == VK_NULL_HANDLE)
return false;
// The validation layers get angry at us if we don't call this before creating the swapchain.
VkBool32 present_supported = VK_TRUE;
VkResult res = vkGetPhysicalDeviceSurfaceSupportKHR(g_vulkan_context->GetPhysicalDevice(),
g_vulkan_context->GetPresentQueueFamilyIndex(), m_surface, &present_supported);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: ");
return false;
}
if (!present_supported)
{
pxFailRel("Recreated surface does not support presenting.");
return false;
}
// Finally re-create the swap chain
if (!CreateSwapChain())
return false;
if (!SetupSwapChainImages())
{
DestroySwapChain();
DestroySurface();
return false;
}
return true;
}
void SwapChain::DestroySurface()
{
if (m_surface == VK_NULL_HANDLE)
return;
DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &m_window_info, m_surface);
m_surface = VK_NULL_HANDLE;
}
} // namespace Vulkan

View File

@ -1,138 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "common/Pcsx2Defs.h"
#include "common/WindowInfo.h"
#include "common/Vulkan/Texture.h"
#include "common/Vulkan/Loader.h"
#include <memory>
#include <optional>
#include <vector>
namespace Vulkan
{
class SwapChain
{
public:
~SwapChain();
// Creates a vulkan-renderable surface for the specified window handle.
static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi);
// Destroys a previously-created surface.
static void DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface);
// Enumerates fullscreen modes for window info.
struct FullscreenModeInfo
{
u32 width;
u32 height;
float refresh_rate;
};
static std::vector<FullscreenModeInfo> GetSurfaceFullscreenModes(
VkInstance instance, VkPhysicalDevice physical_device, const WindowInfo& wi);
// Create a new swap chain from a pre-existing surface.
static std::unique_ptr<SwapChain> Create(const WindowInfo& wi, VkSurfaceKHR surface,
VkPresentModeKHR preferred_present_mode, std::optional<bool> exclusive_fullscreen_control);
__fi VkSurfaceKHR GetSurface() const { return m_surface; }
__fi VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; }
__fi VkFormat GetTextureFormat() const { return m_surface_format.format; }
__fi VkPresentModeKHR GetPreferredPresentMode() const { return m_preferred_present_mode; }
__fi VkSwapchainKHR GetSwapChain() const { return m_swap_chain; }
__fi const VkSwapchainKHR* GetSwapChainPtr() const { return &m_swap_chain; }
__fi const WindowInfo& GetWindowInfo() const { return m_window_info; }
__fi u32 GetWidth() const { return m_window_info.surface_width; }
__fi u32 GetHeight() const { return m_window_info.surface_height; }
__fi float GetScale() const { return m_window_info.surface_scale; }
__fi u32 GetCurrentImageIndex() const { return m_current_image; }
__fi const u32* GetCurrentImageIndexPtr() const { return &m_current_image; }
__fi u32 GetImageCount() const { return static_cast<u32>(m_images.size()); }
__fi VkImage GetCurrentImage() const { return m_images[m_current_image].image; }
__fi const Texture& GetCurrentTexture() const { return m_images[m_current_image].texture; }
__fi Texture& GetCurrentTexture() { return m_images[m_current_image].texture; }
__fi VkFramebuffer GetCurrentFramebuffer() const { return m_images[m_current_image].framebuffer; }
__fi VkRenderPass GetLoadRenderPass() const { return m_load_render_pass; }
__fi VkRenderPass GetClearRenderPass() const { return m_clear_render_pass; }
__fi VkSemaphore GetImageAvailableSemaphore() const { return m_semaphores[m_current_semaphore].available_semaphore; }
__fi const VkSemaphore* GetImageAvailableSemaphorePtr() const { return &m_semaphores[m_current_semaphore].available_semaphore; }
__fi VkSemaphore GetRenderingFinishedSemaphore() const { return m_semaphores[m_current_semaphore].rendering_finished_semaphore; }
__fi const VkSemaphore* GetRenderingFinishedSemaphorePtr() const { return &m_semaphores[m_current_semaphore].rendering_finished_semaphore; }
VkResult AcquireNextImage();
void ReleaseCurrentImage();
bool RecreateSurface(const WindowInfo& new_wi);
bool ResizeSwapChain(u32 new_width = 0, u32 new_height = 0, float new_scale = 1.0f);
bool RecreateSwapChain();
// Change vsync enabled state. This may fail as it causes a swapchain recreation.
bool SetVSync(VkPresentModeKHR preferred_mode);
// Returns true if the current present mode is synchronizing (adaptive or hard).
bool IsPresentModeSynchronizing() const
{
return (m_present_mode == VK_PRESENT_MODE_FIFO_KHR || m_present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR);
}
private:
SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, VkPresentModeKHR preferred_present_mode,
std::optional<bool> exclusive_fullscreen_control);
bool SelectSurfaceFormat();
bool SelectPresentMode();
bool CreateSwapChain();
void DestroySwapChain();
bool SetupSwapChainImages();
void DestroySwapChainImages();
void DestroySurface();
struct SwapChainImage
{
VkImage image;
Texture texture;
VkFramebuffer framebuffer;
};
struct ImageSemaphores
{
VkSemaphore available_semaphore;
VkSemaphore rendering_finished_semaphore;
};
WindowInfo m_window_info;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR m_surface_format = {};
VkPresentModeKHR m_preferred_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
VkRenderPass m_load_render_pass = VK_NULL_HANDLE;
VkRenderPass m_clear_render_pass = VK_NULL_HANDLE;
VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE;
std::vector<SwapChainImage> m_images;
std::vector<ImageSemaphores> m_semaphores;
u32 m_current_image = 0;
u32 m_current_semaphore = 0;
std::optional<VkResult> m_image_acquire_result;
std::optional<bool> m_exclusive_fullscreen_control;
};
} // namespace Vulkan

View File

@ -1,395 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/Texture.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/Util.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include <algorithm>
static constexpr VkComponentMapping s_identity_swizzle{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
namespace Vulkan
{
Texture::Texture() = default;
Texture::Texture(Texture&& move)
: m_width(move.m_width)
, m_height(move.m_height)
, m_levels(move.m_levels)
, m_layers(move.m_layers)
, m_format(move.m_format)
, m_samples(move.m_samples)
, m_view_type(move.m_view_type)
, m_layout(move.m_layout)
, m_image(move.m_image)
, m_allocation(move.m_allocation)
, m_view(move.m_view)
{
move.m_width = 0;
move.m_height = 0;
move.m_levels = 0;
move.m_layers = 0;
move.m_format = VK_FORMAT_UNDEFINED;
move.m_samples = VK_SAMPLE_COUNT_1_BIT;
move.m_view_type = VK_IMAGE_VIEW_TYPE_2D;
move.m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
move.m_image = VK_NULL_HANDLE;
move.m_allocation = VK_NULL_HANDLE;
move.m_view = VK_NULL_HANDLE;
}
Texture::~Texture()
{
if (IsValid())
Destroy(true);
}
Vulkan::Texture& Texture::operator=(Texture&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_width, move.m_width);
std::swap(m_height, move.m_height);
std::swap(m_levels, move.m_levels);
std::swap(m_layers, move.m_layers);
std::swap(m_format, move.m_format);
std::swap(m_samples, move.m_samples);
std::swap(m_view_type, move.m_view_type);
std::swap(m_layout, move.m_layout);
std::swap(m_image, move.m_image);
std::swap(m_allocation, move.m_allocation);
std::swap(m_view, move.m_view);
return *this;
}
bool Texture::Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage,
const VkComponentMapping* swizzle /* = nullptr*/)
{
const VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, 0, VK_IMAGE_TYPE_2D, format,
{width, height, 1}, levels, layers, samples, tiling, usage, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VmaAllocationCreateInfo aci = {};
aci.usage = VMA_MEMORY_USAGE_GPU_ONLY;
aci.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
aci.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res =
vmaCreateImage(g_vulkan_context->GetAllocator(), &image_info, &aci, &image, &allocation, nullptr);
if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
{
DevCon.WriteLn("Failed to allocate device memory for %ux%u texture", width, height);
return false;
}
else if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vmaCreateImage failed: ");
return false;
}
const VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, view_type,
format, swizzle ? *swizzle : s_identity_swizzle,
{Util::IsDepthFormat(format) ? static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
VkImageView view = VK_NULL_HANDLE;
res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
vmaDestroyImage(g_vulkan_context->GetAllocator(), image, allocation);
return false;
}
if (IsValid())
Destroy(true);
m_width = width;
m_height = height;
m_levels = levels;
m_layers = layers;
m_format = format;
m_samples = samples;
m_view_type = view_type;
m_image = image;
m_allocation = allocation;
m_view = view;
return true;
}
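// Illustrative sketch (hypothetical parameters, not part of this file): creating a single-level
// 2D render target with this helper would look like
//
//   Vulkan::Texture rt;
//   rt.Create(1280, 720, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
//       VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
//       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);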
bool Texture::Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels,
u32 layers, VkFormat format, VkSampleCountFlagBits samples, const VkComponentMapping* swizzle /* = nullptr*/)
{
// Only need to create the image view; this is mainly for swap chains.
const VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, existing_image,
view_type, format, swizzle ? *swizzle : s_identity_swizzle,
{Util::IsDepthFormat(format) ? static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
// Memory is managed by the owner of the image.
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
return false;
}
if (IsValid())
Destroy(true);
m_width = width;
m_height = height;
m_levels = levels;
m_layers = layers;
m_format = format;
m_samples = samples;
m_view_type = view_type;
m_image = existing_image;
m_view = view;
return true;
}
void Texture::Destroy(bool defer /* = true */)
{
if (m_view != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferImageViewDestruction(m_view);
else
vkDestroyImageView(g_vulkan_context->GetDevice(), m_view, nullptr);
m_view = VK_NULL_HANDLE;
}
// If we don't have device memory allocated, the image is not owned by us (e.g. swapchain)
if (m_allocation != VK_NULL_HANDLE)
{
pxAssert(m_image != VK_NULL_HANDLE);
if (defer)
g_vulkan_context->DeferImageDestruction(m_image, m_allocation);
else
vmaDestroyImage(g_vulkan_context->GetAllocator(), m_image, m_allocation);
m_image = VK_NULL_HANDLE;
m_allocation = VK_NULL_HANDLE;
}
m_width = 0;
m_height = 0;
m_levels = 0;
m_layers = 0;
m_format = VK_FORMAT_UNDEFINED;
m_samples = VK_SAMPLE_COUNT_1_BIT;
m_view_type = VK_IMAGE_VIEW_TYPE_2D;
m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
}
void Texture::OverrideImageLayout(VkImageLayout new_layout) { m_layout = new_layout; }
void Texture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout)
{
if (m_layout == new_layout)
return;
TransitionSubresourcesToLayout(command_buffer, 0, m_levels, 0, m_layers, m_layout, new_layout);
m_layout = new_layout;
}
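// Illustrative usage sketch (hypothetical caller, not part of this file): before sampling a
// texture that was just rendered to, a caller would transition it on the current command buffer:
//
//   VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer();
//   tex.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);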
void Texture::TransitionSubresourcesToLayout(VkCommandBuffer command_buffer, u32 start_level, u32 num_levels,
u32 start_layer, u32 num_layers, VkImageLayout old_layout, VkImageLayout new_layout)
{
VkImageAspectFlags aspect;
if (Util::IsDepthStencilFormat(m_format))
aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
else if (Util::IsDepthFormat(m_format))
aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
else
aspect = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
0, // VkAccessFlags dstAccessMask
old_layout, // VkImageLayout oldLayout
new_layout, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{aspect, start_level, num_levels, start_layer, num_layers} // VkImageSubresourceRange subresourceRange
};
// srcStageMask -> Stages that must complete before the barrier
// dstStageMask -> Stages that must wait until after the barrier before beginning
VkPipelineStageFlags srcStageMask, dstStageMask;
switch (old_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Layout is undefined, therefore the contents are undefined, and we don't care what happens to them.
barrier.srcAccessMask = 0;
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image has been pre-initialized by the host, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_HOST_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image was being used as a color attachment, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image was being used as a depth/stencil attachment, so ensure all writes have completed.
barrier.srcAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image was being used as a shader resource, make sure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image was being used as a copy source, ensure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image was being used as a copy destination, ensure all writes have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_GENERAL:
// General is used for feedback loops.
barrier.srcAccessMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) :
(VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT);
srcStageMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) :
(VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
break;
default:
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
}
switch (new_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
barrier.dstAccessMask = 0;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_GENERAL:
// General is used for feedback loops.
barrier.dstAccessMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) :
(VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT);
dstStageMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) :
(VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
break;
default:
dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
break;
}
vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
VkFramebuffer Texture::CreateFramebuffer(VkRenderPass render_pass)
{
const VkFramebufferCreateInfo ci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0u, render_pass, 1,
&m_view, m_width, m_height, m_layers};
VkFramebuffer fb = VK_NULL_HANDLE;
VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
return fb;
}
void Texture::UpdateFromBuffer(VkCommandBuffer cmdbuf, u32 level, u32 layer, u32 x, u32 y, u32 width, u32 height,
u32 buffer_height, u32 row_length, VkBuffer buffer, u32 buffer_offset)
{
const VkImageLayout old_layout = m_layout;
if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
TransitionSubresourcesToLayout(
cmdbuf, level, 1, layer, 1, old_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkBufferImageCopy bic = {static_cast<VkDeviceSize>(buffer_offset), row_length, buffer_height,
{VK_IMAGE_ASPECT_COLOR_BIT, level, layer, 1u}, {static_cast<int32_t>(x), static_cast<int32_t>(y), 0},
{width, height, 1u}};
vkCmdCopyBufferToImage(cmdbuf, buffer, m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bic);
if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
TransitionSubresourcesToLayout(
cmdbuf, level, 1, layer, 1, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, old_layout);
}
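// Illustrative sketch (assumes a caller-managed staging buffer, not part of this file):
// uploading a tightly packed RGBA8 level 0 would look like
//
//   const u32 row_length = width;                // texels per row in the source buffer
//   tex.UpdateFromBuffer(cmdbuf, 0, 0, 0, 0, width, height,
//       height, row_length, staging_buffer, staging_offset);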
} // namespace Vulkan

View File

@ -1,92 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "common/Pcsx2Defs.h"
#include "common/Vulkan/Loader.h"
#include <algorithm>
#include <memory>
namespace Vulkan
{
class Texture
{
public:
Texture();
Texture(Texture&& move);
Texture(const Texture&) = delete;
~Texture();
Texture& operator=(Texture&& move);
Texture& operator=(const Texture&) = delete;
__fi bool IsValid() const { return (m_image != VK_NULL_HANDLE); }
/// An image is considered owned/managed if we control the memory.
__fi bool IsOwned() const { return (m_allocation != VK_NULL_HANDLE); }
__fi u32 GetWidth() const { return m_width; }
__fi u32 GetHeight() const { return m_height; }
__fi u32 GetLevels() const { return m_levels; }
__fi u32 GetLayers() const { return m_layers; }
__fi u32 GetMipWidth(u32 level) const { return std::max<u32>(m_width >> level, 1u); }
__fi u32 GetMipHeight(u32 level) const { return std::max<u32>(m_height >> level, 1u); }
__fi VkFormat GetFormat() const { return m_format; }
__fi VkSampleCountFlagBits GetSamples() const { return m_samples; }
__fi VkImageLayout GetLayout() const { return m_layout; }
__fi VkImageViewType GetViewType() const { return m_view_type; }
__fi VkImage GetImage() const { return m_image; }
__fi VmaAllocation GetAllocation() const { return m_allocation; }
__fi VkImageView GetView() const { return m_view; }
bool Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage,
const VkComponentMapping* swizzle = nullptr);
bool Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples, const VkComponentMapping* swizzle = nullptr);
void Destroy(bool defer = true);
// Used when the render pass is changing the image layout, or to force it to
// VK_IMAGE_LAYOUT_UNDEFINED, if the existing contents of the image are
// irrelevant and will not be loaded.
void OverrideImageLayout(VkImageLayout new_layout);
void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout);
void TransitionSubresourcesToLayout(VkCommandBuffer command_buffer, u32 start_level, u32 num_levels,
u32 start_layer, u32 num_layers, VkImageLayout old_layout, VkImageLayout new_layout);
VkFramebuffer CreateFramebuffer(VkRenderPass render_pass);
void UpdateFromBuffer(VkCommandBuffer cmdbuf, u32 level, u32 layer, u32 x, u32 y, u32 width, u32 height,
u32 buffer_height, u32 row_length, VkBuffer buffer, u32 buffer_offset);
private:
u32 m_width = 0;
u32 m_height = 0;
u32 m_levels = 0;
u32 m_layers = 0;
VkFormat m_format = VK_FORMAT_UNDEFINED;
VkSampleCountFlagBits m_samples = VK_SAMPLE_COUNT_1_BIT;
VkImageViewType m_view_type = VK_IMAGE_VIEW_TYPE_2D;
VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage m_image = VK_NULL_HANDLE;
VmaAllocation m_allocation = VK_NULL_HANDLE;
VkImageView m_view = VK_NULL_HANDLE;
};
} // namespace Vulkan

View File

@ -1,393 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/Vulkan/Util.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/ShaderCompiler.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include "common/StringUtil.h"
#include <cmath>
namespace Vulkan
{
namespace Util
{
bool IsDepthFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
bool IsDepthStencilFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
VkFormat GetLinearFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R8_SRGB:
return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8G8_SRGB:
return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8B8_SRGB:
return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_R8G8B8A8_SRGB:
return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_B8G8R8_SRGB:
return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_B8G8R8A8_SRGB:
return VK_FORMAT_B8G8R8A8_UNORM;
default:
return format;
}
}
u32 GetTexelSize(VkFormat format)
{
// Only contains pixel formats we use.
switch (format)
{
case VK_FORMAT_R8_UNORM:
return 1;
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
case VK_FORMAT_R16_UINT:
return 2;
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SFLOAT:
case VK_FORMAT_D32_SFLOAT:
return 4;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
return 8;
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return 16;
default:
pxFailRel("Unhandled pixel format");
return 1;
}
}
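// Illustrative sketch (hypothetical caller, not part of this file): for the uncompressed formats
// above, the byte pitch of a tightly packed row is simply
//
//   const u32 pitch = width * Vulkan::Util::GetTexelSize(VK_FORMAT_R8G8B8A8_UNORM);   // width * 4
//
// Note that the BC* cases return the size of a 4x4 block rather than a single texel.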
VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor)
{
switch (factor)
{
case VK_BLEND_FACTOR_SRC_COLOR:
return VK_BLEND_FACTOR_SRC_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case VK_BLEND_FACTOR_DST_COLOR:
return VK_BLEND_FACTOR_DST_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
default:
return factor;
}
}
void SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /*= 0.0f*/, float max_depth /*= 1.0f*/)
{
const VkViewport vp{static_cast<float>(x), static_cast<float>(y), static_cast<float>(width),
static_cast<float>(height), min_depth, max_depth};
vkCmdSetViewport(command_buffer, 0, 1, &vp);
}
void SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height)
{
const VkRect2D scissor{{x, y}, {static_cast<u32>(width), static_cast<u32>(height)}};
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /* = 0.0f */, float max_depth /* = 1.0f */)
{
SetViewport(command_buffer, x, y, width, height, min_depth, max_depth);
SetScissor(command_buffer, x, y, width, height);
}
void SafeDestroyFramebuffer(VkFramebuffer& fb)
{
if (fb != VK_NULL_HANDLE)
{
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr);
fb = VK_NULL_HANDLE;
}
}
void SafeDestroyShaderModule(VkShaderModule& sm)
{
if (sm != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), sm, nullptr);
sm = VK_NULL_HANDLE;
}
}
void SafeDestroyPipeline(VkPipeline& p)
{
if (p != VK_NULL_HANDLE)
{
vkDestroyPipeline(g_vulkan_context->GetDevice(), p, nullptr);
p = VK_NULL_HANDLE;
}
}
void SafeDestroyPipelineLayout(VkPipelineLayout& pl)
{
if (pl != VK_NULL_HANDLE)
{
vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), pl, nullptr);
pl = VK_NULL_HANDLE;
}
}
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl)
{
if (dsl != VK_NULL_HANDLE)
{
vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), dsl, nullptr);
dsl = VK_NULL_HANDLE;
}
}
void SafeDestroyBufferView(VkBufferView& bv)
{
if (bv != VK_NULL_HANDLE)
{
vkDestroyBufferView(g_vulkan_context->GetDevice(), bv, nullptr);
bv = VK_NULL_HANDLE;
}
}
void SafeDestroyImageView(VkImageView& iv)
{
if (iv != VK_NULL_HANDLE)
{
vkDestroyImageView(g_vulkan_context->GetDevice(), iv, nullptr);
iv = VK_NULL_HANDLE;
}
}
void SafeDestroySampler(VkSampler& samp)
{
if (samp != VK_NULL_HANDLE)
{
vkDestroySampler(g_vulkan_context->GetDevice(), samp, nullptr);
samp = VK_NULL_HANDLE;
}
}
void SafeDestroySemaphore(VkSemaphore& sem)
{
if (sem != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), sem, nullptr);
sem = VK_NULL_HANDLE;
}
}
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds)
{
if (ds != VK_NULL_HANDLE)
{
g_vulkan_context->FreeGlobalDescriptorSet(ds);
ds = VK_NULL_HANDLE;
}
}
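// Illustrative sketch (hypothetical member cleanup, not part of this file): these helpers make
// teardown idempotent and order-insensitive, so destruction code can simply do
//
//   Vulkan::Util::SafeDestroyPipeline(m_pipeline);
//   Vulkan::Util::SafeDestroyPipelineLayout(m_pipeline_layout);
//   Vulkan::Util::SafeDestroyDescriptorSetLayout(m_ds_layout);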
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask)
{
VkBufferMemoryBarrier buffer_info = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
src_access_mask, // VkAccessFlags srcAccessMask
dst_access_mask, // VkAccessFlags dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
buffer, // VkBuffer buffer
offset, // VkDeviceSize offset
size // VkDeviceSize size
};
vkCmdPipelineBarrier(
command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &buffer_info, 0, nullptr);
}
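// Illustrative usage sketch (hypothetical values, not part of this file): making a transfer
// write visible to vertex attribute reads would look like
//
//   Vulkan::Util::BufferMemoryBarrier(cmdbuf, vertex_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
//       VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, 0, VK_WHOLE_SIZE,
//       VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);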
void AddPointerToChain(void* head, const void* ptr)
{
VkBaseInStructure* last_st = static_cast<VkBaseInStructure*>(head);
while (last_st->pNext)
{
if (last_st->pNext == ptr)
return;
last_st = const_cast<VkBaseInStructure*>(last_st->pNext);
}
last_st->pNext = static_cast<const VkBaseInStructure*>(ptr);
}
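// Illustrative sketch (standard Vulkan structures, hypothetical usage, not part of this file):
// appending a feature struct to a pNext chain before device creation:
//
//   VkPhysicalDeviceFeatures2 features2 = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2};
//   VkPhysicalDeviceLineRasterizationFeaturesEXT line_raster = {
//       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT};
//   Vulkan::Util::AddPointerToChain(&features2, &line_raster);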
const char* VkResultToString(VkResult res)
{
switch (res)
{
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
return "VK_NOT_READY";
case VK_TIMEOUT:
return "VK_TIMEOUT";
case VK_EVENT_SET:
return "VK_EVENT_SET";
case VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
default:
return "UNKNOWN_VK_RESULT";
}
}
const char* PresentModeToString(VkPresentModeKHR mode)
{
switch (mode)
{
case VK_PRESENT_MODE_IMMEDIATE_KHR:
return "VK_PRESENT_MODE_IMMEDIATE_KHR";
case VK_PRESENT_MODE_MAILBOX_KHR:
return "VK_PRESENT_MODE_MAILBOX_KHR";
case VK_PRESENT_MODE_FIFO_KHR:
return "VK_PRESENT_MODE_FIFO_KHR";
case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
return "VK_PRESENT_MODE_FIFO_RELAXED_KHR";
case VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR:
return "VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR";
case VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR:
return "VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR";
default:
return "UNKNOWN_VK_PRESENT_MODE";
}
}
void LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap);
va_end(ap);
Console.Error(
"(%s) %s (%d: %s)", func_name, real_msg.c_str(), static_cast<int>(res), VkResultToString(res));
}
} // namespace Util
} // namespace Vulkan

View File

@ -34,7 +34,6 @@
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\glad\include;$(SolutionDir)3rdparty\glslang\glslang;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)3rdparty\rapidyaml\rapidyaml\ext\c4core\src\c4\ext\fast_float\include</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)3rdparty\libpng</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)3rdparty\jpgd</AdditionalIncludeDirectories>
@ -77,16 +76,6 @@
<ClCompile Include="StringUtil.cpp" />
<ClCompile Include="SettingsWrapper.cpp" />
<ClCompile Include="Timer.cpp" />
<ClCompile Include="Vulkan\vk_mem_alloc.cpp" />
<ClCompile Include="Vulkan\Builders.cpp" />
<ClCompile Include="Vulkan\Context.cpp" />
<ClCompile Include="Vulkan\Loader.cpp" />
<ClCompile Include="Vulkan\ShaderCache.cpp" />
<ClCompile Include="Vulkan\ShaderCompiler.cpp" />
<ClCompile Include="Vulkan\StreamBuffer.cpp" />
<ClCompile Include="Vulkan\SwapChain.cpp" />
<ClCompile Include="Vulkan\Texture.cpp" />
<ClCompile Include="Vulkan\Util.cpp" />
<ClCompile Include="WAVWriter.cpp" />
<ClCompile Include="WindowInfo.cpp" />
<ClCompile Include="Perf.cpp" />
@ -117,7 +106,6 @@
</ItemGroup>
<ItemGroup>
<MASM Include="FastJmp.asm" />
<None Include="Vulkan\EntryPoints.inl" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="Align.h" />
@ -160,16 +148,6 @@
<ClInclude Include="SafeArray.h" />
<ClInclude Include="ThreadPool.h" />
<ClInclude Include="Timer.h" />
<ClInclude Include="Vulkan\Builders.h" />
<ClInclude Include="Vulkan\Context.h" />
<ClInclude Include="Vulkan\EntryPoints.h" />
<ClInclude Include="Vulkan\Loader.h" />
<ClInclude Include="Vulkan\ShaderCache.h" />
<ClInclude Include="Vulkan\ShaderCompiler.h" />
<ClInclude Include="Vulkan\StreamBuffer.h" />
<ClInclude Include="Vulkan\SwapChain.h" />
<ClInclude Include="Vulkan\Texture.h" />
<ClInclude Include="Vulkan\Util.h" />
<ClInclude Include="WAVWriter.h" />
<ClInclude Include="WindowInfo.h" />
<ClInclude Include="Threading.h" />
@ -205,9 +183,6 @@
<ProjectReference Include="..\3rdparty\glad\glad.vcxproj">
<Project>{c0293b32-5acf-40f0-aa6c-e6da6f3bf33a}</Project>
</ProjectReference>
<ProjectReference Include="..\3rdparty\glslang\glslang.vcxproj">
<Project>{ef6834a9-11f3-4331-bc34-21b325abb180}</Project>
</ProjectReference>
<ProjectReference Include="..\3rdparty\jpgd\jpgd.vcxproj">
<Project>{ed2f21fd-0a36-4a8f-9b90-e7d92a2acb63}</Project>
</ProjectReference>

View File

@ -103,39 +103,9 @@
<ClCompile Include="ProgressCallback.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Vulkan\Builders.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\Context.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\Loader.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\ShaderCache.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\ShaderCompiler.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\StreamBuffer.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\SwapChain.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\Texture.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="Vulkan\Util.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="MD5Digest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Vulkan\vk_mem_alloc.cpp">
<Filter>Source Files\Vulkan</Filter>
</ClCompile>
<ClCompile Include="StackWalker.cpp">
<Filter>Source Files</Filter>
</ClCompile>
@ -330,36 +300,6 @@
<ClInclude Include="HashCombine.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Vulkan\ShaderCompiler.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\StreamBuffer.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\SwapChain.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\Texture.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\Util.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\Builders.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\Context.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\EntryPoints.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\Loader.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Vulkan\ShaderCache.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="MD5Digest.h">
<Filter>Header Files</Filter>
</ClInclude>
@ -419,17 +359,6 @@
<Filter Include="Header Files">
<UniqueIdentifier>{eef579af-e6a8-4d3b-a88e-c0e4cad9e5d8}</UniqueIdentifier>
</Filter>
<Filter Include="Source Files\Vulkan">
<UniqueIdentifier>{94154238-8b02-44f8-a7b8-3612e7bfa33c}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\Vulkan">
<UniqueIdentifier>{46f36c68-0e0e-4acd-a621-3365e3167c4f}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<None Include="Vulkan\EntryPoints.inl">
<Filter>Source Files\Vulkan</Filter>
</None>
</ItemGroup>
<ItemGroup>
<MASM Include="FastJmp.asm">

View File

@ -640,11 +640,44 @@ if(USE_VULKAN)
list(APPEND pcsx2GSSources
GS/Renderers/Vulkan/GSDeviceVK.cpp
GS/Renderers/Vulkan/GSTextureVK.cpp
GS/Renderers/Vulkan/vk_mem_alloc.cpp
GS/Renderers/Vulkan/VKBuilders.cpp
GS/Renderers/Vulkan/VKContext.cpp
GS/Renderers/Vulkan/VKLoader.cpp
GS/Renderers/Vulkan/VKShaderCache.cpp
GS/Renderers/Vulkan/VKStreamBuffer.cpp
GS/Renderers/Vulkan/VKSwapChain.cpp
GS/Renderers/Vulkan/VKTexture.cpp
GS/Renderers/Vulkan/VKUtil.cpp
)
list(APPEND pcsx2GSHeaders
GS/Renderers/Vulkan/GSDeviceVK.h
GS/Renderers/Vulkan/GSTextureVK.h
GS/Renderers/Vulkan/VKBuilders.h
GS/Renderers/Vulkan/VKContext.h
GS/Renderers/Vulkan/VKEntryPoints.h
GS/Renderers/Vulkan/VKEntryPoints.inl
GS/Renderers/Vulkan/VKLoader.h
GS/Renderers/Vulkan/VKShaderCache.h
GS/Renderers/Vulkan/VKStreamBuffer.h
GS/Renderers/Vulkan/VKSwapChain.h
GS/Renderers/Vulkan/VKTexture.h
GS/Renderers/Vulkan/VKUtil.h
)
target_link_libraries(PCSX2_FLAGS INTERFACE Vulkan-Headers glslang)
if(APPLE)
# Needed for Metal surface creation.
target_compile_options(common PRIVATE -fobjc-arc)
target_link_options(common PRIVATE -fobjc-link-runtime)
elseif(NOT WIN32)
if(X11_API)
target_compile_definitions(common PUBLIC "VULKAN_USE_X11=1")
endif()
if(WAYLAND_API)
target_compile_definitions(common PUBLIC "VULKAN_USE_WAYLAND=1")
endif()
endif()
endif()
set(pcsx2GSMetalShaders

View File

@ -15,19 +15,18 @@
#include "PrecompiledHeader.h"
#include "GS/GS.h"
#include "GS/Renderers/Vulkan/GSDeviceVK.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "GS/Renderers/Vulkan/VKShaderCache.h"
#include "GS/Renderers/Vulkan/VKSwapChain.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/GS.h"
#include "GS/GSGL.h"
#include "GS/GSPerfMon.h"
#include "GS/GSUtil.h"
#include "Host.h"
#include "ShaderCacheVersion.h"
#include "common/Vulkan/Builders.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/ShaderCache.h"
#include "common/Vulkan/SwapChain.h"
#include "common/Vulkan/Util.h"
#include "Host.h"
#include "common/Align.h"
#include "common/Path.h"
#include "common/ScopedGuard.h"
@ -88,7 +87,7 @@ GSDeviceVK::~GSDeviceVK()
static void GPUListToAdapterNames(std::vector<std::string>* dest, VkInstance instance)
{
Vulkan::Context::GPUList gpus = Vulkan::Context::EnumerateGPUs(instance);
VKContext::GPUList gpus = VKContext::EnumerateGPUs(instance);
dest->clear();
dest->reserve(gpus.size());
for (auto& [gpu, name] : gpus)
@ -98,25 +97,17 @@ static void GPUListToAdapterNames(std::vector<std::string>* dest, VkInstance ins
void GSDeviceVK::GetAdaptersAndFullscreenModes(
std::vector<std::string>* adapters, std::vector<std::string>* fullscreen_modes)
{
std::vector<Vulkan::SwapChain::FullscreenModeInfo> fsmodes;
if (g_vulkan_context)
{
if (adapters)
GPUListToAdapterNames(adapters, g_vulkan_context->GetVulkanInstance());
if (fullscreen_modes)
{
fsmodes = Vulkan::SwapChain::GetSurfaceFullscreenModes(
g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), WindowInfo());
}
}
else
{
if (Vulkan::LoadVulkanLibrary())
{
ScopedGuard lib_guard([]() { Vulkan::UnloadVulkanLibrary(); });
const VkInstance instance = Vulkan::Context::CreateVulkanInstance(WindowInfo(), false, false);
const VkInstance instance = VKContext::CreateVulkanInstance(WindowInfo(), false, false);
if (instance != VK_NULL_HANDLE)
{
if (Vulkan::LoadVulkanInstanceFunctions(instance))
@ -126,16 +117,6 @@ void GSDeviceVK::GetAdaptersAndFullscreenModes(
}
}
}
if (!fsmodes.empty())
{
fullscreen_modes->clear();
fullscreen_modes->reserve(fsmodes.size());
for (const Vulkan::SwapChain::FullscreenModeInfo& fmi : fsmodes)
{
fullscreen_modes->push_back(GetFullscreenModeString(fmi.width, fmi.height, fmi.refresh_rate));
}
}
}
bool GSDeviceVK::IsSuitableDefaultRenderer()
@ -190,9 +171,6 @@ bool GSDeviceVK::Create()
if (!CreateDeviceAndSwapChain())
return false;
Vulkan::ShaderCache::Create(GSConfig.DisableShaderCache ? std::string_view() : std::string_view(EmuFolders::Cache),
SHADER_CACHE_VERSION, GSConfig.UseDebugDevice);
if (!CheckFeatures())
{
Console.Error("Your GPU does not support the required Vulkan features.");
@ -267,8 +245,7 @@ void GSDeviceVK::Destroy()
g_vulkan_context->WaitForGPUIdle();
m_swap_chain.reset();
Vulkan::ShaderCache::Destroy();
Vulkan::Context::Destroy();
VKContext::Destroy();
}
}
@ -298,7 +275,7 @@ bool GSDeviceVK::UpdateWindow()
m_swap_chain.reset();
}
VkSurfaceKHR surface = Vulkan::SwapChain::CreateVulkanSurface(
VkSurfaceKHR surface = VKSwapChain::CreateVulkanSurface(
g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &m_window_info);
if (surface == VK_NULL_HANDLE)
{
@ -306,12 +283,12 @@ bool GSDeviceVK::UpdateWindow()
return false;
}
m_swap_chain = Vulkan::SwapChain::Create(m_window_info, surface, GetPreferredPresentModeForVsyncMode(m_vsync_mode),
m_swap_chain = VKSwapChain::Create(m_window_info, surface, GetPreferredPresentModeForVsyncMode(m_vsync_mode),
Pcsx2Config::GSOptions::TriStateToOptionalBoolean(GSConfig.ExclusiveFullscreenControl));
if (!m_swap_chain)
{
Console.Error("Failed to create swap chain");
Vulkan::SwapChain::DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &m_window_info, surface);
VKSwapChain::DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &m_window_info, surface);
return false;
}
@ -422,7 +399,7 @@ GSDevice::PresentResult GSDeviceVK::BeginPresent(bool frame_skip)
if (!m_swap_chain->RecreateSurface(m_window_info))
{
Console.Error("Failed to recreate surface after loss");
g_vulkan_context->ExecuteCommandBuffer(Vulkan::Context::WaitType::None);
g_vulkan_context->ExecuteCommandBuffer(VKContext::WaitType::None);
return PresentResult::FrameSkipped;
}
@ -435,7 +412,7 @@ GSDevice::PresentResult GSDeviceVK::BeginPresent(bool frame_skip)
{
// Still submit the command buffer, otherwise we'll end up with several frames waiting.
LOG_VULKAN_ERROR(res, "vkAcquireNextImageKHR() failed: ");
g_vulkan_context->ExecuteCommandBuffer(Vulkan::Context::WaitType::None);
g_vulkan_context->ExecuteCommandBuffer(VKContext::WaitType::None);
return PresentResult::FrameSkipped;
}
}
@ -443,7 +420,7 @@ GSDevice::PresentResult GSDeviceVK::BeginPresent(bool frame_skip)
VkCommandBuffer cmdbuffer = g_vulkan_context->GetCurrentCommandBuffer();
// Swap chain images start in undefined
Vulkan::Texture& swap_chain_texture = m_swap_chain->GetCurrentTexture();
VKTexture& swap_chain_texture = m_swap_chain->GetCurrentTexture();
swap_chain_texture.OverrideImageLayout(VK_IMAGE_LAYOUT_UNDEFINED);
swap_chain_texture.TransitionToLayout(cmdbuffer, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
@ -579,8 +556,7 @@ bool GSDeviceVK::CreateDeviceAndSwapChain()
if (!AcquireWindow(true))
return false;
VkInstance instance =
Vulkan::Context::CreateVulkanInstance(m_window_info, enable_debug_utils, enable_validation_layer);
VkInstance instance = VKContext::CreateVulkanInstance(m_window_info, enable_debug_utils, enable_validation_layer);
if (instance == VK_NULL_HANDLE)
{
if (enable_debug_utils || enable_validation_layer)
@ -588,8 +564,7 @@ bool GSDeviceVK::CreateDeviceAndSwapChain()
// Try again without the validation layer.
enable_debug_utils = false;
enable_validation_layer = false;
instance =
Vulkan::Context::CreateVulkanInstance(m_window_info, enable_debug_utils, enable_validation_layer);
instance = VKContext::CreateVulkanInstance(m_window_info, enable_debug_utils, enable_validation_layer);
if (instance == VK_NULL_HANDLE)
{
Host::ReportErrorAsync(
@ -608,7 +583,7 @@ bool GSDeviceVK::CreateDeviceAndSwapChain()
return false;
}
Vulkan::Context::GPUList gpus = Vulkan::Context::EnumerateGPUs(instance);
VKContext::GPUList gpus = VKContext::EnumerateGPUs(instance);
if (gpus.empty())
{
Host::ReportErrorAsync("Error", "No physical devices found. Does your GPU and/or driver support Vulkan?");
@ -644,12 +619,12 @@ bool GSDeviceVK::CreateDeviceAndSwapChain()
};
if (m_window_info.type != WindowInfo::Type::Surfaceless)
{
surface = Vulkan::SwapChain::CreateVulkanSurface(instance, gpus[gpu_index].first, &m_window_info);
surface = VKSwapChain::CreateVulkanSurface(instance, gpus[gpu_index].first, &m_window_info);
if (surface == VK_NULL_HANDLE)
return false;
}
if (!Vulkan::Context::Create(instance, surface, gpus[gpu_index].first, !GSConfig.DisableThreadedPresentation,
if (!VKContext::Create(instance, surface, gpus[gpu_index].first, !GSConfig.DisableThreadedPresentation,
enable_debug_utils, enable_validation_layer))
{
Console.Error("Failed to create Vulkan context");
@ -659,9 +634,8 @@ bool GSDeviceVK::CreateDeviceAndSwapChain()
// NOTE: This is assigned afterwards, because some platforms can modify the window info (e.g. Metal).
if (surface != VK_NULL_HANDLE)
{
m_swap_chain =
Vulkan::SwapChain::Create(m_window_info, surface, GetPreferredPresentModeForVsyncMode(m_vsync_mode),
Pcsx2Config::GSOptions::TriStateToOptionalBoolean(GSConfig.ExclusiveFullscreenControl));
m_swap_chain = VKSwapChain::Create(m_window_info, surface, GetPreferredPresentModeForVsyncMode(m_vsync_mode),
Pcsx2Config::GSOptions::TriStateToOptionalBoolean(GSConfig.ExclusiveFullscreenControl));
if (!m_swap_chain)
{
Console.Error("Failed to create swap chain");
@ -1692,8 +1666,8 @@ bool GSDeviceVK::CreateNullTexture()
m_null_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearColorImage(cmdbuf, m_null_texture.GetImage(), m_null_texture.GetLayout(), &ccv, 1, &srr);
m_null_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_GENERAL);
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_null_texture.GetImage(), "Null texture");
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_null_texture.GetView(), "Null texture view");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_null_texture.GetImage(), "Null texture");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_null_texture.GetView(), "Null texture view");
return true;
}
@ -1752,13 +1726,13 @@ bool GSDeviceVK::CreatePipelineLayouts()
dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, NUM_CONVERT_SAMPLERS, VK_SHADER_STAGE_FRAGMENT_BIT);
if ((m_utility_ds_layout = dslb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_utility_ds_layout, "Convert descriptor layout");
Vulkan::SetObjectName(dev, m_utility_ds_layout, "Convert descriptor layout");
plb.AddPushConstants(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, CONVERT_PUSH_CONSTANTS_SIZE);
plb.AddDescriptorSet(m_utility_ds_layout);
if ((m_utility_pipeline_layout = plb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_utility_ds_layout, "Convert pipeline layout");
Vulkan::SetObjectName(dev, m_utility_ds_layout, "Convert pipeline layout");
//////////////////////////////////////////////////////////////////////////
// Draw/TFX Pipeline Layout
@ -1770,24 +1744,24 @@ bool GSDeviceVK::CreatePipelineLayouts()
dslb.AddBinding(2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_VERTEX_BIT);
if ((m_tfx_ubo_ds_layout = dslb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_tfx_ubo_ds_layout, "TFX UBO descriptor layout");
Vulkan::SetObjectName(dev, m_tfx_ubo_ds_layout, "TFX UBO descriptor layout");
dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
dslb.AddBinding(1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
if ((m_tfx_sampler_ds_layout = dslb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_tfx_sampler_ds_layout, "TFX sampler descriptor layout");
Vulkan::SetObjectName(dev, m_tfx_sampler_ds_layout, "TFX sampler descriptor layout");
dslb.AddBinding(0, m_features.texture_barrier ? VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT : VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
dslb.AddBinding(1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
if ((m_tfx_rt_texture_ds_layout = dslb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_tfx_rt_texture_ds_layout, "TFX RT texture descriptor layout");
Vulkan::SetObjectName(dev, m_tfx_rt_texture_ds_layout, "TFX RT texture descriptor layout");
plb.AddDescriptorSet(m_tfx_ubo_ds_layout);
plb.AddDescriptorSet(m_tfx_sampler_ds_layout);
plb.AddDescriptorSet(m_tfx_rt_texture_ds_layout);
if ((m_tfx_pipeline_layout = plb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_tfx_pipeline_layout, "TFX pipeline layout");
Vulkan::SetObjectName(dev, m_tfx_pipeline_layout, "TFX pipeline layout");
return true;
}
@ -1890,7 +1864,7 @@ bool GSDeviceVK::CompileConvertPipelines()
VkShaderModule vs = GetUtilityVertexShader(*shader);
if (vs == VK_NULL_HANDLE)
return false;
ScopedGuard vs_guard([&vs]() { Vulkan::Util::SafeDestroyShaderModule(vs); });
ScopedGuard vs_guard([&vs]() { Vulkan::SafeDestroyShaderModule(vs); });
Vulkan::GraphicsPipelineBuilder gpb;
SetPipelineProvokingVertex(m_features, gpb);
@ -1962,7 +1936,7 @@ bool GSDeviceVK::CompileConvertPipelines()
if (ps == VK_NULL_HANDLE)
return false;
ScopedGuard ps_guard([&ps]() { Vulkan::Util::SafeDestroyShaderModule(ps); });
ScopedGuard ps_guard([&ps]() { Vulkan::SafeDestroyShaderModule(ps); });
gpb.SetFragmentShader(ps);
m_convert[index] =
@ -1970,7 +1944,7 @@ bool GSDeviceVK::CompileConvertPipelines()
if (!m_convert[index])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_convert[index], "Convert pipeline %d", i);
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_convert[index], "Convert pipeline %d", i);
if (i == ShaderConvert::COPY)
{
@ -1987,7 +1961,7 @@ bool GSDeviceVK::CompileConvertPipelines()
if (!m_color_copy[i])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_color_copy[i],
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_color_copy[i],
"Color copy pipeline (r=%u, g=%u, b=%u, a=%u)", i & 1u, (i >> 1) & 1u, (i >> 2) & 1u,
(i >> 3) & 1u);
}
@ -2010,7 +1984,7 @@ bool GSDeviceVK::CompileConvertPipelines()
if (!arr[ds][fbl])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), arr[ds][fbl],
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), arr[ds][fbl],
"HDR %s/copy pipeline (ds=%u, fbl=%u)", is_setup ? "setup" : "finish", i, ds, fbl);
}
}
@ -2038,7 +2012,7 @@ bool GSDeviceVK::CompileConvertPipelines()
if (ps == VK_NULL_HANDLE)
return false;
ScopedGuard ps_guard([&ps]() { Vulkan::Util::SafeDestroyShaderModule(ps); });
ScopedGuard ps_guard([&ps]() { Vulkan::SafeDestroyShaderModule(ps); });
gpb.SetPipelineLayout(m_utility_pipeline_layout);
gpb.SetFragmentShader(ps);
gpb.SetNoDepthTestState();
@ -2055,7 +2029,7 @@ bool GSDeviceVK::CompileConvertPipelines()
if (!m_date_image_setup_pipelines[ds][datm])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_date_image_setup_pipelines[ds][datm],
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_date_image_setup_pipelines[ds][datm],
"DATE image clear pipeline (ds=%u, datm=%u)", ds, datm);
}
}
@ -2082,7 +2056,7 @@ bool GSDeviceVK::CompilePresentPipelines()
VkShaderModule vs = GetUtilityVertexShader(*shader);
if (vs == VK_NULL_HANDLE)
return false;
ScopedGuard vs_guard([&vs]() { Vulkan::Util::SafeDestroyShaderModule(vs); });
ScopedGuard vs_guard([&vs]() { Vulkan::SafeDestroyShaderModule(vs); });
Vulkan::GraphicsPipelineBuilder gpb;
SetPipelineProvokingVertex(m_features, gpb);
@ -2106,7 +2080,7 @@ bool GSDeviceVK::CompilePresentPipelines()
if (ps == VK_NULL_HANDLE)
return false;
ScopedGuard ps_guard([&ps]() { Vulkan::Util::SafeDestroyShaderModule(ps); });
ScopedGuard ps_guard([&ps]() { Vulkan::SafeDestroyShaderModule(ps); });
gpb.SetFragmentShader(ps);
m_present[index] =
@ -2114,7 +2088,7 @@ bool GSDeviceVK::CompilePresentPipelines()
if (!m_present[index])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_present[index], "Present pipeline %d", i);
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_present[index], "Present pipeline %d", i);
}
return true;
@ -2138,7 +2112,7 @@ bool GSDeviceVK::CompileInterlacePipelines()
VkShaderModule vs = GetUtilityVertexShader(*shader);
if (vs == VK_NULL_HANDLE)
return false;
ScopedGuard vs_guard([&vs]() { Vulkan::Util::SafeDestroyShaderModule(vs); });
ScopedGuard vs_guard([&vs]() { Vulkan::SafeDestroyShaderModule(vs); });
Vulkan::GraphicsPipelineBuilder gpb;
SetPipelineProvokingVertex(m_features, gpb);
@ -2162,11 +2136,11 @@ bool GSDeviceVK::CompileInterlacePipelines()
m_interlace[i] =
gpb.Create(g_vulkan_context->GetDevice(), g_vulkan_shader_cache->GetPipelineCache(true), false);
Vulkan::Util::SafeDestroyShaderModule(ps);
Vulkan::SafeDestroyShaderModule(ps);
if (!m_interlace[i])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_convert[i], "Interlace pipeline %d", i);
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_convert[i], "Interlace pipeline %d", i);
}
return true;
@ -2189,7 +2163,7 @@ bool GSDeviceVK::CompileMergePipelines()
VkShaderModule vs = GetUtilityVertexShader(*shader);
if (vs == VK_NULL_HANDLE)
return false;
ScopedGuard vs_guard([&vs]() { Vulkan::Util::SafeDestroyShaderModule(vs); });
ScopedGuard vs_guard([&vs]() { Vulkan::SafeDestroyShaderModule(vs); });
Vulkan::GraphicsPipelineBuilder gpb;
SetPipelineProvokingVertex(m_features, gpb);
@ -2213,11 +2187,11 @@ bool GSDeviceVK::CompileMergePipelines()
VK_BLEND_OP_ADD, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD);
m_merge[i] = gpb.Create(g_vulkan_context->GetDevice(), g_vulkan_shader_cache->GetPipelineCache(true), false);
Vulkan::Util::SafeDestroyShaderModule(ps);
Vulkan::SafeDestroyShaderModule(ps);
if (!m_merge[i])
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_convert[i], "Merge pipeline %d", i);
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_convert[i], "Merge pipeline %d", i);
}
return true;
@ -2261,8 +2235,8 @@ bool GSDeviceVK::CompilePostProcessingPipelines()
VkShaderModule vs = GetUtilityVertexShader(*vshader);
VkShaderModule ps = GetUtilityFragmentShader(psource, "ps_main");
ScopedGuard shader_guard([&vs, &ps]() {
Vulkan::Util::SafeDestroyShaderModule(vs);
Vulkan::Util::SafeDestroyShaderModule(ps);
Vulkan::SafeDestroyShaderModule(vs);
Vulkan::SafeDestroyShaderModule(ps);
});
if (vs == VK_NULL_HANDLE || ps == VK_NULL_HANDLE)
return false;
@ -2274,7 +2248,7 @@ bool GSDeviceVK::CompilePostProcessingPipelines()
if (!m_fxaa_pipeline)
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_fxaa_pipeline, "FXAA pipeline");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_fxaa_pipeline, "FXAA pipeline");
}
{
@ -2288,8 +2262,8 @@ bool GSDeviceVK::CompilePostProcessingPipelines()
VkShaderModule vs = GetUtilityVertexShader(*shader);
VkShaderModule ps = GetUtilityFragmentShader(*shader);
ScopedGuard shader_guard([&vs, &ps]() {
Vulkan::Util::SafeDestroyShaderModule(vs);
Vulkan::Util::SafeDestroyShaderModule(ps);
Vulkan::SafeDestroyShaderModule(vs);
Vulkan::SafeDestroyShaderModule(ps);
});
if (vs == VK_NULL_HANDLE || ps == VK_NULL_HANDLE)
return false;
@ -2301,7 +2275,7 @@ bool GSDeviceVK::CompilePostProcessingPipelines()
if (!m_shadeboost_pipeline)
return false;
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_shadeboost_pipeline, "Shadeboost pipeline");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_shadeboost_pipeline, "Shadeboost pipeline");
}
return true;
@ -2317,13 +2291,13 @@ bool GSDeviceVK::CompileCASPipelines()
dslb.AddBinding(1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT);
if ((m_cas_ds_layout = dslb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_cas_ds_layout, "CAS descriptor layout");
Vulkan::SetObjectName(dev, m_cas_ds_layout, "CAS descriptor layout");
plb.AddPushConstants(VK_SHADER_STAGE_COMPUTE_BIT, 0, NUM_CAS_CONSTANTS * sizeof(u32));
plb.AddDescriptorSet(m_cas_ds_layout);
if ((m_cas_pipeline_layout = plb.Create(dev)) == VK_NULL_HANDLE)
return false;
Vulkan::Util::SetObjectName(dev, m_cas_pipeline_layout, "CAS pipeline layout");
Vulkan::SetObjectName(dev, m_cas_pipeline_layout, "CAS pipeline layout");
// we use specialization constants to avoid compiling it twice
std::optional<std::string> cas_source(Host::ReadResourceFileToString("shaders/vulkan/cas.glsl"));
@ -2331,7 +2305,7 @@ bool GSDeviceVK::CompileCASPipelines()
return false;
VkShaderModule mod = g_vulkan_shader_cache->GetComputeShader(cas_source->c_str());
ScopedGuard mod_guard = [&mod]() { Vulkan::Util::SafeDestroyShaderModule(mod); };
ScopedGuard mod_guard = [&mod]() { Vulkan::SafeDestroyShaderModule(mod); };
if (mod == VK_NULL_HANDLE)
return false;
@ -2365,7 +2339,7 @@ bool GSDeviceVK::CompileImGuiPipeline()
Console.Error("Failed to compile ImGui vertex shader");
return false;
}
ScopedGuard vs_guard([&vs]() { Vulkan::Util::SafeDestroyShaderModule(vs); });
ScopedGuard vs_guard([&vs]() { Vulkan::SafeDestroyShaderModule(vs); });
VkShaderModule ps = GetUtilityFragmentShader(glsl.value(), "ps_main");
if (ps == VK_NULL_HANDLE)
@ -2373,7 +2347,7 @@ bool GSDeviceVK::CompileImGuiPipeline()
Console.Error("Failed to compile ImGui pixel shader");
return false;
}
ScopedGuard ps_guard([&ps]() { Vulkan::Util::SafeDestroyShaderModule(ps); });
ScopedGuard ps_guard([&ps]() { Vulkan::SafeDestroyShaderModule(ps); });
Vulkan::GraphicsPipelineBuilder gpb;
SetPipelineProvokingVertex(m_features, gpb);
@ -2400,7 +2374,7 @@ bool GSDeviceVK::CompileImGuiPipeline()
return false;
}
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_imgui_pipeline, "ImGui pipeline");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_imgui_pipeline, "ImGui pipeline");
return true;
}
@ -2465,7 +2439,7 @@ void GSDeviceVK::RenderImGui()
SetScissor(GSVector4i(clip).max_i32(GSVector4i::zero()));
// Since we don't have the GSTexture...
Vulkan::Texture* tex = static_cast<Vulkan::Texture*>(pcmd->GetTexID());
VKTexture* tex = static_cast<VKTexture*>(pcmd->GetTexID());
if (m_utility_texture != tex)
{
m_utility_texture = tex;
@ -2493,7 +2467,7 @@ void GSDeviceVK::RenderBlankFrame()
}
VkCommandBuffer cmdbuffer = g_vulkan_context->GetCurrentCommandBuffer();
Vulkan::Texture& sctex = m_swap_chain->GetCurrentTexture();
VKTexture& sctex = m_swap_chain->GetCurrentTexture();
sctex.TransitionToLayout(cmdbuffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
constexpr VkImageSubresourceRange srr = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
@ -2576,52 +2550,52 @@ bool GSDeviceVK::DoCAS(GSTexture* sTex, GSTexture* dTex, bool sharpen_only, cons
void GSDeviceVK::DestroyResources()
{
g_vulkan_context->ExecuteCommandBuffer(Vulkan::Context::WaitType::Sleep);
g_vulkan_context->ExecuteCommandBuffer(VKContext::WaitType::Sleep);
if (m_tfx_descriptor_sets[0] != VK_NULL_HANDLE)
g_vulkan_context->FreeGlobalDescriptorSet(m_tfx_descriptor_sets[0]);
for (auto& it : m_tfx_pipelines)
Vulkan::Util::SafeDestroyPipeline(it.second);
Vulkan::SafeDestroyPipeline(it.second);
for (auto& it : m_tfx_fragment_shaders)
Vulkan::Util::SafeDestroyShaderModule(it.second);
Vulkan::SafeDestroyShaderModule(it.second);
for (auto& it : m_tfx_vertex_shaders)
Vulkan::Util::SafeDestroyShaderModule(it.second);
Vulkan::SafeDestroyShaderModule(it.second);
for (VkPipeline& it : m_interlace)
Vulkan::Util::SafeDestroyPipeline(it);
Vulkan::SafeDestroyPipeline(it);
for (VkPipeline& it : m_merge)
Vulkan::Util::SafeDestroyPipeline(it);
Vulkan::SafeDestroyPipeline(it);
for (VkPipeline& it : m_color_copy)
Vulkan::Util::SafeDestroyPipeline(it);
Vulkan::SafeDestroyPipeline(it);
for (VkPipeline& it : m_present)
Vulkan::Util::SafeDestroyPipeline(it);
Vulkan::SafeDestroyPipeline(it);
for (VkPipeline& it : m_convert)
Vulkan::Util::SafeDestroyPipeline(it);
Vulkan::SafeDestroyPipeline(it);
for (u32 ds = 0; ds < 2; ds++)
{
for (u32 fbl = 0; fbl < 2; fbl++)
{
Vulkan::Util::SafeDestroyPipeline(m_hdr_setup_pipelines[ds][fbl]);
Vulkan::Util::SafeDestroyPipeline(m_hdr_finish_pipelines[ds][fbl]);
Vulkan::SafeDestroyPipeline(m_hdr_setup_pipelines[ds][fbl]);
Vulkan::SafeDestroyPipeline(m_hdr_finish_pipelines[ds][fbl]);
}
}
for (u32 ds = 0; ds < 2; ds++)
{
for (u32 datm = 0; datm < 2; datm++)
{
Vulkan::Util::SafeDestroyPipeline(m_date_image_setup_pipelines[ds][datm]);
Vulkan::SafeDestroyPipeline(m_date_image_setup_pipelines[ds][datm]);
}
}
Vulkan::Util::SafeDestroyPipeline(m_fxaa_pipeline);
Vulkan::Util::SafeDestroyPipeline(m_shadeboost_pipeline);
Vulkan::SafeDestroyPipeline(m_fxaa_pipeline);
Vulkan::SafeDestroyPipeline(m_shadeboost_pipeline);
for (VkPipeline& it : m_cas_pipelines)
Vulkan::Util::SafeDestroyPipeline(it);
Vulkan::Util::SafeDestroyPipelineLayout(m_cas_pipeline_layout);
Vulkan::Util::SafeDestroyDescriptorSetLayout(m_cas_ds_layout);
Vulkan::Util::SafeDestroyPipeline(m_imgui_pipeline);
Vulkan::SafeDestroyPipeline(it);
Vulkan::SafeDestroyPipelineLayout(m_cas_pipeline_layout);
Vulkan::SafeDestroyDescriptorSetLayout(m_cas_ds_layout);
Vulkan::SafeDestroyPipeline(m_imgui_pipeline);
for (auto& it : m_samplers)
Vulkan::Util::SafeDestroySampler(it.second);
Vulkan::SafeDestroySampler(it.second);
m_linear_sampler = VK_NULL_HANDLE;
m_point_sampler = VK_NULL_HANDLE;
@ -2646,12 +2620,12 @@ void GSDeviceVK::DestroyResources()
m_expand_index_buffer_allocation = VK_NULL_HANDLE;
}
Vulkan::Util::SafeDestroyPipelineLayout(m_tfx_pipeline_layout);
Vulkan::Util::SafeDestroyDescriptorSetLayout(m_tfx_rt_texture_ds_layout);
Vulkan::Util::SafeDestroyDescriptorSetLayout(m_tfx_sampler_ds_layout);
Vulkan::Util::SafeDestroyDescriptorSetLayout(m_tfx_ubo_ds_layout);
Vulkan::Util::SafeDestroyPipelineLayout(m_utility_pipeline_layout);
Vulkan::Util::SafeDestroyDescriptorSetLayout(m_utility_ds_layout);
Vulkan::SafeDestroyPipelineLayout(m_tfx_pipeline_layout);
Vulkan::SafeDestroyDescriptorSetLayout(m_tfx_rt_texture_ds_layout);
Vulkan::SafeDestroyDescriptorSetLayout(m_tfx_sampler_ds_layout);
Vulkan::SafeDestroyDescriptorSetLayout(m_tfx_ubo_ds_layout);
Vulkan::SafeDestroyPipelineLayout(m_utility_pipeline_layout);
Vulkan::SafeDestroyDescriptorSetLayout(m_utility_ds_layout);
m_null_texture.Destroy(false);
}
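The SafeDestroy* helper bodies are not part of this hunk; only their call sites change from Vulkan::Util:: to Vulkan::. As a hedged sketch of the guarded-destroy pattern these helpers are assumed to follow (the actual implementations live in VKUtil.h, which is not shown in this excerpt):

// Assumed shape only: destroy if valid, then null the handle so repeat calls are harmless.
static inline void SafeDestroyPipeline(VkPipeline& pipeline)
{
if (pipeline == VK_NULL_HANDLE)
return;
vkDestroyPipeline(g_vulkan_context->GetDevice(), pipeline, nullptr);
pipeline = VK_NULL_HANDLE;
}

The shader module, pipeline layout, descriptor set layout, and sampler variants used above presumably follow the same pattern with the matching vkDestroy* call.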
@ -2675,7 +2649,7 @@ VkShaderModule GSDeviceVK::GetTFXVertexShader(GSHWDrawConfig::VSSelector sel)
VkShaderModule mod = g_vulkan_shader_cache->GetVertexShader(ss.str());
if (mod)
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), mod, "TFX Vertex %08X", sel.key);
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), mod, "TFX Vertex %08X", sel.key);
m_tfx_vertex_shaders.emplace(sel.key, mod);
return mod;
@ -2745,7 +2719,7 @@ VkShaderModule GSDeviceVK::GetTFXFragmentShader(const GSHWDrawConfig::PSSelector
VkShaderModule mod = g_vulkan_shader_cache->GetFragmentShader(ss.str());
if (mod)
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), mod, "TFX Fragment %" PRIX64 "%08X", sel.key_hi, sel.key_lo);
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), mod, "TFX Fragment %" PRIX64 "%08X", sel.key_hi, sel.key_lo);
m_tfx_fragment_shaders.emplace(sel, mod);
return mod;
@ -2868,7 +2842,7 @@ VkPipeline GSDeviceVK::CreateTFXPipeline(const PipelineSelector& p)
VkPipeline pipeline = gpb.Create(g_vulkan_context->GetDevice(), g_vulkan_shader_cache->GetPipelineCache(true));
if (pipeline)
{
Vulkan::Util::SetObjectName(
Vulkan::SetObjectName(
g_vulkan_context->GetDevice(), pipeline, "TFX Pipeline %08X/%" PRIX64 "%08X", p.vs.key, p.ps.key_hi, p.ps.key_lo);
}
@ -2914,10 +2888,10 @@ void GSDeviceVK::InitializeState()
m_point_sampler = GetSampler(GSHWDrawConfig::SamplerSelector::Point());
if (m_point_sampler)
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_point_sampler, "Point sampler");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_point_sampler, "Point sampler");
m_linear_sampler = GetSampler(GSHWDrawConfig::SamplerSelector::Linear());
if (m_linear_sampler)
Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), m_point_sampler, "Linear sampler");
Vulkan::SetObjectName(g_vulkan_context->GetDevice(), m_point_sampler, "Linear sampler");
m_tfx_sampler_sel = GSHWDrawConfig::SamplerSelector::Point().key;
m_tfx_sampler = m_point_sampler;
@ -2944,18 +2918,18 @@ bool GSDeviceVK::CreatePersistentDescriptorSets()
m_vertex_stream_buffer.GetBuffer(), 0, VERTEX_BUFFER_SIZE);
}
dsub.Update(dev);
Vulkan::Util::SetObjectName(dev, m_tfx_descriptor_sets[0], "Persistent TFX UBO set");
Vulkan::SetObjectName(dev, m_tfx_descriptor_sets[0], "Persistent TFX UBO set");
return true;
}
static Vulkan::Context::WaitType GetWaitType(bool wait, bool spin)
static VKContext::WaitType GetWaitType(bool wait, bool spin)
{
if (!wait)
return Vulkan::Context::WaitType::None;
return VKContext::WaitType::None;
if (spin)
return Vulkan::Context::WaitType::Spin;
return VKContext::WaitType::Spin;
else
return Vulkan::Context::WaitType::Sleep;
return VKContext::WaitType::Sleep;
}
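GetWaitType collapses the two flags into one of three strategies: no wait, busy-spin, or sleep. A hypothetical call site (the local variable name is an illustration, not something this diff defines):

// Hypothetical usage; 'spin_for_readbacks' stands in for whatever setting the caller consults.
const bool spin_for_readbacks = false;
g_vulkan_context->ExecuteCommandBuffer(GetWaitType(wait_for_completion, spin_for_readbacks));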
void GSDeviceVK::ExecuteCommandBuffer(bool wait_for_completion)
@ -3074,7 +3048,7 @@ void GSDeviceVK::SetBlendConstants(u8 color)
void GSDeviceVK::PSSetShaderResource(int i, GSTexture* sr, bool check_state)
{
const Vulkan::Texture* tex;
const VKTexture* tex;
if (sr)
{
GSTextureVK* vkTex = static_cast<GSTextureVK*>(sr);
@ -3117,7 +3091,7 @@ void GSDeviceVK::PSSetSampler(GSHWDrawConfig::SamplerSelector sel)
void GSDeviceVK::SetUtilityTexture(GSTexture* tex, VkSampler sampler)
{
const Vulkan::Texture* vtex;
const VKTexture* vtex;
if (tex)
{
GSTextureVK* vkTex = static_cast<GSTextureVK*>(tex);
@ -3147,7 +3121,7 @@ void GSDeviceVK::SetUtilityPushConstants(const void* data, u32 size)
void GSDeviceVK::UnbindTexture(GSTextureVK* tex)
{
const Vulkan::Texture* vtex = tex->GetTexturePtr();
const VKTexture* vtex = tex->GetTexturePtr();
for (u32 i = 0; i < NUM_TFX_TEXTURES; i++)
{
if (m_tfx_textures[i] == vtex)

View File

@ -15,18 +15,20 @@
#pragma once
#include "GSTextureVK.h"
#include "GS/GSVector.h"
#include "GS/Renderers/Common/GSDevice.h"
#include "common/Vulkan/StreamBuffer.h"
#include "GS/Renderers/Vulkan/GSTextureVK.h"
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "GS/Renderers/Vulkan/VKStreamBuffer.h"
#include "GS/GSVector.h"
#include "common/HashCombine.h"
#include "vk_mem_alloc.h"
#include <array>
#include <unordered_map>
namespace Vulkan
{
class SwapChain;
class VKSwapChain;
}
class GSDeviceVK final : public GSDevice
@ -109,7 +111,7 @@ public:
};
private:
std::unique_ptr<Vulkan::SwapChain> m_swap_chain;
std::unique_ptr<VKSwapChain> m_swap_chain;
VkDescriptorSetLayout m_utility_ds_layout = VK_NULL_HANDLE;
VkPipelineLayout m_utility_pipeline_layout = VK_NULL_HANDLE;
@ -119,10 +121,10 @@ private:
VkDescriptorSetLayout m_tfx_rt_texture_ds_layout = VK_NULL_HANDLE;
VkPipelineLayout m_tfx_pipeline_layout = VK_NULL_HANDLE;
Vulkan::StreamBuffer m_vertex_stream_buffer;
Vulkan::StreamBuffer m_index_stream_buffer;
Vulkan::StreamBuffer m_vertex_uniform_stream_buffer;
Vulkan::StreamBuffer m_fragment_uniform_stream_buffer;
VKStreamBuffer m_vertex_stream_buffer;
VKStreamBuffer m_index_stream_buffer;
VKStreamBuffer m_vertex_uniform_stream_buffer;
VKStreamBuffer m_fragment_uniform_stream_buffer;
VkBuffer m_expand_index_buffer = VK_NULL_HANDLE;
VmaAllocation m_expand_index_buffer_allocation = VK_NULL_HANDLE;
@ -410,20 +412,20 @@ private:
GSVector4i m_scissor = GSVector4i::zero();
u8 m_blend_constant_color = 0;
std::array<const Vulkan::Texture*, NUM_TFX_TEXTURES> m_tfx_textures{};
std::array<const VKTexture*, NUM_TFX_TEXTURES> m_tfx_textures{};
VkSampler m_tfx_sampler = VK_NULL_HANDLE;
u32 m_tfx_sampler_sel = 0;
std::array<VkDescriptorSet, NUM_TFX_DESCRIPTOR_SETS> m_tfx_descriptor_sets{};
std::array<u32, NUM_TFX_DYNAMIC_OFFSETS> m_tfx_dynamic_offsets{};
const Vulkan::Texture* m_utility_texture = nullptr;
const VKTexture* m_utility_texture = nullptr;
VkSampler m_utility_sampler = VK_NULL_HANDLE;
VkDescriptorSet m_utility_descriptor_set = VK_NULL_HANDLE;
PipelineLayout m_current_pipeline_layout = PipelineLayout::Undefined;
VkPipeline m_current_pipeline = VK_NULL_HANDLE;
Vulkan::Texture m_null_texture;
VKTexture m_null_texture;
// current pipeline selector - we save this in the struct to avoid re-zeroing it every draw
PipelineSelector m_pipeline_selector = {};

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -14,17 +14,19 @@
*/
#include "PrecompiledHeader.h"
#include "GSDeviceVK.h"
#include "GSTextureVK.h"
#include "common/Align.h"
#include "common/Assertions.h"
#include "common/Vulkan/Builders.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/Util.h"
#include "GS/Renderers/Vulkan/GSDeviceVK.h"
#include "GS/Renderers/Vulkan/GSTextureVK.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/GSPerfMon.h"
#include "GS/GSGL.h"
GSTextureVK::GSTextureVK(Type type, Format format, Vulkan::Texture texture)
#include "common/Align.h"
#include "common/Assertions.h"
GSTextureVK::GSTextureVK(Type type, Format format, VKTexture texture)
: m_texture(std::move(texture))
{
m_type = type;
@ -77,14 +79,14 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
swizzle = &r8_swizzle;
}
Vulkan::Texture texture;
VKTexture texture;
if (!texture.Create(width, height, levels, 1, vk_format, VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, usage, swizzle))
{
return {};
}
Vulkan::Util::SetObjectName(
Vulkan::SetObjectName(
g_vulkan_context->GetDevice(), texture.GetImage(), "%ux%u texture", width, height);
return std::make_unique<GSTextureVK>(type, format, std::move(texture));
}
@ -93,7 +95,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
{
pxAssert(levels == 1);
Vulkan::Texture texture;
VKTexture texture;
if (!texture.Create(width, height, levels, 1, vk_format, VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
@ -103,7 +105,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
return {};
}
Vulkan::Util::SetObjectName(
Vulkan::SetObjectName(
g_vulkan_context->GetDevice(), texture.GetImage(), "%ux%u render target", width, height);
return std::make_unique<GSTextureVK>(type, format, std::move(texture));
}
@ -112,7 +114,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
{
pxAssert(levels == 1);
Vulkan::Texture texture;
VKTexture texture;
if (!texture.Create(width, height, levels, 1, vk_format, VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
@ -121,7 +123,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
return {};
}
Vulkan::Util::SetObjectName(
Vulkan::SetObjectName(
g_vulkan_context->GetDevice(), texture.GetImage(), "%ux%u depth stencil", width, height);
return std::make_unique<GSTextureVK>(type, format, std::move(texture));
}
@ -130,7 +132,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
{
pxAssert(levels == 1);
Vulkan::Texture texture;
VKTexture texture;
if (!texture.Create(width, height, levels, 1, vk_format, VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
@ -139,7 +141,7 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
return {};
}
Vulkan::Util::SetObjectName(
Vulkan::SetObjectName(
g_vulkan_context->GetDevice(), texture.GetImage(), "%ux%u RW texture", width, height);
return std::make_unique<GSTextureVK>(type, format, std::move(texture));
}
@ -149,7 +151,10 @@ std::unique_ptr<GSTextureVK> GSTextureVK::Create(Type type, u32 width, u32 heigh
}
}
void* GSTextureVK::GetNativeHandle() const { return const_cast<Vulkan::Texture*>(&m_texture); }
void* GSTextureVK::GetNativeHandle() const
{
return const_cast<VKTexture*>(&m_texture);
}
VkCommandBuffer GSTextureVK::GetCommandBufferForUpdate()
{
@ -227,7 +232,7 @@ bool GSTextureVK::Update(const GSVector4i& r, const void* data, int pitch, int l
}
else
{
Vulkan::StreamBuffer& sbuffer = g_vulkan_context->GetTextureUploadBuffer();
VKStreamBuffer& sbuffer = g_vulkan_context->GetTextureUploadBuffer();
if (!sbuffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
{
GSDeviceVK::GetInstance()->ExecuteCommandBuffer(
@ -281,12 +286,12 @@ bool GSTextureVK::Map(GSMap& m, const GSVector4i* r, int layer)
m_map_area = r ? *r : GSVector4i(0, 0, m_texture.GetWidth(), m_texture.GetHeight());
m_map_level = layer;
m.pitch = Common::AlignUpPow2(m_map_area.width() * Vulkan::Util::GetTexelSize(m_texture.GetFormat()),
m.pitch = Common::AlignUpPow2(m_map_area.width() * Vulkan::GetTexelSize(m_texture.GetFormat()),
g_vulkan_context->GetBufferCopyRowPitchAlignment());
// see note in Update() for the reason why.
const u32 required_size = m.pitch * m_map_area.height();
Vulkan::StreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
VKStreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
if (required_size >= (buffer.GetCurrentSize() / 2))
return false;
@ -311,10 +316,10 @@ void GSTextureVK::Unmap()
// TODO: non-tightly-packed formats
const u32 width = static_cast<u32>(m_map_area.width());
const u32 height = static_cast<u32>(m_map_area.height());
const u32 pitch = Common::AlignUpPow2(m_map_area.width() * Vulkan::Util::GetTexelSize(m_texture.GetFormat()),
const u32 pitch = Common::AlignUpPow2(m_map_area.width() * Vulkan::GetTexelSize(m_texture.GetFormat()),
g_vulkan_context->GetBufferCopyRowPitchAlignment());
const u32 required_size = pitch * height;
Vulkan::StreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
VKStreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
const u32 buffer_offset = buffer.GetCurrentOffset();
buffer.CommitMemory(required_size);
@ -537,8 +542,9 @@ void GSDownloadTextureVK::CopyFromTexture(
}
VkBufferImageCopy image_copy = {};
const VkImageAspectFlags aspect =
Vulkan::Util::IsDepthFormat(static_cast<VkFormat>(vkTex->GetFormat())) ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
const VkImageAspectFlags aspect = Vulkan::IsDepthFormat(static_cast<VkFormat>(vkTex->GetFormat())) ?
VK_IMAGE_ASPECT_DEPTH_BIT :
VK_IMAGE_ASPECT_COLOR_BIT;
image_copy.bufferOffset = copy_offset;
image_copy.bufferRowLength = GSTexture::CalcUploadRowLengthFromPitch(m_format, m_current_pitch);
image_copy.bufferImageHeight = 0;
@ -550,7 +556,7 @@ void GSDownloadTextureVK::CopyFromTexture(
vkCmdCopyImageToBuffer(cmdbuf, vkTex->GetTexture().GetImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer, 1, &image_copy);
// flush gpu cache
Vulkan::Util::BufferMemoryBarrier(cmdbuf, m_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, 0, copy_size,
Vulkan::BufferMemoryBarrier(cmdbuf, m_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, 0, copy_size,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_HOST_BIT);
if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -17,19 +17,19 @@
#include "GS/GS.h"
#include "GS/Renderers/Common/GSTexture.h"
#include "common/Vulkan/Context.h"
#include "common/Vulkan/Texture.h"
#include "GS/Renderers/Vulkan/VKTexture.h"
#include "GS/Renderers/Vulkan/VKContext.h"
class GSTextureVK final : public GSTexture
{
public:
GSTextureVK(Type type, Format format, Vulkan::Texture texture);
GSTextureVK(Type type, Format format, VKTexture texture);
~GSTextureVK() override;
static std::unique_ptr<GSTextureVK> Create(Type type, u32 width, u32 height, u32 levels, Format format, VkFormat vk_format);
__fi Vulkan::Texture& GetTexture() { return m_texture; }
__fi Vulkan::Texture* GetTexturePtr() { return &m_texture; }
__fi VKTexture& GetTexture() { return m_texture; }
__fi VKTexture* GetTexturePtr() { return &m_texture; }
__fi VkFormat GetNativeFormat() const { return m_texture.GetFormat(); }
__fi VkImage GetImage() const { return m_texture.GetImage(); }
__fi VkImageView GetView() const { return m_texture.GetView(); }
@ -63,7 +63,7 @@ private:
void CopyTextureDataForUpload(void* dst, const void* src, u32 pitch, u32 upload_pitch, u32 height) const;
VkBuffer AllocateUploadStagingBuffer(const void* data, u32 pitch, u32 upload_pitch, u32 height) const;
Vulkan::Texture m_texture;
VKTexture m_texture;
// Contains the fence counter when the texture was last used.
// When this matches the current fence counter, the texture was used in the current command buffer.
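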

View File

@ -0,0 +1,959 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKBuilders.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "common/Assertions.h"
#include <limits>
Vulkan::DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder()
{
Clear();
}
void Vulkan::DescriptorSetLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pBindings = nullptr;
m_ci.bindingCount = 0;
}
VkDescriptorSetLayout Vulkan::DescriptorSetLayoutBuilder::Create(VkDevice device)
{
VkDescriptorSetLayout layout;
VkResult res = vkCreateDescriptorSetLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDescriptorSetLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void Vulkan::DescriptorSetLayoutBuilder::AddBinding(
u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages)
{
pxAssert(m_ci.bindingCount < MAX_BINDINGS);
VkDescriptorSetLayoutBinding& b = m_bindings[m_ci.bindingCount];
b.binding = binding;
b.descriptorType = dtype;
b.descriptorCount = dcount;
b.stageFlags = stages;
b.pImmutableSamplers = nullptr;
m_ci.pBindings = m_bindings.data();
m_ci.bindingCount++;
}
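A minimal usage sketch for DescriptorSetLayoutBuilder; the bindings chosen here are illustrative rather than taken from the diff:

// Hypothetical layout: one dynamic UBO plus one combined image/sampler.
Vulkan::DescriptorSetLayoutBuilder dslb;
dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
dslb.AddBinding(1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
const VkDescriptorSetLayout layout = dslb.Create(g_vulkan_context->GetDevice());
// Create() returns VK_NULL_HANDLE on failure and resets the builder on success.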
Vulkan::PipelineLayoutBuilder::PipelineLayoutBuilder()
{
Clear();
}
void Vulkan::PipelineLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pSetLayouts = nullptr;
m_ci.setLayoutCount = 0;
m_ci.pPushConstantRanges = nullptr;
m_ci.pushConstantRangeCount = 0;
}
VkPipelineLayout Vulkan::PipelineLayoutBuilder::Create(VkDevice device)
{
VkPipelineLayout layout;
VkResult res = vkCreatePipelineLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void Vulkan::PipelineLayoutBuilder::AddDescriptorSet(VkDescriptorSetLayout layout)
{
pxAssert(m_ci.setLayoutCount < MAX_SETS);
m_sets[m_ci.setLayoutCount] = layout;
m_ci.setLayoutCount++;
m_ci.pSetLayouts = m_sets.data();
}
void Vulkan::PipelineLayoutBuilder::AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size)
{
pxAssert(m_ci.pushConstantRangeCount < MAX_PUSH_CONSTANTS);
VkPushConstantRange& r = m_push_constants[m_ci.pushConstantRangeCount];
r.stageFlags = stages;
r.offset = offset;
r.size = size;
m_ci.pushConstantRangeCount++;
m_ci.pPushConstantRanges = m_push_constants.data();
}
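PipelineLayoutBuilder follows the same create-and-reset convention; a sketch in the spirit of the CAS setup shown earlier in this diff (the handles are assumed to exist at the call site):

// Hypothetical pipeline layout: one descriptor set plus a small push-constant block.
Vulkan::PipelineLayoutBuilder plb;
plb.AddDescriptorSet(layout); // layout from a DescriptorSetLayoutBuilder
plb.AddPushConstants(VK_SHADER_STAGE_COMPUTE_BIT, 0, 8 * sizeof(u32));
const VkPipelineLayout pipeline_layout = plb.Create(g_vulkan_context->GetDevice());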
Vulkan::GraphicsPipelineBuilder::GraphicsPipelineBuilder()
{
Clear();
}
void Vulkan::GraphicsPipelineBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
m_shader_stages = {};
m_vertex_input_state = {};
m_vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
m_ci.pVertexInputState = &m_vertex_input_state;
m_vertex_attributes = {};
m_vertex_buffers = {};
m_input_assembly = {};
m_input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
m_rasterization_state = {};
m_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
m_rasterization_state.lineWidth = 1.0f;
m_depth_state = {};
m_depth_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
m_blend_state = {};
m_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
m_blend_attachments = {};
m_viewport_state = {};
m_viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
m_viewport = {};
m_scissor = {};
m_dynamic_state = {};
m_dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
m_dynamic_state_values = {};
m_multisample_state = {};
m_multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
m_provoking_vertex = {};
m_provoking_vertex.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT;
m_line_rasterization_state = {};
m_line_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT;
// set defaults
SetNoCullRasterizationState();
SetNoDepthTestState();
SetNoBlendingState();
SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
// have to be specified even if dynamic
SetViewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
SetScissorRect(0, 0, 1, 1);
SetMultisamples(VK_SAMPLE_COUNT_1_BIT);
}
VkPipeline Vulkan::GraphicsPipelineBuilder::Create(
VkDevice device, VkPipelineCache pipeline_cache, bool clear /* = true */)
{
VkPipeline pipeline;
VkResult res = vkCreateGraphicsPipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateGraphicsPipelines() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return pipeline;
}
void Vulkan::GraphicsPipelineBuilder::SetShaderStage(
VkShaderStageFlagBits stage, VkShaderModule module, const char* entry_point)
{
pxAssert(m_ci.stageCount < MAX_SHADER_STAGES);
u32 index = 0;
for (; index < m_ci.stageCount; index++)
{
if (m_shader_stages[index].stage == stage)
break;
}
if (index == m_ci.stageCount)
{
m_ci.stageCount++;
m_ci.pStages = m_shader_stages.data();
}
VkPipelineShaderStageCreateInfo& s = m_shader_stages[index];
s.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
s.stage = stage;
s.module = module;
s.pName = entry_point;
}
void Vulkan::GraphicsPipelineBuilder::AddVertexBuffer(
u32 binding, u32 stride, VkVertexInputRate input_rate /*= VK_VERTEX_INPUT_RATE_VERTEX*/)
{
pxAssert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputBindingDescription& b = m_vertex_buffers[m_vertex_input_state.vertexBindingDescriptionCount];
b.binding = binding;
b.stride = stride;
b.inputRate = input_rate;
m_vertex_input_state.vertexBindingDescriptionCount++;
m_vertex_input_state.pVertexBindingDescriptions = m_vertex_buffers.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void Vulkan::GraphicsPipelineBuilder::AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset)
{
pxAssert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputAttributeDescription& a = m_vertex_attributes[m_vertex_input_state.vertexAttributeDescriptionCount];
a.location = location;
a.binding = binding;
a.format = format;
a.offset = offset;
m_vertex_input_state.vertexAttributeDescriptionCount++;
m_vertex_input_state.pVertexAttributeDescriptions = m_vertex_attributes.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void Vulkan::GraphicsPipelineBuilder::SetPrimitiveTopology(
VkPrimitiveTopology topology, bool enable_primitive_restart /*= false*/)
{
m_input_assembly.topology = topology;
m_input_assembly.primitiveRestartEnable = enable_primitive_restart;
m_ci.pInputAssemblyState = &m_input_assembly;
}
void Vulkan::GraphicsPipelineBuilder::SetRasterizationState(
VkPolygonMode polygon_mode, VkCullModeFlags cull_mode, VkFrontFace front_face)
{
m_rasterization_state.polygonMode = polygon_mode;
m_rasterization_state.cullMode = cull_mode;
m_rasterization_state.frontFace = front_face;
m_ci.pRasterizationState = &m_rasterization_state;
}
void Vulkan::GraphicsPipelineBuilder::SetLineWidth(float width)
{
m_rasterization_state.lineWidth = width;
}
void Vulkan::GraphicsPipelineBuilder::SetLineRasterizationMode(VkLineRasterizationModeEXT mode)
{
AddPointerToChain(&m_rasterization_state, &m_line_rasterization_state);
m_line_rasterization_state.lineRasterizationMode = mode;
}
void Vulkan::GraphicsPipelineBuilder::SetMultisamples(u32 multisamples, bool per_sample_shading)
{
m_multisample_state.rasterizationSamples = static_cast<VkSampleCountFlagBits>(multisamples);
m_multisample_state.sampleShadingEnable = per_sample_shading;
m_multisample_state.minSampleShading = (multisamples > 1) ? 1.0f : 0.0f;
}
void Vulkan::GraphicsPipelineBuilder::SetNoCullRasterizationState()
{
SetRasterizationState(VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE);
}
void Vulkan::GraphicsPipelineBuilder::SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op)
{
m_depth_state.depthTestEnable = depth_test;
m_depth_state.depthWriteEnable = depth_write;
m_depth_state.depthCompareOp = compare_op;
m_ci.pDepthStencilState = &m_depth_state;
}
void Vulkan::GraphicsPipelineBuilder::SetStencilState(
bool stencil_test, const VkStencilOpState& front, const VkStencilOpState& back)
{
m_depth_state.stencilTestEnable = stencil_test;
m_depth_state.front = front;
m_depth_state.back = back;
}
void Vulkan::GraphicsPipelineBuilder::SetNoStencilState()
{
m_depth_state.stencilTestEnable = VK_FALSE;
m_depth_state.front = {};
m_depth_state.back = {};
}
void Vulkan::GraphicsPipelineBuilder::SetNoDepthTestState()
{
SetDepthState(false, false, VK_COMPARE_OP_ALWAYS);
}
void Vulkan::GraphicsPipelineBuilder::SetBlendConstants(float r, float g, float b, float a)
{
m_blend_state.blendConstants[0] = r;
m_blend_state.blendConstants[1] = g;
m_blend_state.blendConstants[2] = b;
m_blend_state.blendConstants[3] = a;
m_ci.pColorBlendState = &m_blend_state;
}
void Vulkan::GraphicsPipelineBuilder::AddBlendAttachment(bool blend_enable, VkBlendFactor src_factor,
VkBlendFactor dst_factor, VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor,
VkBlendOp alpha_op,
VkColorComponentFlags
write_mask /* = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT */)
{
pxAssert(m_blend_state.attachmentCount < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[m_blend_state.attachmentCount];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
m_blend_state.attachmentCount++;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
void Vulkan::GraphicsPipelineBuilder::SetBlendAttachment(u32 attachment, bool blend_enable, VkBlendFactor src_factor,
VkBlendFactor dst_factor, VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor,
VkBlendOp alpha_op,
VkColorComponentFlags
write_mask /*= VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT*/)
{
pxAssert(attachment < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[attachment];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
if (attachment >= m_blend_state.attachmentCount)
{
m_blend_state.attachmentCount = attachment + 1u;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
}
void Vulkan::GraphicsPipelineBuilder::AddBlendFlags(u32 flags)
{
m_blend_state.flags |= flags;
}
void Vulkan::GraphicsPipelineBuilder::ClearBlendAttachments()
{
m_blend_attachments = {};
m_blend_state.attachmentCount = 0;
}
void Vulkan::GraphicsPipelineBuilder::SetNoBlendingState()
{
ClearBlendAttachments();
SetBlendAttachment(0, false, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, VK_BLEND_FACTOR_ONE,
VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD,
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
}
void Vulkan::GraphicsPipelineBuilder::AddDynamicState(VkDynamicState state)
{
pxAssert(m_dynamic_state.dynamicStateCount < MAX_DYNAMIC_STATE);
m_dynamic_state_values[m_dynamic_state.dynamicStateCount] = state;
m_dynamic_state.dynamicStateCount++;
m_dynamic_state.pDynamicStates = m_dynamic_state_values.data();
m_ci.pDynamicState = &m_dynamic_state;
}
void Vulkan::GraphicsPipelineBuilder::SetDynamicViewportAndScissorState()
{
AddDynamicState(VK_DYNAMIC_STATE_VIEWPORT);
AddDynamicState(VK_DYNAMIC_STATE_SCISSOR);
}
void Vulkan::GraphicsPipelineBuilder::SetViewport(
float x, float y, float width, float height, float min_depth, float max_depth)
{
m_viewport.x = x;
m_viewport.y = y;
m_viewport.width = width;
m_viewport.height = height;
m_viewport.minDepth = min_depth;
m_viewport.maxDepth = max_depth;
m_viewport_state.pViewports = &m_viewport;
m_viewport_state.viewportCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void Vulkan::GraphicsPipelineBuilder::SetScissorRect(s32 x, s32 y, u32 width, u32 height)
{
m_scissor.offset.x = x;
m_scissor.offset.y = y;
m_scissor.extent.width = width;
m_scissor.extent.height = height;
m_viewport_state.pScissors = &m_scissor;
m_viewport_state.scissorCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void Vulkan::GraphicsPipelineBuilder::SetMultisamples(VkSampleCountFlagBits samples)
{
m_multisample_state.rasterizationSamples = samples;
m_ci.pMultisampleState = &m_multisample_state;
}
void Vulkan::GraphicsPipelineBuilder::SetPipelineLayout(VkPipelineLayout layout)
{
m_ci.layout = layout;
}
void Vulkan::GraphicsPipelineBuilder::SetRenderPass(VkRenderPass render_pass, u32 subpass)
{
m_ci.renderPass = render_pass;
m_ci.subpass = subpass;
}
void Vulkan::GraphicsPipelineBuilder::SetProvokingVertex(VkProvokingVertexModeEXT mode)
{
AddPointerToChain(&m_rasterization_state, &m_provoking_vertex);
m_provoking_vertex.provokingVertexMode = mode;
}
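A condensed GraphicsPipelineBuilder sketch using only methods defined above; the shader modules, render pass, and pipeline layout are assumed to exist, and the "main" entry point is an assumption rather than something this diff establishes:

// Hypothetical graphics pipeline relying on the builder's defaults (no cull, no depth, no blend).
Vulkan::GraphicsPipelineBuilder gpb;
gpb.SetPipelineLayout(pipeline_layout);
gpb.SetRenderPass(render_pass, 0);
gpb.SetShaderStage(VK_SHADER_STAGE_VERTEX_BIT, vs, "main");
gpb.SetShaderStage(VK_SHADER_STAGE_FRAGMENT_BIT, ps, "main");
gpb.SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
gpb.SetDynamicViewportAndScissorState();
const VkPipeline pipeline = gpb.Create(g_vulkan_context->GetDevice(), g_vulkan_shader_cache->GetPipelineCache(true), false);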
Vulkan::ComputePipelineBuilder::ComputePipelineBuilder()
{
Clear();
}
void Vulkan::ComputePipelineBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
m_si = {};
m_smap_entries = {};
m_smap_constants = {};
}
VkPipeline Vulkan::ComputePipelineBuilder::Create(
VkDevice device, VkPipelineCache pipeline_cache /*= VK_NULL_HANDLE*/, bool clear /*= true*/)
{
VkPipeline pipeline;
VkResult res = vkCreateComputePipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateComputePipelines() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return pipeline;
}
void Vulkan::ComputePipelineBuilder::SetShader(VkShaderModule module, const char* entry_point)
{
m_ci.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
m_ci.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
m_ci.stage.module = module;
m_ci.stage.pName = entry_point;
}
void Vulkan::ComputePipelineBuilder::SetPipelineLayout(VkPipelineLayout layout)
{
m_ci.layout = layout;
}
void Vulkan::ComputePipelineBuilder::SetSpecializationBool(u32 index, bool value)
{
const u32 u32_value = static_cast<u32>(value);
SetSpecializationValue(index, u32_value);
}
void Vulkan::ComputePipelineBuilder::SetSpecializationValue(u32 index, u32 value)
{
if (m_si.mapEntryCount == 0)
{
m_si.pMapEntries = m_smap_entries.data();
m_si.pData = m_smap_constants.data();
m_ci.stage.pSpecializationInfo = &m_si;
}
m_smap_entries[m_si.mapEntryCount++] = {index, index * SPECIALIZATION_CONSTANT_SIZE, SPECIALIZATION_CONSTANT_SIZE};
m_si.dataSize += SPECIALIZATION_CONSTANT_SIZE;
}
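ComputePipelineBuilder is the compute-side counterpart; a sketch loosely modelled on the CAS pipeline creation referenced earlier (the specialization index and entry point are illustrative):

// Hypothetical compute pipeline with one boolean specialization constant.
Vulkan::ComputePipelineBuilder cpb;
cpb.SetPipelineLayout(m_cas_pipeline_layout);
cpb.SetShader(mod, "main");
cpb.SetSpecializationBool(0, true); // e.g. a sharpen-only toggle
const VkPipeline cas_pipeline = cpb.Create(g_vulkan_context->GetDevice(), g_vulkan_shader_cache->GetPipelineCache(true), false);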
Vulkan::SamplerBuilder::SamplerBuilder()
{
Clear();
}
void Vulkan::SamplerBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
}
VkSampler Vulkan::SamplerBuilder::Create(VkDevice device, bool clear /* = true */)
{
VkSampler sampler;
VkResult res = vkCreateSampler(device, &m_ci, nullptr, &sampler);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSampler() failed: ");
return VK_NULL_HANDLE;
}
return sampler;
}
void Vulkan::SamplerBuilder::SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter)
{
m_ci.magFilter = mag_filter;
m_ci.minFilter = min_filter;
m_ci.mipmapMode = mip_filter;
}
void Vulkan::SamplerBuilder::SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w)
{
m_ci.addressModeU = u;
m_ci.addressModeV = v;
m_ci.addressModeW = w;
}
void Vulkan::SamplerBuilder::SetPointSampler(
VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
{
Clear();
SetFilter(VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
}
void Vulkan::SamplerBuilder::SetLinearSampler(
bool mipmaps, VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
{
Clear();
SetFilter(
VK_FILTER_LINEAR, VK_FILTER_LINEAR, mipmaps ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
if (mipmaps)
{
m_ci.minLod = std::numeric_limits<float>::min();
m_ci.maxLod = std::numeric_limits<float>::max();
}
}
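SamplerBuilder usage is a two-step affair; a minimal sketch with an arbitrarily chosen address mode:

// Hypothetical linear sampler without mipmaps.
Vulkan::SamplerBuilder sb;
sb.SetLinearSampler(false, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE);
const VkSampler sampler = sb.Create(g_vulkan_context->GetDevice());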
Vulkan::DescriptorSetUpdateBuilder::DescriptorSetUpdateBuilder()
{
Clear();
}
void Vulkan::DescriptorSetUpdateBuilder::Clear()
{
m_writes = {};
m_num_writes = 0;
}
void Vulkan::DescriptorSetUpdateBuilder::Update(VkDevice device, bool clear /*= true*/)
{
pxAssert(m_num_writes > 0);
vkUpdateDescriptorSets(device, m_num_writes, (m_num_writes > 0) ? m_writes.data() : nullptr, 0, nullptr);
if (clear)
Clear();
}
void Vulkan::DescriptorSetUpdateBuilder::AddImageDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dw.pImageInfo = &ii;
}
void Vulkan::DescriptorSetUpdateBuilder::AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = VK_NULL_HANDLE;
ii.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dw.pImageInfo = &ii;
}
void Vulkan::DescriptorSetUpdateBuilder::AddSamplerDescriptorWrites(
VkDescriptorSet set, u32 binding, const VkSampler* samplers, u32 num_samplers)
{
pxAssert(m_num_writes < MAX_WRITES && (m_num_image_infos + num_samplers) < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = num_samplers;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
for (u32 i = 0; i < num_samplers; i++)
{
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = VK_NULL_HANDLE;
ii.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
ii.sampler = samplers[i];
}
}
void Vulkan::DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrite(VkDescriptorSet set, u32 binding,
VkImageView view, VkSampler sampler, VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dw.pImageInfo = &ii;
}
void Vulkan::DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrites(VkDescriptorSet set, u32 binding,
const VkImageView* views, const VkSampler* samplers, u32 num_views,
VkImageLayout layout /* = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL */)
{
pxAssert(m_num_writes < MAX_WRITES && (m_num_image_infos + num_views) < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = num_views;
dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
for (u32 i = 0; i < num_views; i++)
{
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = views[i];
ii.sampler = samplers[i];
ii.imageLayout = layout;
}
}
void Vulkan::DescriptorSetUpdateBuilder::AddBufferDescriptorWrite(
VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBuffer buffer, u32 offset, u32 size)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_buffer_infos < MAX_BUFFER_INFOS);
VkDescriptorBufferInfo& bi = m_buffer_infos[m_num_buffer_infos++];
bi.buffer = buffer;
bi.offset = offset;
bi.range = size;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pBufferInfo = &bi;
}
void Vulkan::DescriptorSetUpdateBuilder::AddBufferViewDescriptorWrite(
VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBufferView view)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_views < MAX_VIEWS);
VkBufferView& bi = m_views[m_num_views++];
bi = view;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pTexelBufferView = &bi;
}
void Vulkan::DescriptorSetUpdateBuilder::AddInputAttachmentDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view, VkImageLayout layout /*= VK_IMAGE_LAYOUT_GENERAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
}
void Vulkan::DescriptorSetUpdateBuilder::AddStorageImageDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view, VkImageLayout layout /*= VK_IMAGE_LAYOUT_GENERAL*/)
{
pxAssert(m_num_writes < MAX_WRITES && m_num_image_infos < MAX_IMAGE_INFOS);
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
dw.pImageInfo = &m_image_infos[m_num_image_infos];
VkDescriptorImageInfo& ii = m_image_infos[m_num_image_infos++];
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
}
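DescriptorSetUpdateBuilder batches VkWriteDescriptorSet entries and flushes them with a single vkUpdateDescriptorSets call, in the style of the persistent TFX UBO set shown earlier. A sketch with assumed handles:

// Hypothetical update: a dynamic UBO at binding 0 and a texture/sampler pair at binding 1.
Vulkan::DescriptorSetUpdateBuilder dsub;
dsub.AddBufferDescriptorWrite(set, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, buffer, 0, buffer_size);
dsub.AddCombinedImageSamplerDescriptorWrite(set, 1, view, sampler);
dsub.Update(g_vulkan_context->GetDevice());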
Vulkan::FramebufferBuilder::FramebufferBuilder()
{
Clear();
}
void Vulkan::FramebufferBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
m_images = {};
}
VkFramebuffer Vulkan::FramebufferBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkFramebuffer fb;
VkResult res = vkCreateFramebuffer(device, &m_ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return fb;
}
void Vulkan::FramebufferBuilder::AddAttachment(VkImageView image)
{
pxAssert(m_ci.attachmentCount < MAX_ATTACHMENTS);
m_images[m_ci.attachmentCount] = image;
m_ci.attachmentCount++;
m_ci.pAttachments = m_images.data();
}
void Vulkan::FramebufferBuilder::SetSize(u32 width, u32 height, u32 layers)
{
m_ci.width = width;
m_ci.height = height;
m_ci.layers = layers;
}
void Vulkan::FramebufferBuilder::SetRenderPass(VkRenderPass render_pass)
{
m_ci.renderPass = render_pass;
}
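FramebufferBuilder sketch, assuming a compatible render pass and a single color attachment view:

// Hypothetical single-attachment framebuffer.
Vulkan::FramebufferBuilder fbb;
fbb.SetRenderPass(render_pass);
fbb.AddAttachment(color_view);
fbb.SetSize(width, height, 1);
const VkFramebuffer framebuffer = fbb.Create(g_vulkan_context->GetDevice());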
Vulkan::RenderPassBuilder::RenderPassBuilder()
{
Clear();
}
void Vulkan::RenderPassBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
m_attachments = {};
m_attachment_references = {};
m_num_attachment_references = 0;
m_subpasses = {};
}
VkRenderPass Vulkan::RenderPassBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkRenderPass rp;
VkResult res = vkCreateRenderPass(device, &m_ci, nullptr, &rp);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass() failed: ");
return VK_NULL_HANDLE;
}
return rp;
}
u32 Vulkan::RenderPassBuilder::AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
VkAttachmentStoreOp store_op, VkImageLayout initial_layout, VkImageLayout final_layout)
{
pxAssert(m_ci.attachmentCount < MAX_ATTACHMENTS);
const u32 index = m_ci.attachmentCount;
VkAttachmentDescription& ad = m_attachments[index];
ad.format = format;
ad.samples = samples;
ad.loadOp = load_op;
ad.storeOp = store_op;
ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
ad.initialLayout = initial_layout;
ad.finalLayout = final_layout;
m_ci.attachmentCount++;
m_ci.pAttachments = m_attachments.data();
return index;
}
u32 Vulkan::RenderPassBuilder::AddSubpass()
{
pxAssert(m_ci.subpassCount < MAX_SUBPASSES);
const u32 index = m_ci.subpassCount;
VkSubpassDescription& sp = m_subpasses[index];
sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
m_ci.subpassCount++;
m_ci.pSubpasses = m_subpasses.data();
return index;
}
void Vulkan::RenderPassBuilder::AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
pxAssert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
if (sp.colorAttachmentCount == 0)
sp.pColorAttachments = &ar;
sp.colorAttachmentCount++;
}
void Vulkan::RenderPassBuilder::AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
pxAssert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
sp.pDepthStencilAttachment = &ar;
}
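With RenderPassBuilder, attachments are registered first and then referenced by index when wiring up subpasses; a sketch with format and load/store ops picked purely for illustration:

// Hypothetical single-subpass, color-only render pass.
Vulkan::RenderPassBuilder rpb;
const u32 color = rpb.AddAttachment(VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_STORE,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
const u32 subpass = rpb.AddSubpass();
rpb.AddSubpassColorAttachment(subpass, color, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
const VkRenderPass render_pass = rpb.Create(g_vulkan_context->GetDevice());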
Vulkan::BufferViewBuilder::BufferViewBuilder()
{
Clear();
}
void Vulkan::BufferViewBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
}
VkBufferView Vulkan::BufferViewBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkBufferView bv;
VkResult res = vkCreateBufferView(device, &m_ci, nullptr, &bv);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBufferView() failed: ");
return VK_NULL_HANDLE;
}
return bv;
}
void Vulkan::BufferViewBuilder::Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size)
{
m_ci.buffer = buffer;
m_ci.format = format;
m_ci.offset = offset;
m_ci.range = size;
}
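BufferViewBuilder rounds out the set; a sketch for a texel buffer view over an assumed buffer:

// Hypothetical R32_UINT texel buffer view covering the whole buffer.
Vulkan::BufferViewBuilder bvb;
bvb.Set(buffer, VK_FORMAT_R32_UINT, 0, buffer_size);
const VkBufferView view = bvb.Create(g_vulkan_context->GetDevice());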

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -14,8 +14,9 @@
*/
#pragma once
#include "common/Pcsx2Defs.h"
#include "common/Vulkan/Loader.h"
#include "GS/Renderers/Vulkan/VKLoader.h"
#include <array>
namespace Vulkan
@ -231,8 +232,6 @@ namespace Vulkan
void AddImageDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
void AddImageDescriptorWrites(VkDescriptorSet set, u32 binding, const VkImageView* views, u32 num_views,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
void AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler);
void AddSamplerDescriptorWrites(VkDescriptorSet set, u32 binding, const VkSampler* samplers, u32 num_samplers);
void AddCombinedImageSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view,

File diff suppressed because it is too large

View File

@ -0,0 +1,410 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "GS/Renderers/Vulkan/VKStreamBuffer.h"
#include "common/ReadbackSpinManager.h"
#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
struct WindowInfo;
class VKSwapChain;
class VKContext
{
public:
enum : u32
{
NUM_COMMAND_BUFFERS = 3,
TEXTURE_BUFFER_SIZE = 64 * 1024 * 1024,
};
struct OptionalExtensions
{
bool vk_ext_provoking_vertex : 1;
bool vk_ext_memory_budget : 1;
bool vk_ext_calibrated_timestamps : 1;
bool vk_ext_line_rasterization : 1;
bool vk_ext_rasterization_order_attachment_access : 1;
bool vk_ext_full_screen_exclusive : 1;
bool vk_khr_driver_properties : 1;
bool vk_khr_fragment_shader_barycentric : 1;
bool vk_khr_shader_draw_parameters : 1;
};
~VKContext();
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(
const WindowInfo& wi, bool enable_debug_utils, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<std::pair<VkPhysicalDevice, std::string>>;
static GPUList EnumerateGPUs(VkInstance instance);
// Creates a new context and sets it up as global.
static bool Create(VkInstance instance, VkSurfaceKHR surface, VkPhysicalDevice physical_device,
bool threaded_presentation, bool enable_debug_utils, bool enable_validation_layer);
// Destroys context.
static void Destroy();
// Enables/disables debug messages at runtime.
bool EnableDebugUtils();
void DisableDebugUtils();
// Global state accessors
__fi VkInstance GetVulkanInstance() const { return m_instance; }
__fi VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
__fi VkDevice GetDevice() const { return m_device; }
__fi VmaAllocator GetAllocator() const { return m_allocator; }
__fi VkQueue GetGraphicsQueue() const { return m_graphics_queue; }
__fi u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
__fi VkQueue GetPresentQueue() const { return m_present_queue; }
__fi u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
__fi const VkQueueFamilyProperties& GetGraphicsQueueProperties() const { return m_graphics_queue_properties; }
__fi const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const
{
return m_device_memory_properties;
}
__fi const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
__fi const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; }
__fi const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; }
__fi const VkPhysicalDeviceDriverProperties& GetDeviceDriverProperties() const { return m_device_driver_properties; }
__fi const OptionalExtensions& GetOptionalExtensions() const { return m_optional_extensions; }
// Helpers for getting constants
__fi u32 GetUniformBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minUniformBufferOffsetAlignment);
}
__fi u32 GetTexelBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minTexelBufferOffsetAlignment);
}
__fi u32 GetStorageBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minStorageBufferOffsetAlignment);
}
__fi u32 GetBufferImageGranularity() const
{
return static_cast<u32>(m_device_properties.limits.bufferImageGranularity);
}
__fi u32 GetBufferCopyOffsetAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyOffsetAlignment);
}
__fi u32 GetBufferCopyRowPitchAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyRowPitchAlignment);
}
__fi u32 GetMaxImageDimension2D() const
{
return m_device_properties.limits.maxImageDimension2D;
}
// Creates a simple render pass.
__ri VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format,
VkAttachmentLoadOp color_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp color_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp depth_load_op = VK_ATTACHMENT_LOAD_OP_LOAD,
VkAttachmentStoreOp depth_store_op = VK_ATTACHMENT_STORE_OP_STORE,
VkAttachmentLoadOp stencil_load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VkAttachmentStoreOp stencil_store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE,
bool color_feedback_loop = false, bool depth_sampling = false)
{
RenderPassCacheKey key = {};
key.color_format = color_format;
key.depth_format = depth_format;
key.color_load_op = color_load_op;
key.color_store_op = color_store_op;
key.depth_load_op = depth_load_op;
key.depth_store_op = depth_store_op;
key.stencil_load_op = stencil_load_op;
key.stencil_store_op = stencil_store_op;
key.color_feedback_loop = color_feedback_loop;
key.depth_sampling = depth_sampling;
auto it = m_render_pass_cache.find(key.key);
if (it != m_render_pass_cache.end())
return it->second;
return CreateCachedRenderPass(key);
}
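// Usage sketch (illustrative, not part of the original header): fetch a cached render pass for an
// RGBA8 colour target with a D32F depth attachment, clearing both on load. Repeated calls with
// identical parameters return the same cached handle.
//   VkRenderPass rp = g_vulkan_context->GetRenderPass(VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_D32_SFLOAT,
//       VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_CLEAR);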
// These command buffers are allocated per-frame. They are valid until the command buffer
// is submitted; after that, these functions must be called again.
__fi u32 GetCurrentCommandBufferIndex() const { return m_current_frame; }
__fi VkDescriptorPool GetGlobalDescriptorPool() const { return m_global_descriptor_pool; }
__fi VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
__fi VKStreamBuffer& GetTextureUploadBuffer() { return m_texture_upload_buffer; }
__fi VkDescriptorPool GetCurrentDescriptorPool() const
{
return m_frame_resources[m_current_frame].descriptor_pool;
}
VkCommandBuffer GetCurrentInitCommandBuffer();
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
/// Allocates a descriptor set from the global pool; it persists across frames and must be freed with FreeGlobalDescriptorSet().
VkDescriptorSet AllocatePersistentDescriptorSet(VkDescriptorSetLayout set_layout);
/// Frees a descriptor set allocated from the global pool.
void FreeGlobalDescriptorSet(VkDescriptorSet set);
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
__fi VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Fence "counters" are used to track which commands have been completed by the GPU.
// If the last completed fence counter is greater or equal to N, it means that the work
// associated counter N has been completed by the GPU. The value of N to associate with
// commands can be retrieved by calling GetCurrentFenceCounter().
u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }
// Returns the fence counter associated with the current command buffer. Work recorded in this
// buffer is considered complete once the completed fence counter reaches this value.
u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
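// Usage sketch (illustrative): tying a resource's lifetime to the command buffer that last used it.
//   const u64 fence = g_vulkan_context->GetCurrentFenceCounter();  // record at time of use
//   /* ... later, before reusing or destroying the resource ... */
//   if (g_vulkan_context->GetCompletedFenceCounter() < fence)
//       g_vulkan_context->WaitForFenceCounter(fence);              // or use the Defer*Destruction() helpers below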
void SubmitCommandBuffer(VKSwapChain* present_swap_chain = nullptr, bool submit_on_thread = false);
void MoveToNextCommandBuffer();
enum class WaitType
{
None,
Sleep,
Spin,
};
void ExecuteCommandBuffer(WaitType wait_for_completion);
void WaitForPresentComplete();
// Was the last present submitted to the queue a failure? If so, we must recreate our swapchain.
bool CheckLastPresentFail();
bool CheckLastSubmitFail();
// Schedule a Vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
void DeferBufferDestruction(VkBuffer object);
void DeferBufferDestruction(VkBuffer object, VmaAllocation allocation);
void DeferBufferViewDestruction(VkBufferView object);
void DeferDeviceMemoryDestruction(VkDeviceMemory object);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object);
void DeferImageDestruction(VkImage object, VmaAllocation allocation);
void DeferImageViewDestruction(VkImageView object);
void DeferPipelineDestruction(VkPipeline pipeline);
void DeferSamplerDestruction(VkSampler sampler);
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFenceCounter(u64 fence_counter);
void WaitForGPUIdle();
float GetAndResetAccumulatedGPUTime();
bool SetEnableGPUTiming(bool enabled);
void CountRenderPass() { m_command_buffer_render_passes++; }
void NotifyOfReadback();
// Allocates a temporary CPU staging buffer, invokes the callback to fill it, then copies the contents to a GPU buffer.
bool AllocatePreinitializedGPUBuffer(u32 size, VkBuffer* gpu_buffer, VmaAllocation* gpu_allocation,
VkBufferUsageFlags gpu_usage, const std::function<void(void*)>& fill_callback);
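// Usage sketch (illustrative, placeholder sizes): a small GPU-local buffer pre-filled via the callback.
//   VkBuffer buffer; VmaAllocation allocation;
//   g_vulkan_context->AllocatePreinitializedGPUBuffer(1024, &buffer, &allocation,
//       VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
//       [](void* map) { std::memset(map, 0, 1024); });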
private:
VKContext(VkInstance instance, VkPhysicalDevice physical_device);
union RenderPassCacheKey
{
struct
{
u32 color_format : 8;
u32 depth_format : 8;
u32 color_load_op : 2;
u32 color_store_op : 1;
u32 depth_load_op : 2;
u32 depth_store_op : 1;
u32 stencil_load_op : 2;
u32 stencil_store_op : 1;
u32 color_feedback_loop : 1;
u32 depth_sampling : 1;
};
u32 key;
};
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(
ExtensionList* extension_list, const WindowInfo& wi, bool enable_debug_utils);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface);
bool SelectDeviceFeatures(const VkPhysicalDeviceFeatures* required_features);
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer, const char** required_device_extensions,
u32 num_required_device_extensions, const char** required_device_layers, u32 num_required_device_layers,
const VkPhysicalDeviceFeatures* required_features);
void ProcessDeviceExtensions();
bool CreateAllocator();
void DestroyAllocator();
bool CreateCommandBuffers();
void DestroyCommandBuffers();
bool CreateGlobalDescriptorPool();
void DestroyGlobalDescriptorPool();
bool CreateTextureStreamBuffer();
VkRenderPass CreateCachedRenderPass(RenderPassCacheKey key);
void DestroyRenderPassCache();
void CommandBufferCompleted(u32 index);
void ActivateCommandBuffer(u32 index);
void ScanForCommandBufferCompletion();
void WaitForCommandBufferCompletion(u32 index);
void DoSubmitCommandBuffer(u32 index, VKSwapChain* present_swap_chain, u32 spin_cycles);
void DoPresent(VKSwapChain* present_swap_chain);
void WaitForPresentComplete(std::unique_lock<std::mutex>& lock);
void PresentThread();
void StartPresentThread();
void StopPresentThread();
bool InitSpinResources();
void DestroySpinResources();
void WaitForSpinCompletion(u32 index);
void SpinCommandCompleted(u32 index);
void SubmitSpinCommand(u32 index, u32 cycles);
void CalibrateSpinTimestamp();
u64 GetCPUTimestamp();
struct FrameResources
{
// [0] - Init (upload) command buffer, [1] - draw command buffer
VkCommandPool command_pool = VK_NULL_HANDLE;
std::array<VkCommandBuffer, 2> command_buffers{VK_NULL_HANDLE, VK_NULL_HANDLE};
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u64 fence_counter = 0;
s32 spin_id = -1;
u32 submit_timestamp = 0;
bool init_buffer_used = false;
bool needs_fence_wait = false;
bool timestamp_written = false;
std::vector<std::function<void()>> cleanup_resources;
};
struct SpinResources
{
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkSemaphore semaphore = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u32 cycles = 0;
bool in_progress = false;
};
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VmaAllocator m_allocator = VK_NULL_HANDLE;
VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;
VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
VkQueue m_present_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
u32 m_present_queue_family_index = 0;
ReadbackSpinManager m_spin_manager;
VkQueue m_spin_queue = VK_NULL_HANDLE;
VkDescriptorSetLayout m_spin_descriptor_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_spin_pipeline_layout = VK_NULL_HANDLE;
VkPipeline m_spin_pipeline = VK_NULL_HANDLE;
VkBuffer m_spin_buffer = VK_NULL_HANDLE;
VmaAllocation m_spin_buffer_allocation = VK_NULL_HANDLE;
VkDescriptorSet m_spin_descriptor_set = VK_NULL_HANDLE;
std::array<SpinResources, NUM_COMMAND_BUFFERS> m_spin_resources;
#ifdef _WIN32
double m_queryperfcounter_to_ns = 0;
#endif
double m_spin_timestamp_scale = 0;
double m_spin_timestamp_offset = 0;
u32 m_spin_queue_family_index = 0;
u32 m_command_buffer_render_passes = 0;
u32 m_spin_timer = 0;
bool m_spinning_supported = false;
bool m_spin_queue_is_graphics_queue = false;
bool m_spin_buffer_initialized = false;
VkQueryPool m_timestamp_query_pool = VK_NULL_HANDLE;
float m_accumulated_gpu_time = 0.0f;
bool m_gpu_timing_enabled = false;
bool m_gpu_timing_supported = false;
bool m_wants_new_timestamp_calibration = false;
VkTimeDomainEXT m_calibrated_timestamp_type = VK_TIME_DOMAIN_DEVICE_EXT;
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources;
u64 m_next_fence_counter = 1;
u64 m_completed_fence_counter = 0;
u32 m_current_frame = 0;
VKStreamBuffer m_texture_upload_buffer;
std::atomic_bool m_last_submit_failed{false};
std::atomic_bool m_last_present_failed{false};
std::atomic_bool m_present_done{true};
std::mutex m_present_mutex;
std::condition_variable m_present_queued_cv;
std::condition_variable m_present_done_cv;
std::thread m_present_thread;
std::atomic_bool m_present_thread_done{false};
struct QueuedPresent
{
VKSwapChain* swap_chain;
u32 command_buffer_index;
u32 spin_cycles;
};
QueuedPresent m_queued_present = {};
std::map<u32, VkRenderPass> m_render_pass_cache;
VkDebugUtilsMessengerEXT m_debug_messenger_callback = VK_NULL_HANDLE;
VkQueueFamilyProperties m_graphics_queue_properties = {};
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceMemoryProperties m_device_memory_properties = {};
VkPhysicalDeviceDriverPropertiesKHR m_device_driver_properties = {};
OptionalExtensions m_optional_extensions = {};
};
extern std::unique_ptr<VKContext> g_vulkan_context;
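// Typical per-frame flow (illustrative sketch of the intended call pattern, not prescriptive):
//   VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer();
//   /* ... record rendering commands ... */
//   g_vulkan_context->SubmitCommandBuffer(swap_chain);  // pass nullptr when not presenting
//   g_vulkan_context->MoveToNextCommandBuffer();        // recycles the oldest buffer once its fence signals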

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -25,7 +25,7 @@ extern "C" {
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) extern PFN_##name pcsx2_##name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) extern PFN_##name pcsx2_##name;
#define VULKAN_DEFINE_NAME_PREFIX pcsx2_
#include "EntryPoints.inl"
#include "VKEntryPoints.inl"
#undef VULKAN_DEFINE_NAME_PREFIX
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT

View File

@ -0,0 +1,255 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "GS/Renderers/Vulkan/VKLoader.h"
#include <atomic>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#ifndef _WIN32
#include <dlfcn.h>
#endif
#ifdef __APPLE__
#include <mach-o/dyld.h>
#endif
extern "C" {
#define VULKAN_MODULE_ENTRY_POINT(name, required) PFN_##name pcsx2_##name;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) PFN_##name pcsx2_##name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) PFN_##name pcsx2_##name;
#include "VKEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
}
void Vulkan::ResetVulkanLibraryFunctionPointers()
{
#define VULKAN_MODULE_ENTRY_POINT(name, required) pcsx2_##name = nullptr;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) pcsx2_##name = nullptr;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) pcsx2_##name = nullptr;
#include "VKEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
}
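// VKEntryPoints.inl is a list of X-macro invocations, one per entry point; an illustrative
// excerpt (not the full list) looks like:
//   VULKAN_MODULE_ENTRY_POINT(vkGetInstanceProcAddr, true)
//   VULKAN_INSTANCE_ENTRY_POINT(vkCreateDevice, true)
//   VULKAN_DEVICE_ENTRY_POINT(vkQueueSubmit, true)
// Each include site defines the three macros to declare, define, reset, or load the pointers.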
#if defined(_WIN32)
static HMODULE vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
bool Vulkan::LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
vulkan_module = LoadLibraryA("vulkan-1.dll");
if (!vulkan_module)
{
std::fprintf(stderr, "Failed to load vulkan-1.dll\n");
return false;
}
bool required_functions_missing = false;
auto LoadFunction = [&](FARPROC* func_ptr, const char* name, bool is_required) {
*func_ptr = GetProcAddress(vulkan_module, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_MODULE_ENTRY_POINT(name, required) LoadFunction(reinterpret_cast<FARPROC*>(&name), #name, required);
#include "VKEntryPoints.inl"
#undef VULKAN_MODULE_ENTRY_POINT
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void Vulkan::UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
}
#else
static void* vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
bool Vulkan::LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
#if defined(__APPLE__)
// Check if a path to a specific Vulkan library has been specified.
char* libvulkan_env = getenv("LIBVULKAN_PATH");
if (libvulkan_env)
vulkan_module = dlopen(libvulkan_env, RTLD_NOW);
if (!vulkan_module)
{
unsigned path_size = 0;
_NSGetExecutablePath(nullptr, &path_size);
std::string path;
path.resize(path_size);
if (_NSGetExecutablePath(path.data(), &path_size) == 0)
{
path[path_size] = 0;
size_t pos = path.rfind('/');
if (pos != std::string::npos)
{
path.erase(pos);
path += "/../Frameworks/libvulkan.dylib";
vulkan_module = dlopen(path.c_str(), RTLD_NOW);
if (!vulkan_module)
{
path.erase(pos);
path += "/../Frameworks/libMoltenVK.dylib";
vulkan_module = dlopen(path.c_str(), RTLD_NOW);
}
}
}
}
if (!vulkan_module)
{
vulkan_module = dlopen("libvulkan.dylib", RTLD_NOW);
if (!vulkan_module)
vulkan_module = dlopen("libMoltenVK.dylib", RTLD_NOW);
}
#else
// Names of libraries to search. Desktop should use libvulkan.so.1 or libvulkan.so.
static const char* search_lib_names[] = {"libvulkan.so.1", "libvulkan.so"};
for (size_t i = 0; i < sizeof(search_lib_names) / sizeof(search_lib_names[0]); i++)
{
vulkan_module = dlopen(search_lib_names[i], RTLD_NOW);
if (vulkan_module)
break;
}
#endif
if (!vulkan_module)
{
std::fprintf(stderr, "Failed to load or locate libvulkan.so\n");
return false;
}
bool required_functions_missing = false;
auto LoadFunction = [&](void** func_ptr, const char* name, bool is_required) {
*func_ptr = dlsym(vulkan_module, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_MODULE_ENTRY_POINT(name, required) LoadFunction(reinterpret_cast<void**>(&name), #name, required);
#include "VKEntryPoints.inl"
#undef VULKAN_MODULE_ENTRY_POINT
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void Vulkan::UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
}
#endif
bool Vulkan::LoadVulkanInstanceFunctions(VkInstance instance)
{
bool required_functions_missing = false;
auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
*func_ptr = vkGetInstanceProcAddr(instance, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required instance function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
#include "VKEntryPoints.inl"
#undef VULKAN_INSTANCE_ENTRY_POINT
return !required_functions_missing;
}
bool Vulkan::LoadVulkanDeviceFunctions(VkDevice device)
{
bool required_functions_missing = false;
auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
*func_ptr = vkGetDeviceProcAddr(device, name);
if (!(*func_ptr) && is_required)
{
std::fprintf(stderr, "Vulkan: Failed to load required device function %s\n", name);
required_functions_missing = true;
}
};
#define VULKAN_DEVICE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
#include "VKEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
return !required_functions_missing;
}
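// Expected call order (sketch implied by the functions above): LoadVulkanLibrary() resolves the
// module entry points, LoadVulkanInstanceFunctions() is called once a VkInstance exists,
// LoadVulkanDeviceFunctions() once a VkDevice exists, and UnloadVulkanLibrary() releases the
// module when the last reference is dropped.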

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -93,7 +93,7 @@
#endif
#include "EntryPoints.h"
#include "VKEntryPoints.h"
// We include vk_mem_alloc globally, so we don't accidentally include it before the vulkan header somewhere.
#ifdef __clang__

View File

@ -0,0 +1,654 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKShaderCache.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/GS.h"
#include "Config.h"
#include "ShaderCacheVersion.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include "common/FileSystem.h"
#include "common/MD5Digest.h"
#include "common/Path.h"
// glslang includes
#include "SPIRV/GlslangToSpv.h"
#include "StandAlone/ResourceLimits.h"
#include "glslang/Public/ShaderLang.h"
#include "fmt/format.h"
#include <cstring>
#include <fstream>
#include <memory>
// TODO: store the driver version and stuff in the shader header
std::unique_ptr<VKShaderCache> g_vulkan_shader_cache;
#pragma pack(push, 4)
struct VK_PIPELINE_CACHE_HEADER
{
u32 header_length;
u32 header_version;
u32 vendor_id;
u32 device_id;
u8 uuid[VK_UUID_SIZE];
};
struct CacheIndexEntry
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
u32 file_offset;
u32 blob_size;
};
#pragma pack(pop)
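// On-disk layout implied by the structures above: the index file begins with the cache version
// and a VK_PIPELINE_CACHE_HEADER, followed by a flat sequence of CacheIndexEntry records; each
// record's file_offset/blob_size pair locates that shader's SPIR-V words in the companion blob file.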
static bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header)
{
if (header.header_length < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Console.Error("Pipeline cache failed validation: Invalid header length");
return false;
}
if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
{
Console.Error("Pipeline cache failed validation: Invalid header version");
return false;
}
if (header.vendor_id != g_vulkan_context->GetDeviceProperties().vendorID)
{
Console.Error("Pipeline cache failed validation: Incorrect vendor ID (file: 0x%X, device: 0x%X)",
header.vendor_id, g_vulkan_context->GetDeviceProperties().vendorID);
return false;
}
if (header.device_id != g_vulkan_context->GetDeviceProperties().deviceID)
{
Console.Error("Pipeline cache failed validation: Incorrect device ID (file: 0x%X, device: 0x%X)",
header.device_id, g_vulkan_context->GetDeviceProperties().deviceID);
return false;
}
if (std::memcmp(header.uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0)
{
Console.Error("Pipeline cache failed validation: Incorrect UUID");
return false;
}
return true;
}
static void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header)
{
header->header_length = sizeof(VK_PIPELINE_CACHE_HEADER);
header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
header->vendor_id = g_vulkan_context->GetDeviceProperties().vendorID;
header->device_id = g_vulkan_context->GetDeviceProperties().deviceID;
std::memcpy(header->uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE);
}
static unsigned s_next_bad_shader_id = 1;
static bool s_glslang_initialized = false;
// Registers itself for cleanup via atexit
static bool InitializeGlslang()
{
if (s_glslang_initialized)
return true;
if (!glslang::InitializeProcess())
{
pxFailRel("Failed to initialize glslang shader compiler");
return false;
}
std::atexit(&glslang::FinalizeProcess);
s_glslang_initialized = true;
return true;
}
std::optional<VKShaderCache::SPIRVCodeVector> VKShaderCache::CompileShaderToSPV(
u32 stage, std::string_view source, bool debug)
{
if (!InitializeGlslang())
return std::nullopt;
std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(static_cast<EShLanguage>(stage));
std::unique_ptr<glslang::TProgram> program;
glslang::TShader::ForbidIncluder includer;
const EProfile profile = ECoreProfile;
const EShMessages messages =
static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgVulkanRules | (debug ? EShMsgDebugInfo : 0));
const int default_version = 450;
std::string full_source_code;
const char* pass_source_code = source.data();
int pass_source_code_length = static_cast<int>(source.size());
shader->setStringsWithLengths(&pass_source_code, &pass_source_code_length, 1);
auto DumpBadShader = [&](const char* msg) {
const std::string filename =
Path::Combine(EmuFolders::Logs, fmt::format("pcsx2_bad_shader_{}.txt", s_next_bad_shader_id++));
Console.Error("CompileShaderToSPV: %s, writing to %s", msg, filename.c_str());
std::ofstream ofs(filename, std::ofstream::out | std::ofstream::binary);
if (ofs.is_open())
{
ofs << source;
ofs << "\n";
ofs << msg << std::endl;
ofs << "Shader Info Log:" << std::endl;
ofs << shader->getInfoLog() << std::endl;
ofs << shader->getInfoDebugLog() << std::endl;
if (program)
{
ofs << "Program Info Log:" << std::endl;
ofs << program->getInfoLog() << std::endl;
ofs << program->getInfoDebugLog() << std::endl;
}
ofs.close();
}
};
if (!shader->parse(&glslang::DefaultTBuiltInResource, default_version, profile, false, true, messages, includer))
{
DumpBadShader("Failed to parse shader");
return std::nullopt;
}
// Even though there's only a single shader, we still need to link it to generate SPV
program = std::make_unique<glslang::TProgram>();
program->addShader(shader.get());
if (!program->link(messages))
{
DumpBadShader("Failed to link program");
return std::nullopt;
}
glslang::TIntermediate* intermediate = program->getIntermediate(static_cast<EShLanguage>(stage));
if (!intermediate)
{
DumpBadShader("Failed to generate SPIR-V");
return std::nullopt;
}
SPIRVCodeVector out_code;
spv::SpvBuildLogger logger;
glslang::SpvOptions options;
options.generateDebugInfo = debug;
glslang::GlslangToSpv(*intermediate, out_code, &logger, &options);
// Write out messages
if (std::strlen(shader->getInfoLog()) > 0)
Console.Warning("Shader info log: %s", shader->getInfoLog());
if (std::strlen(shader->getInfoDebugLog()) > 0)
Console.Warning("Shader debug info log: %s", shader->getInfoDebugLog());
if (std::strlen(program->getInfoLog()) > 0)
Console.Warning("Program info log: %s", program->getInfoLog());
if (std::strlen(program->getInfoDebugLog()) > 0)
Console.Warning("Program debug info log: %s", program->getInfoDebugLog());
std::string spv_messages = logger.getAllMessages();
if (!spv_messages.empty())
Console.Warning("SPIR-V conversion messages: %s", spv_messages.c_str());
return out_code;
}
VKShaderCache::VKShaderCache() = default;
VKShaderCache::~VKShaderCache()
{
CloseShaderCache();
FlushPipelineCache();
ClosePipelineCache();
}
bool VKShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high &&
source_length == key.source_length && shader_type == key.shader_type);
}
bool VKShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high ||
source_length != key.source_length || shader_type != key.shader_type);
}
bool VKShaderCache::Create()
{
pxAssert(!g_vulkan_shader_cache);
g_vulkan_shader_cache.reset(new VKShaderCache());
g_vulkan_shader_cache->Open();
return true;
}
void VKShaderCache::Destroy()
{
g_vulkan_shader_cache.reset();
}
void VKShaderCache::Open()
{
if (!GSConfig.DisableShaderCache)
{
m_pipeline_cache_filename = GetPipelineCacheBaseFileName(GSConfig.UseDebugDevice);
const std::string base_filename = GetShaderCacheBaseFileName(GSConfig.UseDebugDevice);
const std::string index_filename = base_filename + ".idx";
const std::string blob_filename = base_filename + ".bin";
if (!ReadExistingShaderCache(index_filename, blob_filename))
CreateNewShaderCache(index_filename, blob_filename);
if (!ReadExistingPipelineCache())
CreateNewPipelineCache();
}
else
{
CreateNewPipelineCache();
}
}
VkPipelineCache VKShaderCache::GetPipelineCache(bool set_dirty /*= true*/)
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return VK_NULL_HANDLE;
m_pipeline_cache_dirty |= set_dirty;
return m_pipeline_cache;
}
bool VKShaderCache::CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Console.Warning("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFilePath(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Console.Warning("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFilePath(blob_filename.c_str());
}
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!m_index_file)
{
Console.Error("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 file_version = SHADER_CACHE_VERSION;
VK_PIPELINE_CACHE_HEADER header;
FillPipelineCacheHeader(&header);
if (std::fwrite(&file_version, sizeof(file_version), 1, m_index_file) != 1 ||
std::fwrite(&header, sizeof(header), 1, m_index_file) != 1)
{
Console.Error("Failed to write header to index file '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFilePath(index_filename.c_str());
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!m_blob_file)
{
Console.Error("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFilePath(index_filename.c_str());
return false;
}
return true;
}
bool VKShaderCache::ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!m_index_file)
{
// Special case here: when there's a sharing violation (i.e. two instances running),
// we don't want to blow away the cache, so just continue without a cache.
if (errno == EACCES)
{
Console.WriteLn("Failed to open shader cache index with EACCES, are you running two instances?");
return true;
}
return false;
}
u32 file_version = 0;
if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != SHADER_CACHE_VERSION)
{
Console.Error("Bad file/data version in '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
VK_PIPELINE_CACHE_HEADER header;
if (std::fread(&header, sizeof(header), 1, m_index_file) != 1 || !ValidatePipelineCacheHeader(header))
{
Console.Error("Mismatched pipeline cache header in '%s' (GPU/driver changed?)", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!m_blob_file)
{
Console.Error("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
std::fseek(m_blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
for (;;)
{
CacheIndexEntry entry;
if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
(entry.file_offset + entry.blob_size) > blob_file_size)
{
if (std::feof(m_index_file))
break;
Console.Error("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
m_index.clear();
std::fclose(m_blob_file);
m_blob_file = nullptr;
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length, entry.shader_type};
const CacheIndexData data{entry.file_offset, entry.blob_size};
m_index.emplace(key, data);
}
// Seek to the end so any new entries are appended rather than overwriting existing ones.
std::fseek(m_index_file, 0, SEEK_END);
Console.WriteLn("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
return true;
}
void VKShaderCache::CloseShaderCache()
{
if (m_index_file)
{
std::fclose(m_index_file);
m_index_file = nullptr;
}
if (m_blob_file)
{
std::fclose(m_blob_file);
m_blob_file = nullptr;
}
}
bool VKShaderCache::CreateNewPipelineCache()
{
if (!m_pipeline_cache_filename.empty() && FileSystem::FileExists(m_pipeline_cache_filename.c_str()))
{
Console.Warning("Removing existing pipeline cache '%s'", m_pipeline_cache_filename.c_str());
FileSystem::DeleteFilePath(m_pipeline_cache_filename.c_str());
}
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
m_pipeline_cache_dirty = true;
return true;
}
bool VKShaderCache::ReadExistingPipelineCache()
{
std::optional<std::vector<u8>> data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
if (!data.has_value())
return false;
if (data->size() < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Console.Error("Pipeline cache at '%s' is too small", m_pipeline_cache_filename.c_str());
return false;
}
VK_PIPELINE_CACHE_HEADER header;
std::memcpy(&header, data->data(), sizeof(header));
if (!ValidatePipelineCacheHeader(header))
return false;
const VkPipelineCacheCreateInfo ci{
VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data->size(), data->data()};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
return true;
}
bool VKShaderCache::FlushPipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE || !m_pipeline_cache_dirty || m_pipeline_cache_filename.empty())
return false;
size_t data_size;
VkResult res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
return false;
}
std::vector<u8> data(data_size);
res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, data.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
return false;
}
data.resize(data_size);
// Save disk writes if it hasn't changed, think of the poor SSDs.
FILESYSTEM_STAT_DATA sd;
if (!FileSystem::StatFile(m_pipeline_cache_filename.c_str(), &sd) || sd.Size != static_cast<s64>(data_size))
{
Console.WriteLn("Writing %zu bytes to '%s'", data_size, m_pipeline_cache_filename.c_str());
if (!FileSystem::WriteBinaryFile(m_pipeline_cache_filename.c_str(), data.data(), data.size()))
{
Console.Error("Failed to write pipeline cache to '%s'", m_pipeline_cache_filename.c_str());
return false;
}
}
else
{
Console.WriteLn("Skipping updating pipeline cache '%s' due to no changes.", m_pipeline_cache_filename.c_str());
}
m_pipeline_cache_dirty = false;
return true;
}
void VKShaderCache::ClosePipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return;
vkDestroyPipelineCache(g_vulkan_context->GetDevice(), m_pipeline_cache, nullptr);
m_pipeline_cache = VK_NULL_HANDLE;
}
std::string VKShaderCache::GetShaderCacheBaseFileName(bool debug)
{
std::string base_filename = "vulkan_shaders";
if (debug)
base_filename += "_debug";
return Path::Combine(EmuFolders::Cache, base_filename);
}
std::string VKShaderCache::GetPipelineCacheBaseFileName(bool debug)
{
std::string base_filename = "vulkan_pipelines";
if (debug)
base_filename += "_debug";
base_filename += ".bin";
return Path::Combine(EmuFolders::Cache, base_filename);
}
VKShaderCache::CacheIndexKey VKShaderCache::GetCacheKey(u32 type, const std::string_view& shader_code)
{
union HashParts
{
struct
{
u64 hash_low;
u64 hash_high;
};
u8 hash[16];
};
HashParts h;
MD5Digest digest;
digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
digest.Final(h.hash);
return CacheIndexKey{h.hash_low, h.hash_high, static_cast<u32>(shader_code.length()), type};
}
std::optional<VKShaderCache::SPIRVCodeVector> VKShaderCache::GetShaderSPV(u32 type, std::string_view shader_code)
{
const auto key = GetCacheKey(type, shader_code);
auto iter = m_index.find(key);
if (iter == m_index.end())
return CompileAndAddShaderSPV(key, shader_code);
std::optional<SPIRVCodeVector> spv = SPIRVCodeVector(iter->second.blob_size);
if (std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(spv->data(), sizeof(SPIRVCodeType), iter->second.blob_size, m_blob_file) != iter->second.blob_size)
{
Console.Error("Read blob from file failed, recompiling");
spv = CompileShaderToSPV(type, shader_code, GSConfig.UseDebugDevice);
}
return spv;
}
VkShaderModule VKShaderCache::GetShaderModule(u32 type, std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = GetShaderSPV(type, shader_code);
if (!spv.has_value())
return VK_NULL_HANDLE;
const VkShaderModuleCreateInfo ci{
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0, spv->size() * sizeof(SPIRVCodeType), spv->data()};
VkShaderModule mod;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &ci, nullptr, &mod);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule() failed: ");
return VK_NULL_HANDLE;
}
return mod;
}
VkShaderModule VKShaderCache::GetVertexShader(std::string_view shader_code)
{
return GetShaderModule(EShLangVertex, std::move(shader_code));
}
VkShaderModule VKShaderCache::GetFragmentShader(std::string_view shader_code)
{
return GetShaderModule(EShLangFragment, std::move(shader_code));
}
VkShaderModule VKShaderCache::GetComputeShader(std::string_view shader_code)
{
return GetShaderModule(EShLangCompute, std::move(shader_code));
}
std::optional<VKShaderCache::SPIRVCodeVector> VKShaderCache::CompileAndAddShaderSPV(
const CacheIndexKey& key, std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = CompileShaderToSPV(key.shader_type, shader_code, GSConfig.UseDebugDevice);
if (!spv.has_value())
return {};
if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
return spv;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
data.blob_size = static_cast<u32>(spv->size());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.shader_type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(spv->data(), sizeof(SPIRVCodeType), entry.blob_size, m_blob_file) != entry.blob_size ||
std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
std::fflush(m_index_file) != 0)
{
Console.Error("Failed to write shader blob to file");
return spv;
}
m_index.emplace(key, data);
return spv;
}

View File

@ -0,0 +1,114 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "common/HashCombine.h"
#include <cstdio>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
class VKShaderCache
{
public:
~VKShaderCache();
static bool Create();
static void Destroy();
/// Returns a handle to the pipeline cache. Set set_dirty to true if you are planning on writing to it externally.
VkPipelineCache GetPipelineCache(bool set_dirty = true);
/// Writes pipeline cache to file, saving all newly compiled pipelines.
bool FlushPipelineCache();
VkShaderModule GetVertexShader(std::string_view shader_code);
VkShaderModule GetFragmentShader(std::string_view shader_code);
VkShaderModule GetComputeShader(std::string_view shader_code);
private:
// SPIR-V compiled code type
using SPIRVCodeType = u32;
using SPIRVCodeVector = std::vector<SPIRVCodeType>;
struct CacheIndexKey
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
HashCombine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.shader_type);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
VKShaderCache();
static std::string GetShaderCacheBaseFileName(bool debug);
static std::string GetPipelineCacheBaseFileName(bool debug);
static CacheIndexKey GetCacheKey(u32 type, const std::string_view& shader_code);
static std::optional<VKShaderCache::SPIRVCodeVector> CompileShaderToSPV(
u32 stage, std::string_view source, bool debug);
void Open();
bool CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename);
bool ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename);
void CloseShaderCache();
bool CreateNewPipelineCache();
bool ReadExistingPipelineCache();
void ClosePipelineCache();
std::optional<SPIRVCodeVector> GetShaderSPV(u32 type, std::string_view shader_code);
std::optional<SPIRVCodeVector> CompileAndAddShaderSPV(const CacheIndexKey& key, std::string_view shader_code);
VkShaderModule GetShaderModule(u32 type, std::string_view shader_code);
std::FILE* m_index_file = nullptr;
std::FILE* m_blob_file = nullptr;
std::string m_pipeline_cache_filename;
CacheIndex m_index;
VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;
bool m_pipeline_cache_dirty = false;
};
extern std::unique_ptr<VKShaderCache> g_vulkan_shader_cache;
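// Usage sketch (illustrative): bring up the cache after the context exists and compile shaders.
//   VKShaderCache::Create();
//   const VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(fs_glsl); // fs_glsl: GLSL source string (placeholder name)
//   // Pipelines created against GetPipelineCache() are persisted to disk by FlushPipelineCache().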

View File

@ -0,0 +1,322 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKStreamBuffer.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "common/Align.h"
#include "common/Assertions.h"
#include "common/Console.h"
VKStreamBuffer::VKStreamBuffer() = default;
VKStreamBuffer::VKStreamBuffer(VKStreamBuffer&& move)
: m_size(move.m_size)
, m_current_offset(move.m_current_offset)
, m_current_space(move.m_current_space)
, m_current_gpu_position(move.m_current_gpu_position)
, m_allocation(move.m_allocation)
, m_buffer(move.m_buffer)
, m_host_pointer(move.m_host_pointer)
, m_tracked_fences(std::move(move.m_tracked_fences))
{
move.m_size = 0;
move.m_current_offset = 0;
move.m_current_space = 0;
move.m_current_gpu_position = 0;
move.m_allocation = VK_NULL_HANDLE;
move.m_buffer = VK_NULL_HANDLE;
move.m_host_pointer = nullptr;
}
VKStreamBuffer::~VKStreamBuffer()
{
if (IsValid())
Destroy(true);
}
VKStreamBuffer& VKStreamBuffer::operator=(VKStreamBuffer&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_size, move.m_size);
std::swap(m_current_offset, move.m_current_offset);
std::swap(m_current_space, move.m_current_space);
std::swap(m_current_gpu_position, move.m_current_gpu_position);
std::swap(m_allocation, move.m_allocation);
std::swap(m_buffer, move.m_buffer);
std::swap(m_host_pointer, move.m_host_pointer);
std::swap(m_tracked_fences, move.m_tracked_fences);
return *this;
}
bool VKStreamBuffer::Create(VkBufferUsageFlags usage, u32 size)
{
const VkBufferCreateInfo bci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, nullptr, 0, static_cast<VkDeviceSize>(size),
usage, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr};
VmaAllocationCreateInfo aci = {};
aci.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
aci.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
aci.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
VmaAllocationInfo ai = {};
VkBuffer new_buffer = VK_NULL_HANDLE;
VmaAllocation new_allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(g_vulkan_context->GetAllocator(), &bci, &aci, &new_buffer, &new_allocation, &ai);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
if (IsValid())
Destroy(true);
// Replace with the new buffer
m_size = size;
m_current_offset = 0;
m_current_gpu_position = 0;
m_tracked_fences.clear();
m_allocation = new_allocation;
m_buffer = new_buffer;
m_host_pointer = static_cast<u8*>(ai.pMappedData);
return true;
}
void VKStreamBuffer::Destroy(bool defer)
{
if (m_buffer != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferBufferDestruction(m_buffer, m_allocation);
else
vmaDestroyBuffer(g_vulkan_context->GetAllocator(), m_buffer, m_allocation);
}
m_size = 0;
m_current_offset = 0;
m_current_gpu_position = 0;
m_tracked_fences.clear();
m_buffer = VK_NULL_HANDLE;
m_allocation = VK_NULL_HANDLE;
m_host_pointer = nullptr;
}
bool VKStreamBuffer::ReserveMemory(u32 num_bytes, u32 alignment)
{
const u32 required_bytes = num_bytes + alignment;
// Check for sane allocations
if (required_bytes > m_size)
{
Console.Error("Attempting to allocate %u bytes from a %u byte stream buffer", static_cast<u32>(num_bytes),
static_cast<u32>(m_size));
pxFailRel("Stream buffer overflow");
return false;
}
UpdateGPUPosition();
// Is the GPU behind or up to date with our current offset?
if (m_current_offset >= m_current_gpu_position)
{
const u32 remaining_bytes = m_size - m_current_offset;
if (required_bytes <= remaining_bytes)
{
// Place at the current position, after the GPU position.
m_current_offset = Common::AlignUp(m_current_offset, alignment);
m_current_space = m_size - m_current_offset;
return true;
}
// Check for space at the start of the buffer
// We use < here because we don't want to have the case of m_current_offset ==
// m_current_gpu_position. That would mean the code above would assume the
// GPU has caught up to us, which it hasn't.
if (required_bytes < m_current_gpu_position)
{
// Reset offset to zero, since we're allocating behind the gpu now
m_current_offset = 0;
m_current_space = m_current_gpu_position - 1;
return true;
}
}
// Is the GPU ahead of our current offset?
if (m_current_offset < m_current_gpu_position)
{
// We have from m_current_offset..m_current_gpu_position space to use.
const u32 remaining_bytes = m_current_gpu_position - m_current_offset;
if (required_bytes < remaining_bytes)
{
// Place at the current position, since this is still behind the GPU.
m_current_offset = Common::AlignUp(m_current_offset, alignment);
m_current_space = m_current_gpu_position - m_current_offset - 1;
return true;
}
}
// Can we find a fence to wait on that will give us enough memory?
if (WaitForClearSpace(required_bytes))
{
const u32 align_diff = Common::AlignUp(m_current_offset, alignment) - m_current_offset;
m_current_offset += align_diff;
m_current_space -= align_diff;
return true;
}
// We tried everything we could, and still couldn't get anything. This means that too much space
// in the buffer is being used by the command buffer currently being recorded. Therefore, the
// only option is to execute it, and wait until it's done.
return false;
}
void VKStreamBuffer::CommitMemory(u32 final_num_bytes)
{
pxAssert((m_current_offset + final_num_bytes) <= m_size);
pxAssert(final_num_bytes <= m_current_space);
// For non-coherent mappings, flush the memory range
vmaFlushAllocation(g_vulkan_context->GetAllocator(), m_allocation, m_current_offset, final_num_bytes);
m_current_offset += final_num_bytes;
m_current_space -= final_num_bytes;
UpdateCurrentFencePosition();
}
void VKStreamBuffer::UpdateCurrentFencePosition()
{
// Has the offset changed since the last fence?
const u64 counter = g_vulkan_context->GetCurrentFenceCounter();
if (!m_tracked_fences.empty() && m_tracked_fences.back().first == counter)
{
// Still haven't executed a command buffer, so just update the offset.
m_tracked_fences.back().second = m_current_offset;
return;
}
// A new command buffer has started since the last update, so add a fresh tracking entry.
m_tracked_fences.emplace_back(counter, m_current_offset);
}
void VKStreamBuffer::UpdateGPUPosition()
{
auto start = m_tracked_fences.begin();
auto end = start;
const u64 completed_counter = g_vulkan_context->GetCompletedFenceCounter();
while (end != m_tracked_fences.end() && completed_counter >= end->first)
{
m_current_gpu_position = end->second;
++end;
}
if (start != end)
{
m_tracked_fences.erase(start, end);
if (m_current_offset == m_current_gpu_position)
{
// GPU is all caught up now.
m_current_offset = 0;
m_current_gpu_position = 0;
m_current_space = m_size;
}
}
}
bool VKStreamBuffer::WaitForClearSpace(u32 num_bytes)
{
u32 new_offset = 0;
u32 new_space = 0;
u32 new_gpu_position = 0;
auto iter = m_tracked_fences.begin();
for (; iter != m_tracked_fences.end(); ++iter)
{
// Would this fence bring us in line with the GPU?
// This is the "last resort" case, where a command buffer execution has been forced
// after no additional data has been written to it, so we can assume that after the
// fence has been signaled the entire buffer is now consumed.
u32 gpu_position = iter->second;
if (m_current_offset == gpu_position)
{
new_offset = 0;
new_space = m_size;
new_gpu_position = 0;
break;
}
// Assuming that we wait for this fence, are we allocating in front of the GPU?
if (m_current_offset > gpu_position)
{
// This would suggest the GPU has now followed us and wrapped around, so we have from
// m_current_offset..m_size free, as well as 0..gpu_position.
const u32 remaining_space_after_offset = m_size - m_current_offset;
if (remaining_space_after_offset >= num_bytes)
{
// Switch to allocating in front of the GPU, using the remainder of the buffer.
new_offset = m_current_offset;
new_space = m_size - m_current_offset;
new_gpu_position = gpu_position;
break;
}
// We can wrap around to the start, behind the GPU, if there is enough space.
// We use > here because otherwise we'd end up lining up with the GPU, and then the
// allocator would assume that the GPU has consumed what we just wrote.
if (gpu_position > num_bytes)
{
new_offset = 0;
new_space = gpu_position - 1;
new_gpu_position = gpu_position;
break;
}
}
else
{
// We're currently allocating behind the GPU. This would give us between the current
// offset and the GPU position worth of space to work with. Again, > because we can't
// align the GPU position with the buffer offset.
u32 available_space_inbetween = gpu_position - m_current_offset;
if (available_space_inbetween > num_bytes)
{
// Leave the offset as-is, but update the GPU position.
new_offset = m_current_offset;
new_space = available_space_inbetween - 1;
new_gpu_position = gpu_position;
break;
}
}
}
// Did any fences satisfy this condition?
// Has the command buffer been executed yet? If not, the caller should execute it.
if (iter == m_tracked_fences.end() || iter->first == g_vulkan_context->GetCurrentFenceCounter())
return false;
// Wait until this fence is signaled. This will fire the callback, updating the GPU position.
g_vulkan_context->WaitForFenceCounter(iter->first);
m_tracked_fences.erase(
m_tracked_fences.begin(), m_current_offset == iter->second ? m_tracked_fences.end() : ++iter);
m_current_offset = new_offset;
m_current_space = new_space;
m_current_gpu_position = new_gpu_position;
return true;
}

View File

@ -0,0 +1,69 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "vk_mem_alloc.h"
#include <deque>
#include <memory>
class VKStreamBuffer
{
public:
VKStreamBuffer();
VKStreamBuffer(VKStreamBuffer&& move);
VKStreamBuffer(const VKStreamBuffer&) = delete;
~VKStreamBuffer();
VKStreamBuffer& operator=(VKStreamBuffer&& move);
VKStreamBuffer& operator=(const VKStreamBuffer&) = delete;
__fi bool IsValid() const { return (m_buffer != VK_NULL_HANDLE); }
__fi VkBuffer GetBuffer() const { return m_buffer; }
__fi u8* GetHostPointer() const { return m_host_pointer; }
__fi u8* GetCurrentHostPointer() const { return m_host_pointer + m_current_offset; }
__fi u32 GetCurrentSize() const { return m_size; }
__fi u32 GetCurrentSpace() const { return m_current_space; }
__fi u32 GetCurrentOffset() const { return m_current_offset; }
bool Create(VkBufferUsageFlags usage, u32 size);
void Destroy(bool defer);
bool ReserveMemory(u32 num_bytes, u32 alignment);
void CommitMemory(u32 final_num_bytes);
private:
bool AllocateBuffer(VkBufferUsageFlags usage, u32 size);
void UpdateCurrentFencePosition();
void UpdateGPUPosition();
// Waits for as many fences as needed to allocate num_bytes bytes from the buffer.
bool WaitForClearSpace(u32 num_bytes);
u32 m_size = 0;
u32 m_current_offset = 0;
u32 m_current_space = 0;
u32 m_current_gpu_position = 0;
VmaAllocation m_allocation = VK_NULL_HANDLE;
VkBuffer m_buffer = VK_NULL_HANDLE;
u8* m_host_pointer = nullptr;
// List of fences and the corresponding positions in the buffer
std::deque<std::pair<u64, u32>> m_tracked_fences;
};
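// Usage sketch (illustrative, placeholder names): streaming data through an already-created buffer.
//   if (!buf.ReserveMemory(size, align))
//   { /* execute the current command buffer to free up space, then retry */ }
//   const u32 offset = buf.GetCurrentOffset();
//   std::memcpy(buf.GetCurrentHostPointer(), src, size);
//   buf.CommitMemory(size);
//   // bind buf.GetBuffer() at 'offset' for the subsequent draw/dispatch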

View File

@ -0,0 +1,613 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKSwapChain.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "common/Assertions.h"
#include "common/CocoaTools.h"
#include "common/Console.h"
#include <algorithm>
#include <array>
#include <cmath>
#if defined(VK_USE_PLATFORM_XLIB_KHR)
#include <X11/Xlib.h>
#endif
VKSwapChain::VKSwapChain(const WindowInfo& wi, VkSurfaceKHR surface, VkPresentModeKHR preferred_present_mode,
std::optional<bool> exclusive_fullscreen_control)
: m_window_info(wi)
, m_surface(surface)
, m_preferred_present_mode(preferred_present_mode)
, m_exclusive_fullscreen_control(exclusive_fullscreen_control)
{
}
VKSwapChain::~VKSwapChain()
{
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
}
VkSurfaceKHR VKSwapChain::CreateVulkanSurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi)
{
#if defined(VK_USE_PLATFORM_WIN32_KHR)
if (wi->type == WindowInfo::Type::Win32)
{
VkWin32SurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkWin32SurfaceCreateFlagsKHR flags
nullptr, // HINSTANCE hinstance
reinterpret_cast<HWND>(wi->window_handle) // HWND hwnd
};
VkSurfaceKHR surface;
VkResult res = vkCreateWin32SurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWin32SurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_XLIB_KHR)
if (wi->type == WindowInfo::Type::X11)
{
VkXlibSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkXlibSurfaceCreateFlagsKHR flags
static_cast<Display*>(wi->display_connection), // Display* dpy
reinterpret_cast<Window>(wi->window_handle) // Window window
};
VkSurfaceKHR surface;
VkResult res = vkCreateXlibSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateXlibSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
if (wi->type == WindowInfo::Type::Wayland)
{
VkWaylandSurfaceCreateInfoKHR surface_create_info = {VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, nullptr,
0, static_cast<struct wl_display*>(wi->display_connection),
static_cast<struct wl_surface*>(wi->window_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateWaylandSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWaylandSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
if (wi->type == WindowInfo::Type::MacOS)
{
if (!wi->surface_handle && !CocoaTools::CreateMetalLayer(wi))
return VK_NULL_HANDLE;
VkMetalSurfaceCreateInfoEXT surface_create_info = {VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, nullptr, 0,
static_cast<const CAMetalLayer*>(wi->surface_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateMetalSurfaceEXT(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMetalSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#elif defined(VK_USE_PLATFORM_MACOS_MVK)
if (wi->type == WindowInfo::Type::MacOS)
{
VkMacOSSurfaceCreateInfoMVK surface_create_info = {
VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK, nullptr, 0, wi->window_handle};
VkSurfaceKHR surface;
VkResult res = vkCreateMacOSSurfaceMVK(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMacOSSurfaceMVK failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
return VK_NULL_HANDLE;
}
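// Illustrative call site (editor's sketch, not part of this commit): create a surface for
// the host window, then wrap it in a swap chain. `wi` and the present mode are assumptions.
WindowInfo wi = {}; // assumed to be filled in by the host/display code
VkSurfaceKHR surface = VKSwapChain::CreateVulkanSurface(
	g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &wi);
if (surface != VK_NULL_HANDLE)
{
	std::unique_ptr<VKSwapChain> swap_chain =
		VKSwapChain::Create(wi, surface, VK_PRESENT_MODE_FIFO_KHR, std::nullopt);
	if (!swap_chain)
		VKSwapChain::DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &wi, surface);
}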
void VKSwapChain::DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface)
{
vkDestroySurfaceKHR(instance, surface, nullptr);
#if defined(__APPLE__)
if (wi->type == WindowInfo::Type::MacOS && wi->surface_handle)
CocoaTools::DestroyMetalLayer(wi);
#endif
}
std::unique_ptr<VKSwapChain> VKSwapChain::Create(const WindowInfo& wi, VkSurfaceKHR surface,
VkPresentModeKHR preferred_present_mode, std::optional<bool> exclusive_fullscreen_control)
{
std::unique_ptr<VKSwapChain> swap_chain = std::unique_ptr<VKSwapChain>(
new VKSwapChain(wi, surface, preferred_present_mode, exclusive_fullscreen_control));
if (!swap_chain->CreateSwapChain() || !swap_chain->SetupSwapChainImages())
return nullptr;
return swap_chain;
}
bool VKSwapChain::SelectSurfaceFormat()
{
u32 format_count;
VkResult res =
vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, nullptr);
if (res != VK_SUCCESS || format_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, surface_formats.data());
pxAssert(res == VK_SUCCESS);
// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
{
m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM;
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
// Otherwise, take the first reported format, mapped to its linear equivalent below.
for (const VkSurfaceFormatKHR& surface_format : surface_formats)
{
// Some drivers seem to return a SRGB format here (Intel Mesa).
// This results in gamma correction when presenting to the screen, which we don't want.
// Use a linear format instead, if this is the case.
m_surface_format.format = Vulkan::GetLinearFormat(surface_format.format);
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
pxFailRel("Failed to find a suitable format for swap chain buffers.");
return false;
}
bool VKSwapChain::SelectPresentMode()
{
VkResult res;
u32 mode_count;
res = vkGetPhysicalDeviceSurfacePresentModesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, nullptr);
if (res != VK_SUCCESS || mode_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, present_modes.data());
pxAssert(res == VK_SUCCESS);
// Returns true if the given present mode is supported.
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
auto it = std::find_if(present_modes.begin(), present_modes.end(),
[check_mode](VkPresentModeKHR mode) { return check_mode == mode; });
return it != present_modes.end();
};
// Use preferred mode if available.
if (CheckForMode(m_preferred_present_mode))
{
m_present_mode = m_preferred_present_mode;
return true;
}
// Prefer mailbox over fifo for adaptive vsync/no-vsync.
if ((m_preferred_present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
m_preferred_present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) &&
CheckForMode(VK_PRESENT_MODE_MAILBOX_KHR))
{
m_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
return true;
}
// Fallback to FIFO if we're using any kind of vsync.
if (m_preferred_present_mode == VK_PRESENT_MODE_FIFO_KHR ||
m_preferred_present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
{
// This should never fail, FIFO is mandated.
if (CheckForMode(VK_PRESENT_MODE_FIFO_KHR))
{
m_present_mode = VK_PRESENT_MODE_FIFO_KHR;
return true;
}
}
// Fall back to whatever is available.
m_present_mode = present_modes[0];
return true;
}
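// Editor's sketch: how a caller might map a host vsync setting onto the preferred present
// mode handed to the constructor. The VsyncMode enum is an assumption, not from this commit.
static VkPresentModeKHR PreferredPresentModeForVsync(VsyncMode mode)
{
	switch (mode)
	{
		case VsyncMode::On:       return VK_PRESENT_MODE_FIFO_KHR;         // hard vsync
		case VsyncMode::Adaptive: return VK_PRESENT_MODE_FIFO_RELAXED_KHR; // tear when late
		case VsyncMode::Off:
		default:                  return VK_PRESENT_MODE_IMMEDIATE_KHR;    // no vsync
	}
}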
bool VKSwapChain::CreateSwapChain()
{
// Look up surface properties to determine image count and dimensions
VkSurfaceCapabilitiesKHR surface_capabilities;
VkResult res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
g_vulkan_context->GetPhysicalDevice(), m_surface, &surface_capabilities);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: ");
return false;
}
// Select swap chain format and present mode
if (!SelectSurfaceFormat() || !SelectPresentMode())
return false;
DevCon.WriteLn("(SwapChain) Preferred present mode: %s, selected: %s",
Vulkan::PresentModeToString(m_preferred_present_mode), Vulkan::PresentModeToString(m_present_mode));
// Select the number of images in the swap chain; we prefer one extra buffer to work on in the background.
u32 image_count = std::max(surface_capabilities.minImageCount + 1u, 2u);
// maxImageCount can be zero, in which case there isn't an upper limit on the number of buffers.
if (surface_capabilities.maxImageCount > 0)
image_count = std::min(image_count, surface_capabilities.maxImageCount);
// Determine the dimensions of the swap chain. A currentExtent of 0xFFFFFFFF means the
// surface size will be determined by the extent we specify here.
VkExtent2D size = surface_capabilities.currentExtent;
if (size.width == UINT32_MAX)
{
size.width = m_window_info.surface_width;
size.height = m_window_info.surface_height;
}
size.width =
std::clamp(size.width, surface_capabilities.minImageExtent.width, surface_capabilities.maxImageExtent.width);
size.height =
std::clamp(size.height, surface_capabilities.minImageExtent.height, surface_capabilities.maxImageExtent.height);
// Prefer identity transform if possible
VkSurfaceTransformFlagBitsKHR transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
if (!(surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR))
transform = surface_capabilities.currentTransform;
VkCompositeAlphaFlagBitsKHR alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
if (!(surface_capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR))
{
// Opaque isn't supported; prefer inherit if available (otherwise only pre/post-multiplied remain).
if (surface_capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
alpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
}
// Select swap chain usage flags; we need a colour attachment that can also be a transfer destination.
VkImageUsageFlags image_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if ((surface_capabilities.supportedUsageFlags & image_usage) != image_usage)
{
Console.Error("Vulkan: Swap chain does not support usage as color attachment");
return false;
}
// Store the old/current swap chain when recreating for resize
// Old swap chain is destroyed regardless of whether the create call succeeds
VkSwapchainKHR old_swap_chain = m_swap_chain;
m_swap_chain = VK_NULL_HANDLE;
// Now we can actually create the swap chain
VkSwapchainCreateInfoKHR swap_chain_info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, nullptr, 0, m_surface,
image_count, m_surface_format.format, m_surface_format.colorSpace, size, 1u, image_usage,
VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, transform, alpha, m_present_mode, VK_TRUE, old_swap_chain};
std::array<uint32_t, 2> indices = {{
g_vulkan_context->GetGraphicsQueueFamilyIndex(),
g_vulkan_context->GetPresentQueueFamilyIndex(),
}};
if (g_vulkan_context->GetGraphicsQueueFamilyIndex() != g_vulkan_context->GetPresentQueueFamilyIndex())
{
swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swap_chain_info.queueFamilyIndexCount = 2;
swap_chain_info.pQueueFamilyIndices = indices.data();
}
#ifdef _WIN32
VkSurfaceFullScreenExclusiveInfoEXT exclusive_info = {VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT};
if (g_vulkan_context->GetOptionalExtensions().vk_ext_full_screen_exclusive)
{
exclusive_info.fullScreenExclusive =
m_exclusive_fullscreen_control.has_value() ?
(m_exclusive_fullscreen_control.value() ? VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT :
VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT) :
VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT;
Vulkan::AddPointerToChain(&swap_chain_info, &exclusive_info);
}
else if (m_exclusive_fullscreen_control.has_value())
{
Console.Error("Exclusive fullscreen control requested, but VK_EXT_full_screen_exclusive is not supported.");
}
#else
if (m_exclusive_fullscreen_control.has_value())
Console.Error("Exclusive fullscreen control requested, but is not supported on this platform.");
#endif
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
return false;
}
// Now destroy the old swap chain, since it's been recreated.
// We can do this immediately since all work should have been completed before calling resize.
if (old_swap_chain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
m_window_info.surface_width = std::max(1u, size.width);
m_window_info.surface_height = std::max(1u, size.height);
return true;
}
bool VKSwapChain::SetupSwapChainImages()
{
pxAssert(m_images.empty());
u32 image_count;
VkResult res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
return false;
}
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data());
pxAssert(res == VK_SUCCESS);
m_load_render_pass =
g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED, VK_ATTACHMENT_LOAD_OP_LOAD);
m_clear_render_pass =
g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED, VK_ATTACHMENT_LOAD_OP_CLEAR);
if (m_load_render_pass == VK_NULL_HANDLE || m_clear_render_pass == VK_NULL_HANDLE)
{
pxFailRel("Failed to get swap chain render passes.");
return false;
}
m_images.reserve(image_count);
m_current_image = 0;
for (u32 i = 0; i < image_count; i++)
{
SwapChainImage image;
image.image = images[i];
// Create texture object, which creates a view of the backbuffer
if (!image.texture.Adopt(image.image, VK_IMAGE_VIEW_TYPE_2D, m_window_info.surface_width,
m_window_info.surface_height, 1, 1, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT))
{
return false;
}
image.framebuffer = image.texture.CreateFramebuffer(m_load_render_pass);
if (image.framebuffer == VK_NULL_HANDLE)
return false;
m_images.emplace_back(std::move(image));
}
m_semaphores.reserve(image_count);
m_current_semaphore = (image_count - 1);
for (u32 i = 0; i < image_count; i++)
{
ImageSemaphores sema;
const VkSemaphoreCreateInfo semaphore_info = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr, &sema.available_semaphore);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
return false;
}
res = vkCreateSemaphore(
g_vulkan_context->GetDevice(), &semaphore_info, nullptr, &sema.rendering_finished_semaphore);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
vkDestroySemaphore(g_vulkan_context->GetDevice(), sema.available_semaphore, nullptr);
return false;
}
m_semaphores.push_back(sema);
}
return true;
}
void VKSwapChain::DestroySwapChainImages()
{
for (auto& it : m_images)
{
// Images themselves are cleaned up by the swap chain object
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), it.framebuffer, nullptr);
}
m_images.clear();
for (auto& it : m_semaphores)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), it.rendering_finished_semaphore, nullptr);
vkDestroySemaphore(g_vulkan_context->GetDevice(), it.available_semaphore, nullptr);
}
m_semaphores.clear();
m_image_acquire_result.reset();
}
void VKSwapChain::DestroySwapChain()
{
if (m_swap_chain == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr);
m_swap_chain = VK_NULL_HANDLE;
m_window_info.surface_width = 0;
m_window_info.surface_height = 0;
}
VkResult VKSwapChain::AcquireNextImage()
{
if (m_image_acquire_result.has_value())
return m_image_acquire_result.value();
if (!m_swap_chain)
return VK_ERROR_SURFACE_LOST_KHR;
// Use a different semaphore for each image.
m_current_semaphore = (m_current_semaphore + 1) % static_cast<u32>(m_semaphores.size());
const VkResult res = vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX,
m_semaphores[m_current_semaphore].available_semaphore, VK_NULL_HANDLE, &m_current_image);
m_image_acquire_result = res;
return res;
}
void VKSwapChain::ReleaseCurrentImage()
{
m_image_acquire_result.reset();
}
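// Editor's sketch of how a presenter could consume AcquireNextImage(); the surrounding
// frame loop and error policy are assumptions, not part of this commit.
VkResult res = swap_chain->AcquireNextImage();
if (res == VK_ERROR_OUT_OF_DATE_KHR || res == VK_SUBOPTIMAL_KHR)
{
	// The swap chain no longer matches the surface: drop the cached result and rebuild.
	swap_chain->ReleaseCurrentImage();
	if (swap_chain->RecreateSwapChain())
		res = swap_chain->AcquireNextImage();
}
if (res != VK_SUCCESS)
	LOG_VULKAN_ERROR(res, "vkAcquireNextImageKHR failed: ");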
bool VKSwapChain::ResizeSwapChain(u32 new_width, u32 new_height, float new_scale)
{
DestroySwapChainImages();
if (new_width != 0 && new_height != 0)
{
m_window_info.surface_width = new_width;
m_window_info.surface_height = new_height;
}
m_window_info.surface_scale = new_scale;
if (!CreateSwapChain() || !SetupSwapChainImages())
{
DestroySwapChainImages();
DestroySwapChain();
return false;
}
return true;
}
bool VKSwapChain::RecreateSwapChain()
{
DestroySwapChainImages();
if (!CreateSwapChain() || !SetupSwapChainImages())
{
DestroySwapChainImages();
DestroySwapChain();
return false;
}
return true;
}
bool VKSwapChain::SetVSync(VkPresentModeKHR preferred_mode)
{
if (m_preferred_present_mode == preferred_mode)
return true;
// Recreate the swap chain with the new present mode.
m_preferred_present_mode = preferred_mode;
return RecreateSwapChain();
}
bool VKSwapChain::RecreateSurface(const WindowInfo& new_wi)
{
// Destroy the old swap chain, images, and surface.
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
// Re-create the surface with the new native handle
m_window_info = new_wi;
m_surface = CreateVulkanSurface(
g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &m_window_info);
if (m_surface == VK_NULL_HANDLE)
return false;
// The validation layers get angry at us if we don't call this before creating the swapchain.
VkBool32 present_supported = VK_TRUE;
VkResult res = vkGetPhysicalDeviceSurfaceSupportKHR(g_vulkan_context->GetPhysicalDevice(),
g_vulkan_context->GetPresentQueueFamilyIndex(), m_surface, &present_supported);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: ");
return false;
}
if (!present_supported)
{
pxFailRel("Recreated surface does not support presenting.");
return false;
}
// Finally re-create the swap chain
if (!CreateSwapChain())
return false;
if (!SetupSwapChainImages())
{
DestroySwapChain();
DestroySurface();
return false;
}
return true;
}
void VKSwapChain::DestroySurface()
{
if (m_surface == VK_NULL_HANDLE)
return;
DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &m_window_info, m_surface);
m_surface = VK_NULL_HANDLE;
}

View File

@ -0,0 +1,138 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "GS/Renderers/Vulkan/VKTexture.h"
#include "common/WindowInfo.h"
#include <memory>
#include <optional>
#include <vector>
class VKSwapChain
{
public:
~VKSwapChain();
// Creates a vulkan-renderable surface for the specified window handle.
static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi);
// Destroys a previously-created surface.
static void DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface);
// Create a new swap chain from a pre-existing surface.
static std::unique_ptr<VKSwapChain> Create(const WindowInfo& wi, VkSurfaceKHR surface,
VkPresentModeKHR preferred_present_mode, std::optional<bool> exclusive_fullscreen_control);
__fi VkSurfaceKHR GetSurface() const { return m_surface; }
__fi VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; }
__fi VkFormat GetTextureFormat() const { return m_surface_format.format; }
__fi VkPresentModeKHR GetPreferredPresentMode() const { return m_preferred_present_mode; }
__fi VkSwapchainKHR GetSwapChain() const { return m_swap_chain; }
__fi const VkSwapchainKHR* GetSwapChainPtr() const { return &m_swap_chain; }
__fi const WindowInfo& GetWindowInfo() const { return m_window_info; }
__fi u32 GetWidth() const { return m_window_info.surface_width; }
__fi u32 GetHeight() const { return m_window_info.surface_height; }
__fi float GetScale() const { return m_window_info.surface_scale; }
__fi u32 GetCurrentImageIndex() const { return m_current_image; }
__fi const u32* GetCurrentImageIndexPtr() const { return &m_current_image; }
__fi u32 GetImageCount() const { return static_cast<u32>(m_images.size()); }
__fi VkImage GetCurrentImage() const { return m_images[m_current_image].image; }
__fi const VKTexture& GetCurrentTexture() const { return m_images[m_current_image].texture; }
__fi VKTexture& GetCurrentTexture() { return m_images[m_current_image].texture; }
__fi VkFramebuffer GetCurrentFramebuffer() const { return m_images[m_current_image].framebuffer; }
__fi VkRenderPass GetLoadRenderPass() const { return m_load_render_pass; }
__fi VkRenderPass GetClearRenderPass() const { return m_clear_render_pass; }
__fi VkSemaphore GetImageAvailableSemaphore() const
{
return m_semaphores[m_current_semaphore].available_semaphore;
}
__fi const VkSemaphore* GetImageAvailableSemaphorePtr() const
{
return &m_semaphores[m_current_semaphore].available_semaphore;
}
__fi VkSemaphore GetRenderingFinishedSemaphore() const
{
return m_semaphores[m_current_semaphore].rendering_finished_semaphore;
}
__fi const VkSemaphore* GetRenderingFinishedSemaphorePtr() const
{
return &m_semaphores[m_current_semaphore].rendering_finished_semaphore;
}
VkResult AcquireNextImage();
void ReleaseCurrentImage();
bool RecreateSurface(const WindowInfo& new_wi);
bool ResizeSwapChain(u32 new_width = 0, u32 new_height = 0, float new_scale = 1.0f);
bool RecreateSwapChain();
// Change vsync enabled state. This may fail as it causes a swapchain recreation.
bool SetVSync(VkPresentModeKHR preferred_mode);
// Returns true if the current present mode is synchronizing (adaptive or hard).
bool IsPresentModeSynchronizing() const
{
return (m_present_mode == VK_PRESENT_MODE_FIFO_KHR || m_present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR);
}
private:
VKSwapChain(const WindowInfo& wi, VkSurfaceKHR surface, VkPresentModeKHR preferred_present_mode,
std::optional<bool> exclusive_fullscreen_control);
bool SelectSurfaceFormat();
bool SelectPresentMode();
bool CreateSwapChain();
void DestroySwapChain();
bool SetupSwapChainImages();
void DestroySwapChainImages();
void DestroySurface();
struct SwapChainImage
{
VkImage image;
VKTexture texture;
VkFramebuffer framebuffer;
};
struct ImageSemaphores
{
VkSemaphore available_semaphore;
VkSemaphore rendering_finished_semaphore;
};
WindowInfo m_window_info;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR m_surface_format = {};
VkPresentModeKHR m_preferred_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
VkRenderPass m_load_render_pass = VK_NULL_HANDLE;
VkRenderPass m_clear_render_pass = VK_NULL_HANDLE;
VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE;
std::vector<SwapChainImage> m_images;
std::vector<ImageSemaphores> m_semaphores;
u32 m_current_image = 0;
u32 m_current_semaphore = 0;
std::optional<VkResult> m_image_acquire_result;
std::optional<bool> m_exclusive_fullscreen_control;
};
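// Editor's sketch: beginning a clear pass on the current backbuffer once an image has been
// acquired. The command buffer `cmdbuf` is assumed to come from the context.
const VkClearValue clear_value = {{{0.0f, 0.0f, 0.0f, 1.0f}}};
const VkRenderPassBeginInfo rp = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr,
	swap_chain->GetClearRenderPass(), swap_chain->GetCurrentFramebuffer(),
	{{0, 0}, {swap_chain->GetWidth(), swap_chain->GetHeight()}}, 1u, &clear_value};
vkCmdBeginRenderPass(cmdbuf, &rp, VK_SUBPASS_CONTENTS_INLINE);
// ... draw ...
vkCmdEndRenderPass(cmdbuf);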

View File

@ -0,0 +1,402 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKTexture.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include <algorithm>
static constexpr VkComponentMapping s_identity_swizzle{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
VKTexture::VKTexture() = default;
VKTexture::VKTexture(VKTexture&& move)
: m_width(move.m_width)
, m_height(move.m_height)
, m_levels(move.m_levels)
, m_layers(move.m_layers)
, m_format(move.m_format)
, m_samples(move.m_samples)
, m_view_type(move.m_view_type)
, m_layout(move.m_layout)
, m_image(move.m_image)
, m_allocation(move.m_allocation)
, m_view(move.m_view)
{
move.m_width = 0;
move.m_height = 0;
move.m_levels = 0;
move.m_layers = 0;
move.m_format = VK_FORMAT_UNDEFINED;
move.m_samples = VK_SAMPLE_COUNT_1_BIT;
move.m_view_type = VK_IMAGE_VIEW_TYPE_2D;
move.m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
move.m_image = VK_NULL_HANDLE;
move.m_allocation = VK_NULL_HANDLE;
move.m_view = VK_NULL_HANDLE;
}
VKTexture::~VKTexture()
{
if (IsValid())
Destroy(true);
}
VKTexture& VKTexture::operator=(VKTexture&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_width, move.m_width);
std::swap(m_height, move.m_height);
std::swap(m_levels, move.m_levels);
std::swap(m_layers, move.m_layers);
std::swap(m_format, move.m_format);
std::swap(m_samples, move.m_samples);
std::swap(m_view_type, move.m_view_type);
std::swap(m_layout, move.m_layout);
std::swap(m_image, move.m_image);
std::swap(m_allocation, move.m_allocation);
std::swap(m_view, move.m_view);
return *this;
}
bool VKTexture::Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage,
const VkComponentMapping* swizzle /* = nullptr*/)
{
const VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, 0, VK_IMAGE_TYPE_2D, format,
{width, height, 1}, levels, layers, samples, tiling, usage, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VmaAllocationCreateInfo aci = {};
aci.usage = VMA_MEMORY_USAGE_GPU_ONLY;
aci.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
aci.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(g_vulkan_context->GetAllocator(), &image_info, &aci, &image, &allocation, nullptr);
if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
{
DevCon.WriteLn("Failed to allocate device memory for %ux%u texture", width, height);
return false;
}
else if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vmaCreateImage failed: ");
return false;
}
const VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, view_type,
format, swizzle ? *swizzle : s_identity_swizzle,
{Vulkan::IsDepthFormat(format) ? static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
VkImageView view = VK_NULL_HANDLE;
res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
vmaDestroyImage(g_vulkan_context->GetAllocator(), image, allocation);
return false;
}
if (IsValid())
Destroy(true);
m_width = width;
m_height = height;
m_levels = levels;
m_layers = layers;
m_format = format;
m_samples = samples;
m_view_type = view_type;
m_image = image;
m_allocation = allocation;
m_view = view;
return true;
}
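// Editor's sketch: creating a sampleable color render target with this helper. The exact
// usage flags a renderer needs will vary; treat the combination below as illustrative.
VKTexture rt;
if (!rt.Create(1280, 720, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
		VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
		VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))
{
	Console.Error("Failed to create 1280x720 render target");
}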
bool VKTexture::Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples, const VkComponentMapping* swizzle /* = nullptr*/)
{
// Only need to create the image view, this is mainly for swap chains.
const VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, existing_image,
view_type, format, swizzle ? *swizzle : s_identity_swizzle,
{Vulkan::IsDepthFormat(format) ? static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
// Memory is managed by the owner of the image.
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
return false;
}
if (IsValid())
Destroy(true);
m_width = width;
m_height = height;
m_levels = levels;
m_layers = layers;
m_format = format;
m_samples = samples;
m_view_type = view_type;
m_image = existing_image;
m_view = view;
return true;
}
void VKTexture::Destroy(bool defer /* = true */)
{
if (m_view != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferImageViewDestruction(m_view);
else
vkDestroyImageView(g_vulkan_context->GetDevice(), m_view, nullptr);
m_view = VK_NULL_HANDLE;
}
// If we don't have device memory allocated, the image is not owned by us (e.g. swapchain)
if (m_allocation != VK_NULL_HANDLE)
{
pxAssert(m_image != VK_NULL_HANDLE);
if (defer)
g_vulkan_context->DeferImageDestruction(m_image, m_allocation);
else
vmaDestroyImage(g_vulkan_context->GetAllocator(), m_image, m_allocation);
m_image = VK_NULL_HANDLE;
m_allocation = VK_NULL_HANDLE;
}
m_width = 0;
m_height = 0;
m_levels = 0;
m_layers = 0;
m_format = VK_FORMAT_UNDEFINED;
m_samples = VK_SAMPLE_COUNT_1_BIT;
m_view_type = VK_IMAGE_VIEW_TYPE_2D;
m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
}
void VKTexture::OverrideImageLayout(VkImageLayout new_layout)
{
m_layout = new_layout;
}
void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout)
{
if (m_layout == new_layout)
return;
TransitionSubresourcesToLayout(command_buffer, 0, m_levels, 0, m_layers, m_layout, new_layout);
m_layout = new_layout;
}
void VKTexture::TransitionSubresourcesToLayout(VkCommandBuffer command_buffer, u32 start_level, u32 num_levels,
u32 start_layer, u32 num_layers, VkImageLayout old_layout, VkImageLayout new_layout)
{
VkImageAspectFlags aspect;
if (Vulkan::IsDepthStencilFormat(m_format))
aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
else if (Vulkan::IsDepthFormat(m_format))
aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
else
aspect = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
0, // VkAccessFlags dstAccessMask
old_layout, // VkImageLayout oldLayout
new_layout, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{aspect, start_level, num_levels, start_layer, num_layers} // VkImageSubresourceRange subresourceRange
};
// srcStageMask -> Stages that must complete before the barrier
// dstStageMask -> Stages that must wait for the barrier before beginning
VkPipelineStageFlags srcStageMask, dstStageMask;
switch (old_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Layout undefined therefore contents undefined, and we don't care what happens to it.
barrier.srcAccessMask = 0;
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image has been pre-initialized by the host, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_HOST_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image was being used as a color attachment, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image was being used as a depthstencil attachment, so ensure all writes have completed.
barrier.srcAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image was being used as a shader resource, make sure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image was being used as a copy source, ensure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image was being used as a copy destination, ensure all writes have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_GENERAL:
// General is used for feedback loops.
barrier.srcAccessMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) :
(VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT);
srcStageMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) :
(VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
break;
default:
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
}
switch (new_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
barrier.dstAccessMask = 0;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_GENERAL:
// General is used for feedback loops.
barrier.dstAccessMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) :
(VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT);
dstStageMask = (aspect == VK_IMAGE_ASPECT_COLOR_BIT) ?
(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) :
(VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
break;
default:
dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
break;
}
vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
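// Editor's sketch: a render target that was just drawn to is moved into a sampleable layout
// before being bound as a texture. The command buffer accessor is an assumed VKContext API.
VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); // assumption
rt.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// ... bind rt.GetView() in a descriptor set and sample from it ...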
VkFramebuffer VKTexture::CreateFramebuffer(VkRenderPass render_pass)
{
const VkFramebufferCreateInfo ci = {
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0u, render_pass, 1, &m_view, m_width, m_height, m_layers};
VkFramebuffer fb = VK_NULL_HANDLE;
VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
return fb;
}
void VKTexture::UpdateFromBuffer(VkCommandBuffer cmdbuf, u32 level, u32 layer, u32 x, u32 y, u32 width, u32 height,
u32 buffer_height, u32 row_length, VkBuffer buffer, u32 buffer_offset)
{
const VkImageLayout old_layout = m_layout;
if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
TransitionSubresourcesToLayout(cmdbuf, level, 1, layer, 1, old_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkBufferImageCopy bic = {static_cast<VkDeviceSize>(buffer_offset), row_length, buffer_height,
{VK_IMAGE_ASPECT_COLOR_BIT, level, layer, 1u}, {static_cast<int32_t>(x), static_cast<int32_t>(y), 0},
{width, height, 1u}};
vkCmdCopyBufferToImage(cmdbuf, buffer, m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bic);
if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
TransitionSubresourcesToLayout(cmdbuf, level, 1, layer, 1, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, old_layout);
}
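// Editor's sketch of an upload built on UpdateFromBuffer(): the pixels are staged in a
// host-visible buffer first. The `staging` object and its methods are assumptions modelled
// on a stream-buffer style API, not part of this file.
const u32 pitch = width * 4; // RGBA8 in this example
const u32 upload_size = pitch * height;
if (staging.ReserveMemory(upload_size, pitch)) // assumed stream-buffer API
{
	std::memcpy(staging.GetCurrentHostPointer(), pixels, upload_size);
	const u32 offset = staging.GetCurrentOffset();
	staging.CommitMemory(upload_size);
	tex.UpdateFromBuffer(cmdbuf, 0, 0, 0, 0, width, height, height, width,
		staging.GetBuffer(), offset);
}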

View File

@ -0,0 +1,89 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "GS/Renderers/Vulkan/VKLoader.h"
#include <algorithm>
#include <memory>
class VKTexture
{
public:
VKTexture();
VKTexture(VKTexture&& move);
VKTexture(const VKTexture&) = delete;
~VKTexture();
VKTexture& operator=(VKTexture&& move);
VKTexture& operator=(const VKTexture&) = delete;
__fi bool IsValid() const { return (m_image != VK_NULL_HANDLE); }
/// An image is considered owned/managed if we control the memory.
__fi bool IsOwned() const { return (m_allocation != VK_NULL_HANDLE); }
__fi u32 GetWidth() const { return m_width; }
__fi u32 GetHeight() const { return m_height; }
__fi u32 GetLevels() const { return m_levels; }
__fi u32 GetLayers() const { return m_layers; }
__fi u32 GetMipWidth(u32 level) const { return std::max<u32>(m_width >> level, 1u); }
__fi u32 GetMipHeight(u32 level) const { return std::max<u32>(m_height >> level, 1u); }
__fi VkFormat GetFormat() const { return m_format; }
__fi VkSampleCountFlagBits GetSamples() const { return m_samples; }
__fi VkImageLayout GetLayout() const { return m_layout; }
__fi VkImageViewType GetViewType() const { return m_view_type; }
__fi VkImage GetImage() const { return m_image; }
__fi VmaAllocation GetAllocation() const { return m_allocation; }
__fi VkImageView GetView() const { return m_view; }
bool Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage,
const VkComponentMapping* swizzle = nullptr);
bool Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples, const VkComponentMapping* swizzle = nullptr);
void Destroy(bool defer = true);
// Used when the render pass changes the image layout, or to force the layout to
// VK_IMAGE_LAYOUT_UNDEFINED when the existing contents of the image are irrelevant
// and will not be loaded.
void OverrideImageLayout(VkImageLayout new_layout);
void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout);
void TransitionSubresourcesToLayout(VkCommandBuffer command_buffer, u32 start_level, u32 num_levels,
u32 start_layer, u32 num_layers, VkImageLayout old_layout, VkImageLayout new_layout);
VkFramebuffer CreateFramebuffer(VkRenderPass render_pass);
void UpdateFromBuffer(VkCommandBuffer cmdbuf, u32 level, u32 layer, u32 x, u32 y, u32 width, u32 height,
u32 buffer_height, u32 row_length, VkBuffer buffer, u32 buffer_offset);
private:
u32 m_width = 0;
u32 m_height = 0;
u32 m_levels = 0;
u32 m_layers = 0;
VkFormat m_format = VK_FORMAT_UNDEFINED;
VkSampleCountFlagBits m_samples = VK_SAMPLE_COUNT_1_BIT;
VkImageViewType m_view_type = VK_IMAGE_VIEW_TYPE_2D;
VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage m_image = VK_NULL_HANDLE;
VmaAllocation m_allocation = VK_NULL_HANDLE;
VkImageView m_view = VK_NULL_HANDLE;
};

View File

@ -0,0 +1,351 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "PrecompiledHeader.h"
#include "GS/Renderers/Vulkan/VKUtil.h"
#include "GS/Renderers/Vulkan/VKContext.h"
#include "common/Assertions.h"
#include "common/Console.h"
#include "common/StringUtil.h"
#include <cmath>
bool Vulkan::IsDepthFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
bool Vulkan::IsDepthStencilFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
VkFormat Vulkan::GetLinearFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R8_SRGB:
return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8G8_SRGB:
return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8B8_SRGB:
return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_R8G8B8A8_SRGB:
return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_B8G8R8_SRGB:
return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_B8G8R8A8_SRGB:
return VK_FORMAT_B8G8R8A8_UNORM;
default:
return format;
}
}
u32 Vulkan::GetTexelSize(VkFormat format)
{
// Only contains pixel formats we use.
switch (format)
{
case VK_FORMAT_R8_UNORM:
return 1;
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
case VK_FORMAT_R16_UINT:
return 2;
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SFLOAT:
case VK_FORMAT_D32_SFLOAT:
return 4;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
return 8;
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return 16;
default:
pxFailRel("Unhandled pixel format");
return 1;
}
}
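// Editor's note: for the uncompressed formats above this is bytes per texel, so a tightly
// packed mip level can be sized as in this sketch (block-compressed formats need block math):
const u32 pitch = tex.GetMipWidth(level) * Vulkan::GetTexelSize(tex.GetFormat());
const u32 level_size = pitch * tex.GetMipHeight(level);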
void Vulkan::SafeDestroyFramebuffer(VkFramebuffer& fb)
{
if (fb != VK_NULL_HANDLE)
{
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr);
fb = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroyShaderModule(VkShaderModule& sm)
{
if (sm != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), sm, nullptr);
sm = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroyPipeline(VkPipeline& p)
{
if (p != VK_NULL_HANDLE)
{
vkDestroyPipeline(g_vulkan_context->GetDevice(), p, nullptr);
p = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroyPipelineLayout(VkPipelineLayout& pl)
{
if (pl != VK_NULL_HANDLE)
{
vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), pl, nullptr);
pl = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl)
{
if (dsl != VK_NULL_HANDLE)
{
vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), dsl, nullptr);
dsl = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroyBufferView(VkBufferView& bv)
{
if (bv != VK_NULL_HANDLE)
{
vkDestroyBufferView(g_vulkan_context->GetDevice(), bv, nullptr);
bv = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroyImageView(VkImageView& iv)
{
if (iv != VK_NULL_HANDLE)
{
vkDestroyImageView(g_vulkan_context->GetDevice(), iv, nullptr);
iv = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroySampler(VkSampler& samp)
{
if (samp != VK_NULL_HANDLE)
{
vkDestroySampler(g_vulkan_context->GetDevice(), samp, nullptr);
samp = VK_NULL_HANDLE;
}
}
void Vulkan::SafeDestroySemaphore(VkSemaphore& sem)
{
if (sem != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), sem, nullptr);
sem = VK_NULL_HANDLE;
}
}
void Vulkan::SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds)
{
if (ds != VK_NULL_HANDLE)
{
g_vulkan_context->FreeGlobalDescriptorSet(ds);
ds = VK_NULL_HANDLE;
}
}
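// Editor's sketch: because each SafeDestroy* helper nulls the handle after destroying it,
// teardown code can call them unconditionally. The member names here are illustrative.
Vulkan::SafeDestroyFramebuffer(m_framebuffer);
Vulkan::SafeDestroyPipeline(m_pipeline);
Vulkan::SafeDestroySampler(m_sampler);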
void Vulkan::BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask)
{
VkBufferMemoryBarrier buffer_info = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
src_access_mask, // VkAccessFlags srcAccessMask
dst_access_mask, // VkAccessFlags dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
buffer, // VkBuffer buffer
offset, // VkDeviceSize offset
size // VkDeviceSize size
};
vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &buffer_info, 0, nullptr);
}
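// Editor's sketch: make a transfer write to a vertex buffer visible to subsequent vertex
// fetches. `cmdbuf` and `vertex_buffer` are assumed to exist at the call site.
Vulkan::BufferMemoryBarrier(cmdbuf, vertex_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
	VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, 0, VK_WHOLE_SIZE,
	VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);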
void Vulkan::AddPointerToChain(void* head, const void* ptr)
{
VkBaseInStructure* last_st = static_cast<VkBaseInStructure*>(head);
while (last_st->pNext)
{
if (last_st->pNext == ptr)
return;
last_st = const_cast<VkBaseInStructure*>(last_st->pNext);
}
last_st->pNext = static_cast<const VkBaseInStructure*>(ptr);
}
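// Editor's sketch: AddPointerToChain() appends a structure to an existing pNext chain
// without adding duplicates, e.g. when attaching an optional feature struct at device
// creation. The feature struct chosen here is only an example.
VkPhysicalDeviceProvokingVertexFeaturesEXT pv = {
	VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT};
pv.provokingVertexLast = VK_TRUE;
VkDeviceCreateInfo device_info = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
Vulkan::AddPointerToChain(&device_info, &pv);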
const char* Vulkan::VkResultToString(VkResult res)
{
switch (res)
{
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
return "VK_NOT_READY";
case VK_TIMEOUT:
return "VK_TIMEOUT";
case VK_EVENT_SET:
return "VK_EVENT_SET";
case VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
default:
return "UNKNOWN_VK_RESULT";
}
}
const char* Vulkan::PresentModeToString(VkPresentModeKHR mode)
{
switch (mode)
{
case VK_PRESENT_MODE_IMMEDIATE_KHR:
return "VK_PRESENT_MODE_IMMEDIATE_KHR";
case VK_PRESENT_MODE_MAILBOX_KHR:
return "VK_PRESENT_MODE_MAILBOX_KHR";
case VK_PRESENT_MODE_FIFO_KHR:
return "VK_PRESENT_MODE_FIFO_KHR";
case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
return "VK_PRESENT_MODE_FIFO_RELAXED_KHR";
case VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR:
return "VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR";
case VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR:
return "VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR";
default:
return "UNKNOWN_VK_PRESENT_MODE";
}
}
void Vulkan::LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap);
va_end(ap);
Console.Error("(%s) %s (%d: %s)", func_name, real_msg.c_str(), static_cast<int>(res), VkResultToString(res));
}

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -15,9 +15,10 @@
#pragma once
#include "common/Pcsx2Defs.h"
#include "GS/Renderers/Vulkan/VKLoader.h"
#include "common/StringUtil.h"
#include "common/Vulkan/Loader.h"
#include <algorithm>
#include <array>
#include <cstdarg>
@ -25,38 +26,36 @@
namespace Vulkan
{
namespace Util
{
bool IsDepthFormat(VkFormat format);
bool IsDepthStencilFormat(VkFormat format);
VkFormat GetLinearFormat(VkFormat format);
u32 GetTexelSize(VkFormat format);
bool IsDepthFormat(VkFormat format);
bool IsDepthStencilFormat(VkFormat format);
VkFormat GetLinearFormat(VkFormat format);
u32 GetTexelSize(VkFormat format);
// Safe destroy helpers
void SafeDestroyFramebuffer(VkFramebuffer& fb);
void SafeDestroyShaderModule(VkShaderModule& sm);
void SafeDestroyPipeline(VkPipeline& p);
void SafeDestroyPipelineLayout(VkPipelineLayout& pl);
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl);
void SafeDestroyBufferView(VkBufferView& bv);
void SafeDestroyImageView(VkImageView& iv);
void SafeDestroySampler(VkSampler& samp);
void SafeDestroySemaphore(VkSemaphore& sem);
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds);
// Safe destroy helpers
void SafeDestroyFramebuffer(VkFramebuffer& fb);
void SafeDestroyShaderModule(VkShaderModule& sm);
void SafeDestroyPipeline(VkPipeline& p);
void SafeDestroyPipelineLayout(VkPipelineLayout& pl);
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl);
void SafeDestroyBufferView(VkBufferView& bv);
void SafeDestroyImageView(VkImageView& iv);
void SafeDestroySampler(VkSampler& samp);
void SafeDestroySemaphore(VkSemaphore& sem);
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds);
// Wrapper for creating a barrier on a buffer
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask);
// Wrapper for creating a barrier on a buffer
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask);
// Adds a structure to a chain.
void AddPointerToChain(void* head, const void* ptr);
// Adds a structure to a chain.
void AddPointerToChain(void* head, const void* ptr);
const char* VkResultToString(VkResult res);
const char* PresentModeToString(VkPresentModeKHR mode);
void LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...) /*printflike(4, 5)*/;
const char* VkResultToString(VkResult res);
const char* PresentModeToString(VkPresentModeKHR mode);
void LogVulkanResult(const char* func_name, VkResult res, const char* msg, ...) /*printflike(4, 5)*/;
#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::Util::LogVulkanResult(__func__, res, __VA_ARGS__)
#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::LogVulkanResult(__func__, res, __VA_ARGS__)
#if defined(_DEBUG)
@ -70,11 +69,11 @@ namespace Vulkan
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
// Provides a compile-time mapping between a Vulkan-type into its matching VkObjectType
template <typename T>
struct VkObjectTypeMap;
// Provides a compile-time mapping between a Vulkan-type into its matching VkObjectType
template <typename T>
struct VkObjectTypeMap;
// clang-format off
// clang-format off
template<> struct VkObjectTypeMap<VkInstance > { using type = VkInstance ; static constexpr VkObjectType value = VK_OBJECT_TYPE_INSTANCE; };
template<> struct VkObjectTypeMap<VkPhysicalDevice > { using type = VkPhysicalDevice ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PHYSICAL_DEVICE; };
template<> struct VkObjectTypeMap<VkDevice > { using type = VkDevice ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE; };
@ -104,36 +103,35 @@ template<> struct VkObjectTypeMap<VkDescriptorUpdateTemplate> { using type = VkD
template<> struct VkObjectTypeMap<VkSurfaceKHR > { using type = VkSurfaceKHR ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SURFACE_KHR; };
template<> struct VkObjectTypeMap<VkSwapchainKHR > { using type = VkSwapchainKHR ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SWAPCHAIN_KHR; };
template<> struct VkObjectTypeMap<VkDebugUtilsMessengerEXT > { using type = VkDebugUtilsMessengerEXT ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT; };
// clang-format on
// clang-format on
#endif
static inline void SetObjectName(
VkDevice device, void* object_handle, VkObjectType object_type, const char* format, va_list ap)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkSetDebugUtilsObjectNameEXT)
{
return;
}
const std::string str(StringUtil::StdStringFromFormatV(format, ap));
const VkDebugUtilsObjectNameInfoEXT nameInfo{VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, nullptr,
object_type, reinterpret_cast<uint64_t>(object_handle), str.c_str()};
vkSetDebugUtilsObjectNameEXT(device, &nameInfo);
#endif
}
} // namespace Util
}
template <typename T>
static inline void SetObjectName(VkDevice device, T object_handle, const char* format, ...)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
std::va_list ap;
va_start(ap, format);
SetObjectName(device, reinterpret_cast<void*>((typename VkObjectTypeMap<T>::type)object_handle),
VkObjectTypeMap<T>::value, format, ap);
va_end(ap);
#endif
}
} // namespace Vulkan

View File

@ -1,5 +1,5 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2021 PCSX2 Dev Team
* Copyright (C) 2002-2023 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
@ -13,8 +13,8 @@
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "common/PrecompiledHeader.h"
#include "PrecompiledHeader.h"
#define VMA_IMPLEMENTATION 1
#include "common/Vulkan/Loader.h"
#include "GS/Renderers/Vulkan/VKLoader.h"

View File

@ -41,6 +41,7 @@
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\zlib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\libpng;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\glad\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\glslang\glslang;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\cubeb\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\imgui\include;$(SolutionDir)3rdparty\imgui\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)3rdparty\simpleini\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
@ -100,6 +101,7 @@
<None Include="..\bin\resources\shaders\dx11\merge.fx" />
<None Include="..\bin\resources\shaders\dx11\shadeboost.fx" />
<None Include="..\bin\resources\shaders\dx11\tfx.fx" />
<None Include="GS\Renderers\Vulkan\VKEntryPoints.inl" />
<None Include="Utilities\folderdesc.txt" />
<None Include="Docs\License.txt" />
<None Include="ps2\eeHwTraceLog.inl" />
@ -211,6 +213,15 @@
<ClCompile Include="GS\Renderers\OpenGL\GLProgram.cpp" />
<ClCompile Include="GS\Renderers\OpenGL\GLShaderCache.cpp" />
<ClCompile Include="GS\Renderers\OpenGL\GLStreamBuffer.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKTexture.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKBuilders.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKContext.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKLoader.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKShaderCache.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKStreamBuffer.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKSwapChain.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\VKUtil.cpp" />
<ClCompile Include="GS\Renderers\Vulkan\vk_mem_alloc.cpp" />
<ClCompile Include="INISettingsInterface.cpp" />
<ClCompile Include="Frontend\InputManager.cpp" />
<ClCompile Include="Frontend\InputSource.cpp" />
@ -563,6 +574,15 @@
<ClInclude Include="GS\Renderers\OpenGL\GLProgram.h" />
<ClInclude Include="GS\Renderers\OpenGL\GLShaderCache.h" />
<ClInclude Include="GS\Renderers\OpenGL\GLStreamBuffer.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKTexture.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKBuilders.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKContext.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKEntryPoints.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKLoader.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKShaderCache.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKStreamBuffer.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKSwapChain.h" />
<ClInclude Include="GS\Renderers\Vulkan\VKUtil.h" />
<ClInclude Include="INISettingsInterface.h" />
<ClInclude Include="Frontend\InputManager.h" />
<ClInclude Include="Frontend\InputSource.h" />
@ -856,6 +876,9 @@
<ProjectReference Include="..\3rdparty\discord-rpc\discord-rpc.vcxproj">
<Project>{e960dfdf-1bd3-4c29-b251-d1a0919c9b09}</Project>
</ProjectReference>
<ProjectReference Include="..\3rdparty\glslang\glslang.vcxproj">
<Project>{ef6834a9-11f3-4331-bc34-21b325abb180}</Project>
</ProjectReference>
<ProjectReference Include="..\3rdparty\libzip\libzip.vcxproj">
<Project>{20b2e9fe-f020-42a0-b324-956f5b06ea68}</Project>
</ProjectReference>

View File

@ -366,6 +366,9 @@
<None Include="..\bin\resources\shaders\dx11\convert.fx">
<Filter>System\Ps2\GS\Shaders\Direct3D</Filter>
</None>
<None Include="GS\Renderers\Vulkan\VKEntryPoints.inl">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</None>
</ItemGroup>
<ItemGroup>
<ClCompile Include="Patch.cpp">
@ -1391,6 +1394,33 @@
<ClCompile Include="GS\Renderers\OpenGL\GLContext.cpp">
<Filter>System\Ps2\GS\Renderers\OpenGL</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\vk_mem_alloc.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKBuilders.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKContext.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKLoader.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKShaderCache.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKStreamBuffer.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKSwapChain.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKUtil.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
<ClCompile Include="GS\Renderers\Vulkan\VKTexture.cpp">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Patch.h">
@ -2327,6 +2357,33 @@
<ClInclude Include="GS\Renderers\OpenGL\GLContext.h">
<Filter>System\Ps2\GS\Renderers\OpenGL</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKTexture.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKBuilders.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKContext.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKEntryPoints.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKLoader.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKShaderCache.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKStreamBuffer.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKSwapChain.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
<ClInclude Include="GS\Renderers\Vulkan\VKUtil.h">
<Filter>System\Ps2\GS\Renderers\Vulkan</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<CustomBuildStep Include="rdebug\deci2.h">