[Vulkan] Load Vulkan manually for more lifetime and extension control

Triang3l 2021-07-11 22:56:01 +03:00
parent 458e4e1a31
commit 692e329e9c
38 changed files with 1224 additions and 788 deletions
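This change drops the volk meta-loader (removed from the premake files below) in favor of function tables owned by xenia-ui: instance-level entry points are resolved through vkGetInstanceProcAddr into VulkanInstance::InstanceFunctions (exposed as ifn()), and device-level ones through vkGetDeviceProcAddr into VulkanDevice::DeviceFunctions (exposed as dfn()). A minimal sketch of the idea, with heavily abbreviated member lists — the real tables cover every command the backend calls:

struct DeviceFunctions {
  PFN_vkCreateImage vkCreateImage;
  PFN_vkDestroyImage vkDestroyImage;
  // ... one member per device-level command used by the backend.
};

bool LoadDeviceFunctions(PFN_vkGetDeviceProcAddr get_device_proc_addr,
                         VkDevice device, DeviceFunctions* dfn) {
  // Resolve each pointer once at device creation; call sites then dispatch
  // through the table (dfn.vkCreateImage(...)) instead of global symbols.
  dfn->vkCreateImage = reinterpret_cast<PFN_vkCreateImage>(
      get_device_proc_addr(device, "vkCreateImage"));
  dfn->vkDestroyImage = reinterpret_cast<PFN_vkDestroyImage>(
      get_device_proc_addr(device, "vkDestroyImage"));
  return dfn->vkCreateImage && dfn->vkDestroyImage;
}

Dispatching through a per-device table skips the loader's trampoline and ties pointer lifetime to the VulkanDevice that loaded it, which is the lifetime and extension control the commit title refers to.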

View File

@ -231,7 +231,6 @@ workspace("xenia")
include("third_party/mspack.lua")
include("third_party/snappy.lua")
include("third_party/spirv-tools.lua")
include("third_party/volk.lua")
include("third_party/xxhash.lua")
if not os.istarget("android") then

View File

@ -43,7 +43,6 @@ project("xenia-app")
"mspack",
"snappy",
"spirv-tools",
"volk",
"xxhash",
})
defines({

View File

@ -120,7 +120,7 @@ VkResult BufferCache::Initialize() {
// Create a memory allocator for textures.
VmaVulkanFunctions vulkan_funcs = {};
ui::vulkan::FillVMAVulkanFunctions(&vulkan_funcs);
ui::vulkan::FillVMAVulkanFunctions(&vulkan_funcs, *device_);
VmaAllocatorCreateInfo alloc_info = {
0, *device_, *device_, 0, 0, nullptr, nullptr, 0, nullptr, &vulkan_funcs,
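
FillVMAVulkanFunctions now takes the device so it can hand the Vulkan Memory Allocator the manually loaded pointers instead of globally linked symbols. A plausible sketch of the updated helper, assuming VulkanDevice::dfn(); VmaVulkanFunctions is VMA's real dispatch struct, but only a few of its many fields are shown:

void FillVMAVulkanFunctions(VmaVulkanFunctions* funcs,
                            const VulkanDevice& device) {
  const auto& dfn = device.dfn();
  funcs->vkAllocateMemory = dfn.vkAllocateMemory;
  funcs->vkFreeMemory = dfn.vkFreeMemory;
  funcs->vkMapMemory = dfn.vkMapMemory;
  funcs->vkUnmapMemory = dfn.vkUnmapMemory;
  funcs->vkCreateBuffer = dfn.vkCreateBuffer;
  funcs->vkDestroyBuffer = dfn.vkDestroyBuffer;
  // ... the remaining entries, including the instance-level physical-device
  // queries VMA needs, are filled the same way.
}
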
@ -144,7 +144,8 @@ VkResult BufferCache::Initialize() {
return VK_SUCCESS;
}
VkResult xe::gpu::vulkan::BufferCache::CreateVertexDescriptorPool() {
VkResult BufferCache::CreateVertexDescriptorPool() {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
std::vector<VkDescriptorPoolSize> pool_sizes;
@ -170,8 +171,8 @@ VkResult xe::gpu::vulkan::BufferCache::CreateVertexDescriptorPool() {
1,
&binding,
};
status = vkCreateDescriptorSetLayout(*device_, &layout_info, nullptr,
&vertex_descriptor_set_layout_);
status = dfn.vkCreateDescriptorSetLayout(*device_, &layout_info, nullptr,
&vertex_descriptor_set_layout_);
if (status != VK_SUCCESS) {
return status;
}
@ -179,14 +180,16 @@ VkResult xe::gpu::vulkan::BufferCache::CreateVertexDescriptorPool() {
return VK_SUCCESS;
}
void xe::gpu::vulkan::BufferCache::FreeVertexDescriptorPool() {
void BufferCache::FreeVertexDescriptorPool() {
vertex_descriptor_pool_.reset();
VK_SAFE_DESTROY(vkDestroyDescriptorSetLayout, *device_,
vertex_descriptor_set_layout_, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyDescriptorSetLayout, *device_,
vertex_descriptor_set_layout_);
}
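
ui::vulkan::DestroyAndNullHandle replaces the old VK_SAFE_DESTROY macro throughout this commit. Judging by the call sites, a type-safe template along these lines would fit; the real definition lives in xenia/ui/vulkan and may differ:

template <typename F, typename T>
void DestroyAndNullHandle(F destroy_function, VkDevice device, T& handle) {
  // Destroy only live handles, then null the member so repeated shutdown
  // paths stay idempotent.
  if (handle != VK_NULL_HANDLE) {
    destroy_function(device, handle, nullptr);
    handle = VK_NULL_HANDLE;
  }
}
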
VkResult BufferCache::CreateConstantDescriptorSet() {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Descriptor pool used for all of our cached descriptors.
@ -204,8 +207,8 @@ VkResult BufferCache::CreateConstantDescriptorSet() {
pool_sizes[0].descriptorCount = 2;
transient_descriptor_pool_info.poolSizeCount = 1;
transient_descriptor_pool_info.pPoolSizes = pool_sizes;
status = vkCreateDescriptorPool(*device_, &transient_descriptor_pool_info,
nullptr, &constant_descriptor_pool_);
status = dfn.vkCreateDescriptorPool(*device_, &transient_descriptor_pool_info,
nullptr, &constant_descriptor_pool_);
if (status != VK_SUCCESS) {
return status;
}
@ -237,9 +240,9 @@ VkResult BufferCache::CreateConstantDescriptorSet() {
descriptor_set_layout_info.bindingCount =
static_cast<uint32_t>(xe::countof(bindings));
descriptor_set_layout_info.pBindings = bindings;
status =
vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
nullptr, &constant_descriptor_set_layout_);
status = dfn.vkCreateDescriptorSetLayout(*device_,
&descriptor_set_layout_info, nullptr,
&constant_descriptor_set_layout_);
if (status != VK_SUCCESS) {
return status;
}
@ -253,8 +256,8 @@ VkResult BufferCache::CreateConstantDescriptorSet() {
set_alloc_info.descriptorPool = constant_descriptor_pool_;
set_alloc_info.descriptorSetCount = 1;
set_alloc_info.pSetLayouts = &constant_descriptor_set_layout_;
status = vkAllocateDescriptorSets(*device_, &set_alloc_info,
&constant_descriptor_set_);
status = dfn.vkAllocateDescriptorSets(*device_, &set_alloc_info,
&constant_descriptor_set_);
if (status != VK_SUCCESS) {
return status;
}
@ -286,22 +289,24 @@ VkResult BufferCache::CreateConstantDescriptorSet() {
fragment_uniform_binding_write.descriptorType =
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
fragment_uniform_binding_write.pBufferInfo = &buffer_info;
vkUpdateDescriptorSets(*device_, 2, descriptor_writes, 0, nullptr);
dfn.vkUpdateDescriptorSets(*device_, 2, descriptor_writes, 0, nullptr);
return VK_SUCCESS;
}
void BufferCache::FreeConstantDescriptorSet() {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (constant_descriptor_set_) {
vkFreeDescriptorSets(*device_, constant_descriptor_pool_, 1,
&constant_descriptor_set_);
dfn.vkFreeDescriptorSets(*device_, constant_descriptor_pool_, 1,
&constant_descriptor_set_);
constant_descriptor_set_ = nullptr;
}
VK_SAFE_DESTROY(vkDestroyDescriptorSetLayout, *device_,
constant_descriptor_set_layout_, nullptr);
VK_SAFE_DESTROY(vkDestroyDescriptorPool, *device_, constant_descriptor_pool_,
nullptr);
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyDescriptorSetLayout, *device_,
constant_descriptor_set_layout_);
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyDescriptorPool, *device_,
constant_descriptor_pool_);
}
void BufferCache::Shutdown() {
@ -314,7 +319,9 @@ void BufferCache::Shutdown() {
FreeVertexDescriptorPool();
transient_buffer_->Shutdown();
VK_SAFE_DESTROY(vkFreeMemory, *device_, gpu_memory_pool_, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
ui::vulkan::DestroyAndNullHandle(dfn.vkFreeMemory, *device_,
gpu_memory_pool_);
}
std::pair<VkDeviceSize, VkDeviceSize> BufferCache::UploadConstantRegisters(
@ -361,9 +368,10 @@ std::pair<VkDeviceSize, VkDeviceSize> BufferCache::UploadConstantRegisters(
offset,
kConstantRegisterUniformRange,
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
return {offset, offset};
@ -476,9 +484,10 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadIndexBuffer(
offset,
source_length,
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
return {transient_buffer_->gpu_buffer(), offset};
}
@ -543,9 +552,10 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadVertexBuffer(
offset,
upload_size,
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr,
1, &barrier, 0, nullptr);
CacheTransientData(upload_base, upload_size, offset);
return {transient_buffer_->gpu_buffer(), offset + source_offset};
@ -687,7 +697,8 @@ VkDescriptorSet BufferCache::PrepareVertexSet(
};
}
vkUpdateDescriptorSets(*device_, 1, &descriptor_write, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkUpdateDescriptorSets(*device_, 1, &descriptor_write, 0, nullptr);
vertex_sets_[hash] = set;
return set;
}
@ -760,13 +771,15 @@ void BufferCache::CacheTransientData(uint32_t guest_address,
}
void BufferCache::Flush(VkCommandBuffer command_buffer) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// If we are flushing a big enough chunk, queue up an event.
// We don't want to do this for everything, but often enough that we won't
// run out of space.
if (true) {
// VkEvent finish_event;
// vkCmdSetEvent(cmd_buffer, finish_event,
// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
// dfn.vkCmdSetEvent(cmd_buffer, finish_event,
// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
}
// Flush memory.
@ -777,7 +790,7 @@ void BufferCache::Flush(VkCommandBuffer command_buffer) {
dirty_range.memory = transient_buffer_->gpu_memory();
dirty_range.offset = 0;
dirty_range.size = transient_buffer_->capacity();
vkFlushMappedMemoryRanges(*device_, 1, &dirty_range);
dfn.vkFlushMappedMemoryRanges(*device_, 1, &dirty_range);
}
void BufferCache::InvalidateCache() {

View File

@ -45,6 +45,7 @@ VkResult PipelineCache::Initialize(
VkDescriptorSetLayout uniform_descriptor_set_layout,
VkDescriptorSetLayout texture_descriptor_set_layout,
VkDescriptorSetLayout vertex_descriptor_set_layout) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
// Initialize the shared driver pipeline cache.
@ -57,8 +58,8 @@ VkResult PipelineCache::Initialize(
pipeline_cache_info.flags = 0;
pipeline_cache_info.initialDataSize = 0;
pipeline_cache_info.pInitialData = nullptr;
status = vkCreatePipelineCache(*device_, &pipeline_cache_info, nullptr,
&pipeline_cache_);
status = dfn.vkCreatePipelineCache(*device_, &pipeline_cache_info, nullptr,
&pipeline_cache_);
if (status != VK_SUCCESS) {
return status;
}
@ -95,8 +96,8 @@ VkResult PipelineCache::Initialize(
pipeline_layout_info.pushConstantRangeCount =
static_cast<uint32_t>(xe::countof(push_constant_ranges));
pipeline_layout_info.pPushConstantRanges = push_constant_ranges;
status = vkCreatePipelineLayout(*device_, &pipeline_layout_info, nullptr,
&pipeline_layout_);
status = dfn.vkCreatePipelineLayout(*device_, &pipeline_layout_info, nullptr,
&pipeline_layout_);
if (status != VK_SUCCESS) {
return status;
}
@ -112,8 +113,8 @@ VkResult PipelineCache::Initialize(
static_cast<uint32_t>(sizeof(line_quad_list_geom));
shader_module_info.pCode =
reinterpret_cast<const uint32_t*>(line_quad_list_geom);
status = vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.line_quad_list);
status = dfn.vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.line_quad_list);
if (status != VK_SUCCESS) {
return status;
}
@ -123,8 +124,8 @@ VkResult PipelineCache::Initialize(
shader_module_info.codeSize = static_cast<uint32_t>(sizeof(point_list_geom));
shader_module_info.pCode = reinterpret_cast<const uint32_t*>(point_list_geom);
status = vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.point_list);
status = dfn.vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.point_list);
if (status != VK_SUCCESS) {
return status;
}
@ -134,8 +135,8 @@ VkResult PipelineCache::Initialize(
shader_module_info.codeSize = static_cast<uint32_t>(sizeof(quad_list_geom));
shader_module_info.pCode = reinterpret_cast<const uint32_t*>(quad_list_geom);
status = vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.quad_list);
status = dfn.vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.quad_list);
if (status != VK_SUCCESS) {
return status;
}
@ -145,8 +146,8 @@ VkResult PipelineCache::Initialize(
shader_module_info.codeSize = static_cast<uint32_t>(sizeof(rect_list_geom));
shader_module_info.pCode = reinterpret_cast<const uint32_t*>(rect_list_geom);
status = vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.rect_list);
status = dfn.vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&geometry_shaders_.rect_list);
if (status != VK_SUCCESS) {
return status;
}
@ -156,8 +157,8 @@ VkResult PipelineCache::Initialize(
shader_module_info.codeSize = static_cast<uint32_t>(sizeof(dummy_frag));
shader_module_info.pCode = reinterpret_cast<const uint32_t*>(dummy_frag);
status = vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&dummy_pixel_shader_);
status = dfn.vkCreateShaderModule(*device_, &shader_module_info, nullptr,
&dummy_pixel_shader_);
if (status != VK_SUCCESS) {
return status;
}
@ -171,34 +172,37 @@ VkResult PipelineCache::Initialize(
void PipelineCache::Shutdown() {
ClearCache();
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Destroy geometry shaders.
if (geometry_shaders_.line_quad_list) {
vkDestroyShaderModule(*device_, geometry_shaders_.line_quad_list, nullptr);
dfn.vkDestroyShaderModule(*device_, geometry_shaders_.line_quad_list,
nullptr);
geometry_shaders_.line_quad_list = nullptr;
}
if (geometry_shaders_.point_list) {
vkDestroyShaderModule(*device_, geometry_shaders_.point_list, nullptr);
dfn.vkDestroyShaderModule(*device_, geometry_shaders_.point_list, nullptr);
geometry_shaders_.point_list = nullptr;
}
if (geometry_shaders_.quad_list) {
vkDestroyShaderModule(*device_, geometry_shaders_.quad_list, nullptr);
dfn.vkDestroyShaderModule(*device_, geometry_shaders_.quad_list, nullptr);
geometry_shaders_.quad_list = nullptr;
}
if (geometry_shaders_.rect_list) {
vkDestroyShaderModule(*device_, geometry_shaders_.rect_list, nullptr);
dfn.vkDestroyShaderModule(*device_, geometry_shaders_.rect_list, nullptr);
geometry_shaders_.rect_list = nullptr;
}
if (dummy_pixel_shader_) {
vkDestroyShaderModule(*device_, dummy_pixel_shader_, nullptr);
dfn.vkDestroyShaderModule(*device_, dummy_pixel_shader_, nullptr);
dummy_pixel_shader_ = nullptr;
}
if (pipeline_layout_) {
vkDestroyPipelineLayout(*device_, pipeline_layout_, nullptr);
dfn.vkDestroyPipelineLayout(*device_, pipeline_layout_, nullptr);
pipeline_layout_ = nullptr;
}
if (pipeline_cache_) {
vkDestroyPipelineCache(*device_, pipeline_cache_, nullptr);
dfn.vkDestroyPipelineCache(*device_, pipeline_cache_, nullptr);
pipeline_cache_ = nullptr;
}
}
@ -274,9 +278,10 @@ PipelineCache::UpdateStatus PipelineCache::ConfigurePipeline(
}
void PipelineCache::ClearCache() {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Destroy all pipelines.
for (auto it : cached_pipelines_) {
vkDestroyPipeline(*device_, it.second, nullptr);
dfn.vkDestroyPipeline(*device_, it.second, nullptr);
}
cached_pipelines_.clear();
COUNT_profile_set("gpu/pipeline_cache/pipelines", 0);
@ -338,8 +343,9 @@ VkPipeline PipelineCache::GetPipeline(const RenderState* render_state,
pipeline_info.basePipelineHandle = nullptr;
pipeline_info.basePipelineIndex = -1;
VkPipeline pipeline = nullptr;
auto result = vkCreateGraphicsPipelines(*device_, pipeline_cache_, 1,
&pipeline_info, nullptr, &pipeline);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto result = dfn.vkCreateGraphicsPipelines(
*device_, pipeline_cache_, 1, &pipeline_info, nullptr, &pipeline);
if (result != VK_SUCCESS) {
XELOGE("vkCreateGraphicsPipelines failed with code {}", result);
assert_always();
@ -415,9 +421,7 @@ static void DumpShaderStatisticsAMD(const VkShaderStatisticsInfoAMD& stats) {
}
void PipelineCache::DumpShaderDisasmAMD(VkPipeline pipeline) {
auto fn_GetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)vkGetDeviceProcAddr(
*device_, "vkGetShaderInfoAMD");
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
size_t data_size = 0;
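
vkGetShaderInfoAMD was previously fetched ad hoc with vkGetDeviceProcAddr on every disassembly dump; with manual loading it lives in the same device table, resolved once at device creation when VK_AMD_shader_info is enabled. A hedged sketch of that load step — everything but the extension name is an assumed detail:

// ifn.vkGetDeviceProcAddr is assumed to come from the instance table.
dfn_.vkGetShaderInfoAMD = reinterpret_cast<PFN_vkGetShaderInfoAMD>(
    ifn.vkGetDeviceProcAddr(device_, "vkGetShaderInfoAMD"));
// A null result means the extension is unavailable and the dump path
// must be skipped.

This is the extension-control half of the commit: optional entry points get loaded, checked, and owned in one place rather than at scattered call sites.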
@ -425,18 +429,18 @@ void PipelineCache::DumpShaderDisasmAMD(VkPipeline pipeline) {
data_size = sizeof(stats);
// Vertex shader
status = fn_GetShaderInfoAMD(*device_, pipeline, VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_INFO_TYPE_STATISTICS_AMD, &data_size,
&stats);
status = dfn.vkGetShaderInfoAMD(
*device_, pipeline, VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_INFO_TYPE_STATISTICS_AMD, &data_size, &stats);
if (status == VK_SUCCESS) {
XELOGI("AMD Vertex Shader Statistics:");
DumpShaderStatisticsAMD(stats);
}
// Fragment shader
status = fn_GetShaderInfoAMD(*device_, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_INFO_TYPE_STATISTICS_AMD, &data_size,
&stats);
status = dfn.vkGetShaderInfoAMD(
*device_, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_INFO_TYPE_STATISTICS_AMD, &data_size, &stats);
if (status == VK_SUCCESS) {
XELOGI("AMD Fragment Shader Statistics:");
DumpShaderStatisticsAMD(stats);
@ -451,6 +455,8 @@ void PipelineCache::DumpShaderDisasmNV(
// This code is super ugly. Update this when NVidia includes an official
// way to dump shader disassembly.
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkPipelineCacheCreateInfo pipeline_cache_info;
VkPipelineCache dummy_pipeline_cache;
pipeline_cache_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
@ -458,23 +464,24 @@ void PipelineCache::DumpShaderDisasmNV(
pipeline_cache_info.flags = 0;
pipeline_cache_info.initialDataSize = 0;
pipeline_cache_info.pInitialData = nullptr;
auto status = vkCreatePipelineCache(*device_, &pipeline_cache_info, nullptr,
&dummy_pipeline_cache);
auto status = dfn.vkCreatePipelineCache(*device_, &pipeline_cache_info,
nullptr, &dummy_pipeline_cache);
CheckResult(status, "vkCreatePipelineCache");
// Create a pipeline on the dummy cache and dump it.
VkPipeline dummy_pipeline;
status = vkCreateGraphicsPipelines(*device_, dummy_pipeline_cache, 1,
&pipeline_info, nullptr, &dummy_pipeline);
status =
dfn.vkCreateGraphicsPipelines(*device_, dummy_pipeline_cache, 1,
&pipeline_info, nullptr, &dummy_pipeline);
std::vector<uint8_t> pipeline_data;
size_t data_size = 0;
status = vkGetPipelineCacheData(*device_, dummy_pipeline_cache, &data_size,
nullptr);
status = dfn.vkGetPipelineCacheData(*device_, dummy_pipeline_cache,
&data_size, nullptr);
if (status == VK_SUCCESS) {
pipeline_data.resize(data_size);
vkGetPipelineCacheData(*device_, dummy_pipeline_cache, &data_size,
pipeline_data.data());
dfn.vkGetPipelineCacheData(*device_, dummy_pipeline_cache, &data_size,
pipeline_data.data());
// Scan the data for the disassembly.
std::string disasm_vp, disasm_fp;
@ -530,8 +537,8 @@ void PipelineCache::DumpShaderDisasmNV(
disasm_fp);
}
vkDestroyPipeline(*device_, dummy_pipeline, nullptr);
vkDestroyPipelineCache(*device_, dummy_pipeline_cache, nullptr);
dfn.vkDestroyPipeline(*device_, dummy_pipeline, nullptr);
dfn.vkDestroyPipelineCache(*device_, dummy_pipeline_cache, nullptr);
}
VkShaderModule PipelineCache::GetGeometryShader(
@ -575,6 +582,7 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
SCOPE_profile_cpu_f("gpu");
#endif // FINE_GRAINED_DRAW_SCOPES
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto& regs = set_dynamic_state_registers_;
bool window_offset_dirty = SetShadowRegister(&regs.pa_sc_window_offset,
@ -620,7 +628,7 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
scissor_rect.offset.y = ws_y - adj_y;
scissor_rect.extent.width = std::max(ws_w + adj_x, 0);
scissor_rect.extent.height = std::max(ws_h + adj_y, 0);
vkCmdSetScissor(command_buffer, 0, 1, &scissor_rect);
dfn.vkCmdSetScissor(command_buffer, 0, 1, &scissor_rect);
}
// VK_DYNAMIC_STATE_VIEWPORT
@ -713,7 +721,7 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
assert_true(viewport_rect.minDepth >= 0 && viewport_rect.minDepth <= 1);
assert_true(viewport_rect.maxDepth >= -1 && viewport_rect.maxDepth <= 1);
vkCmdSetViewport(command_buffer, 0, 1, &viewport_rect);
dfn.vkCmdSetViewport(command_buffer, 0, 1, &viewport_rect);
}
// VK_DYNAMIC_STATE_DEPTH_BIAS
@ -767,13 +775,13 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
regs.pa_su_poly_offset_offset != depth_bias_offset_vulkan) {
regs.pa_su_poly_offset_scale = depth_bias_scale_vulkan;
regs.pa_su_poly_offset_offset = depth_bias_offset_vulkan;
vkCmdSetDepthBias(command_buffer, depth_bias_offset_vulkan, 0.0f,
depth_bias_scale_vulkan);
dfn.vkCmdSetDepthBias(command_buffer, depth_bias_offset_vulkan, 0.0f,
depth_bias_scale_vulkan);
}
} else if (full_update) {
regs.pa_su_poly_offset_scale = 0.0f;
regs.pa_su_poly_offset_offset = 0.0f;
vkCmdSetDepthBias(command_buffer, 0.0f, 0.0f, 0.0f);
dfn.vkCmdSetDepthBias(command_buffer, 0.0f, 0.0f, 0.0f);
}
// VK_DYNAMIC_STATE_BLEND_CONSTANTS
@ -787,7 +795,7 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
blend_constant_state_dirty |=
SetShadowRegister(&regs.rb_blend_rgba[3], XE_GPU_REG_RB_BLEND_ALPHA);
if (blend_constant_state_dirty) {
vkCmdSetBlendConstants(command_buffer, regs.rb_blend_rgba);
dfn.vkCmdSetBlendConstants(command_buffer, regs.rb_blend_rgba);
}
bool stencil_state_dirty = full_update;
@ -799,16 +807,16 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
uint32_t stencil_write_mask = (regs.rb_stencilrefmask >> 16) & 0xFF;
// VK_DYNAMIC_STATE_STENCIL_REFERENCE
vkCmdSetStencilReference(command_buffer, VK_STENCIL_FRONT_AND_BACK,
stencil_ref);
dfn.vkCmdSetStencilReference(command_buffer, VK_STENCIL_FRONT_AND_BACK,
stencil_ref);
// VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
vkCmdSetStencilCompareMask(command_buffer, VK_STENCIL_FRONT_AND_BACK,
stencil_read_mask);
dfn.vkCmdSetStencilCompareMask(command_buffer, VK_STENCIL_FRONT_AND_BACK,
stencil_read_mask);
// VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
vkCmdSetStencilWriteMask(command_buffer, VK_STENCIL_FRONT_AND_BACK,
stencil_write_mask);
dfn.vkCmdSetStencilWriteMask(command_buffer, VK_STENCIL_FRONT_AND_BACK,
stencil_write_mask);
}
bool push_constants_dirty = full_update || viewport_state_dirty;
@ -905,19 +913,19 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
push_constants.ps_param_gen =
regs.sq_program_cntl.param_gen ? ps_param_gen : -1;
vkCmdPushConstants(command_buffer, pipeline_layout_,
VK_SHADER_STAGE_VERTEX_BIT |
VK_SHADER_STAGE_GEOMETRY_BIT |
VK_SHADER_STAGE_FRAGMENT_BIT,
0, kSpirvPushConstantsSize, &push_constants);
dfn.vkCmdPushConstants(command_buffer, pipeline_layout_,
VK_SHADER_STAGE_VERTEX_BIT |
VK_SHADER_STAGE_GEOMETRY_BIT |
VK_SHADER_STAGE_FRAGMENT_BIT,
0, kSpirvPushConstantsSize, &push_constants);
}
if (full_update) {
// VK_DYNAMIC_STATE_LINE_WIDTH
vkCmdSetLineWidth(command_buffer, 1.0f);
dfn.vkCmdSetLineWidth(command_buffer, 1.0f);
// VK_DYNAMIC_STATE_DEPTH_BOUNDS
vkCmdSetDepthBounds(command_buffer, 0.0f, 1.0f);
dfn.vkCmdSetDepthBounds(command_buffer, 0.0f, 1.0f);
}
return true;

View File

@ -8,7 +8,6 @@ project("xenia-gpu-vulkan")
language("C++")
links({
"fmt",
"volk",
"xenia-base",
"xenia-gpu",
"xenia-ui",
@ -57,7 +56,6 @@ project("xenia-gpu-vulkan-trace-viewer")
"mspack",
"snappy",
"spirv-tools",
"volk",
"xxhash",
})
defines({
@ -126,7 +124,6 @@ project("xenia-gpu-vulkan-trace-dump")
"mspack",
"snappy",
"spirv-tools",
"volk",
"xxhash",
})
defines({

View File

@ -105,8 +105,9 @@ class CachedFramebuffer {
// Associated render pass
VkRenderPass render_pass = nullptr;
CachedFramebuffer(VkDevice device, VkRenderPass render_pass,
uint32_t surface_width, uint32_t surface_height,
CachedFramebuffer(const ui::vulkan::VulkanDevice& device,
VkRenderPass render_pass, uint32_t surface_width,
uint32_t surface_height,
CachedTileView* target_color_attachments[4],
CachedTileView* target_depth_stencil_attachment);
~CachedFramebuffer();
@ -116,7 +117,7 @@ class CachedFramebuffer {
bool IsCompatible(const RenderConfiguration& desired_config) const;
private:
VkDevice device_ = nullptr;
const ui::vulkan::VulkanDevice& device_;
};
// Cached render passes based on register states.
@ -133,7 +134,8 @@ class CachedRenderPass {
// Cache of framebuffers for the various tile attachments.
std::vector<CachedFramebuffer*> cached_framebuffers;
CachedRenderPass(VkDevice device, const RenderConfiguration& desired_config);
CachedRenderPass(const ui::vulkan::VulkanDevice& device,
const RenderConfiguration& desired_config);
~CachedRenderPass();
VkResult Initialize();
@ -141,7 +143,7 @@ class CachedRenderPass {
bool IsCompatible(const RenderConfiguration& desired_config) const;
private:
VkDevice device_ = nullptr;
const ui::vulkan::VulkanDevice& device_;
};
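
Storing a const ui::vulkan::VulkanDevice& rather than a raw VkDevice is what lets these cached objects reach the function table in their destructors below. Call sites that pass device_ where a VkDevice is expected keep working because, as usages like dfn.vkCreateFramebuffer(device_, ...) imply, VulkanDevice converts implicitly to its handle; a sketch of that shape:

class VulkanDevice {
 public:
  const DeviceFunctions& dfn() const { return dfn_; }
  // Implicit conversion implied by the call sites in this diff (sketch).
  operator VkDevice() const { return handle_; }

 private:
  DeviceFunctions dfn_;
  VkDevice handle_ = VK_NULL_HANDLE;
};
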
CachedTileView::CachedTileView(ui::vulkan::VulkanDevice* device,
@ -150,14 +152,19 @@ CachedTileView::CachedTileView(ui::vulkan::VulkanDevice* device,
: device_(device), key(std::move(view_key)) {}
CachedTileView::~CachedTileView() {
VK_SAFE_DESTROY(vkDestroyImageView, *device_, image_view, nullptr);
VK_SAFE_DESTROY(vkDestroyImageView, *device_, image_view_depth, nullptr);
VK_SAFE_DESTROY(vkDestroyImageView, *device_, image_view_stencil, nullptr);
VK_SAFE_DESTROY(vkDestroyImage, *device_, image, nullptr);
VK_SAFE_DESTROY(vkFreeMemory, *device_, memory, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyImageView, *device_,
image_view);
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyImageView, *device_,
image_view_depth);
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyImageView, *device_,
image_view_stencil);
ui::vulkan::DestroyAndNullHandle(dfn.vkDestroyImage, *device_, image);
ui::vulkan::DestroyAndNullHandle(dfn.vkFreeMemory, *device_, memory);
}
VkResult CachedTileView::Initialize(VkCommandBuffer command_buffer) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Map format to Vulkan.
@ -230,7 +237,7 @@ VkResult CachedTileView::Initialize(VkCommandBuffer command_buffer) {
image_info.queueFamilyIndexCount = 0;
image_info.pQueueFamilyIndices = nullptr;
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
status = vkCreateImage(*device_, &image_info, nullptr, &image);
status = dfn.vkCreateImage(*device_, &image_info, nullptr, &image);
if (status != VK_SUCCESS) {
return status;
}
@ -244,12 +251,12 @@ VkResult CachedTileView::Initialize(VkCommandBuffer command_buffer) {
uint32_t(key.msaa_samples), uint32_t(key.edram_format)));
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(*device_, image, &memory_requirements);
dfn.vkGetImageMemoryRequirements(*device_, image, &memory_requirements);
// Bind to a newly allocated chunk.
// TODO: Alias from a really big buffer?
memory = device_->AllocateMemory(memory_requirements, 0);
status = vkBindImageMemory(*device_, image, memory, 0);
status = dfn.vkBindImageMemory(*device_, image, memory, 0);
if (status != VK_SUCCESS) {
return status;
}
@ -276,7 +283,8 @@ VkResult CachedTileView::Initialize(VkCommandBuffer command_buffer) {
image_view_info.subresourceRange.aspectMask =
VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
}
status = vkCreateImageView(*device_, &image_view_info, nullptr, &image_view);
status =
dfn.vkCreateImageView(*device_, &image_view_info, nullptr, &image_view);
if (status != VK_SUCCESS) {
return status;
}
@ -284,15 +292,15 @@ VkResult CachedTileView::Initialize(VkCommandBuffer command_buffer) {
// Create separate depth/stencil views.
if (key.color_or_depth == 0) {
image_view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
status = vkCreateImageView(*device_, &image_view_info, nullptr,
&image_view_depth);
status = dfn.vkCreateImageView(*device_, &image_view_info, nullptr,
&image_view_depth);
if (status != VK_SUCCESS) {
return status;
}
image_view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
status = vkCreateImageView(*device_, &image_view_info, nullptr,
&image_view_stencil);
status = dfn.vkCreateImageView(*device_, &image_view_info, nullptr,
&image_view_stencil);
if (status != VK_SUCCESS) {
return status;
}
@ -319,19 +327,20 @@ VkResult CachedTileView::Initialize(VkCommandBuffer command_buffer) {
image_barrier.subresourceRange.levelCount = 1;
image_barrier.subresourceRange.baseArrayLayer = 0;
image_barrier.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
key.color_or_depth
? VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
: VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
0, 0, nullptr, 0, nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
key.color_or_depth
? VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
: VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
0, 0, nullptr, 0, nullptr, 1, &image_barrier);
image_layout = image_barrier.newLayout;
return VK_SUCCESS;
}
CachedFramebuffer::CachedFramebuffer(
VkDevice device, VkRenderPass render_pass, uint32_t surface_width,
uint32_t surface_height, CachedTileView* target_color_attachments[4],
const ui::vulkan::VulkanDevice& device, VkRenderPass render_pass,
uint32_t surface_width, uint32_t surface_height,
CachedTileView* target_color_attachments[4],
CachedTileView* target_depth_stencil_attachment)
: device_(device),
width(surface_width),
@ -344,7 +353,10 @@ CachedFramebuffer::CachedFramebuffer(
}
CachedFramebuffer::~CachedFramebuffer() {
VK_SAFE_DESTROY(vkDestroyFramebuffer, device_, handle, nullptr);
if (handle != VK_NULL_HANDLE) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_.dfn();
dfn.vkDestroyFramebuffer(device_, handle, nullptr);
}
}
VkResult CachedFramebuffer::Initialize() {
@ -369,7 +381,8 @@ VkResult CachedFramebuffer::Initialize() {
framebuffer_info.width = width;
framebuffer_info.height = height;
framebuffer_info.layers = 1;
return vkCreateFramebuffer(device_, &framebuffer_info, nullptr, &handle);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_.dfn();
return dfn.vkCreateFramebuffer(device_, &framebuffer_info, nullptr, &handle);
}
bool CachedFramebuffer::IsCompatible(
@ -414,7 +427,7 @@ bool CachedFramebuffer::IsCompatible(
return true;
}
CachedRenderPass::CachedRenderPass(VkDevice device,
CachedRenderPass::CachedRenderPass(const ui::vulkan::VulkanDevice& device,
const RenderConfiguration& desired_config)
: device_(device) {
std::memcpy(&config, &desired_config, sizeof(config));
@ -426,7 +439,10 @@ CachedRenderPass::~CachedRenderPass() {
}
cached_framebuffers.clear();
VK_SAFE_DESTROY(vkDestroyRenderPass, device_, handle, nullptr);
if (handle != VK_NULL_HANDLE) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_.dfn();
dfn.vkDestroyRenderPass(device_, handle, nullptr);
}
}
VkResult CachedRenderPass::Initialize() {
@ -537,7 +553,8 @@ VkResult CachedRenderPass::Initialize() {
render_pass_info.dependencyCount = 1;
render_pass_info.pDependencies = dependencies;
return vkCreateRenderPass(device_, &render_pass_info, nullptr, &handle);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_.dfn();
return dfn.vkCreateRenderPass(device_, &render_pass_info, nullptr, &handle);
}
bool CachedRenderPass::IsCompatible(
@ -566,6 +583,7 @@ RenderCache::RenderCache(RegisterFile* register_file,
RenderCache::~RenderCache() { Shutdown(); }
VkResult RenderCache::Initialize() {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Create the buffer we'll bind to our memory.
@ -579,7 +597,7 @@ VkResult RenderCache::Initialize() {
buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buffer_info.queueFamilyIndexCount = 0;
buffer_info.pQueueFamilyIndices = nullptr;
status = vkCreateBuffer(*device_, &buffer_info, nullptr, &edram_buffer_);
status = dfn.vkCreateBuffer(*device_, &buffer_info, nullptr, &edram_buffer_);
CheckResult(status, "vkCreateBuffer");
if (status != VK_SUCCESS) {
return status;
@ -588,7 +606,8 @@ VkResult RenderCache::Initialize() {
// Query requirements for the buffer.
// It should be 1:1.
VkMemoryRequirements buffer_requirements;
vkGetBufferMemoryRequirements(*device_, edram_buffer_, &buffer_requirements);
dfn.vkGetBufferMemoryRequirements(*device_, edram_buffer_,
&buffer_requirements);
assert_true(buffer_requirements.size == kEdramBufferCapacity);
// Allocate EDRAM memory.
@ -600,7 +619,7 @@ VkResult RenderCache::Initialize() {
}
// Bind buffer to map our entire memory.
status = vkBindBufferMemory(*device_, edram_buffer_, edram_memory_, 0);
status = dfn.vkBindBufferMemory(*device_, edram_buffer_, edram_memory_, 0);
CheckResult(status, "vkBindBufferMemory");
if (status != VK_SUCCESS) {
return status;
@ -609,15 +628,16 @@ VkResult RenderCache::Initialize() {
if (status == VK_SUCCESS) {
// For debugging, upload a grid into the EDRAM buffer.
uint32_t* gpu_data = nullptr;
status = vkMapMemory(*device_, edram_memory_, 0, buffer_requirements.size,
0, reinterpret_cast<void**>(&gpu_data));
status =
dfn.vkMapMemory(*device_, edram_memory_, 0, buffer_requirements.size, 0,
reinterpret_cast<void**>(&gpu_data));
if (status == VK_SUCCESS) {
for (int i = 0; i < kEdramBufferCapacity / 4; i++) {
gpu_data[i] = (i % 8) >= 4 ? 0xFF0000FF : 0xFFFFFFFF;
}
vkUnmapMemory(*device_, edram_memory_);
dfn.vkUnmapMemory(*device_, edram_memory_);
}
}
@ -627,6 +647,8 @@ VkResult RenderCache::Initialize() {
void RenderCache::Shutdown() {
// TODO(benvanik): wait for idle.
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Dispose all render passes (and their framebuffers).
for (auto render_pass : cached_render_passes_) {
delete render_pass;
@ -641,11 +663,11 @@ void RenderCache::Shutdown() {
// Release underlying EDRAM memory.
if (edram_buffer_) {
vkDestroyBuffer(*device_, edram_buffer_, nullptr);
dfn.vkDestroyBuffer(*device_, edram_buffer_, nullptr);
edram_buffer_ = nullptr;
}
if (edram_memory_) {
vkFreeMemory(*device_, edram_memory_, nullptr);
dfn.vkFreeMemory(*device_, edram_memory_, nullptr);
edram_memory_ = nullptr;
}
}
@ -781,8 +803,9 @@ const RenderState* RenderCache::BeginRenderPass(VkCommandBuffer command_buffer,
render_pass_begin_info.pClearValues = nullptr;
// Begin the render pass.
vkCmdBeginRenderPass(command_buffer, &render_pass_begin_info,
VK_SUBPASS_CONTENTS_INLINE);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdBeginRenderPass(command_buffer, &render_pass_begin_info,
VK_SUBPASS_CONTENTS_INLINE);
return &current_state_;
}
@ -1019,6 +1042,8 @@ CachedTileView* RenderCache::FindOrCreateTileView(
void RenderCache::UpdateTileView(VkCommandBuffer command_buffer,
CachedTileView* view, bool load,
bool insert_barrier) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
uint32_t tile_width =
view->key.msaa_samples == uint16_t(xenos::MsaaSamples::k4X) ? 40 : 80;
uint32_t tile_height =
@ -1043,9 +1068,9 @@ void RenderCache::UpdateTileView(VkCommandBuffer command_buffer,
tile_height * view->key.color_or_depth
? 4
: 1;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr,
1, &barrier, 0, nullptr);
}
// TODO(DrChat): Stencil copies.
@ -1061,11 +1086,12 @@ void RenderCache::UpdateTileView(VkCommandBuffer command_buffer,
region.imageExtent = {view->key.tile_width * tile_width,
view->key.tile_height * tile_height, 1};
if (load) {
vkCmdCopyBufferToImage(command_buffer, edram_buffer_, view->image,
VK_IMAGE_LAYOUT_GENERAL, 1, &region);
dfn.vkCmdCopyBufferToImage(command_buffer, edram_buffer_, view->image,
VK_IMAGE_LAYOUT_GENERAL, 1, &region);
} else {
vkCmdCopyImageToBuffer(command_buffer, view->image, VK_IMAGE_LAYOUT_GENERAL,
edram_buffer_, 1, &region);
dfn.vkCmdCopyImageToBuffer(command_buffer, view->image,
VK_IMAGE_LAYOUT_GENERAL, edram_buffer_, 1,
&region);
}
}
@ -1085,7 +1111,8 @@ void RenderCache::EndRenderPass() {
assert_not_null(current_command_buffer_);
// End the render pass.
vkCmdEndRenderPass(current_command_buffer_);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdEndRenderPass(current_command_buffer_);
// Copy all render targets back into our EDRAM buffer.
// Don't bother waiting on this command to complete, as next render pass may
@ -1138,6 +1165,8 @@ void RenderCache::RawCopyToImage(VkCommandBuffer command_buffer,
VkImageLayout image_layout,
bool color_or_depth, VkOffset3D offset,
VkExtent3D extents) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Transition the texture into a transfer destination layout.
VkImageMemoryBarrier image_barrier;
image_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
@ -1157,9 +1186,9 @@ void RenderCache::RawCopyToImage(VkCommandBuffer command_buffer,
? VK_IMAGE_ASPECT_COLOR_BIT
: VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr,
0, nullptr, 1, &image_barrier);
}
VkBufferMemoryBarrier buffer_barrier;
@ -1173,9 +1202,9 @@ void RenderCache::RawCopyToImage(VkCommandBuffer command_buffer,
// TODO: Calculate this accurately (need texel size)
buffer_barrier.size = extents.width * extents.height * 4;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 1,
&buffer_barrier, 0, nullptr);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 1,
&buffer_barrier, 0, nullptr);
// Issue the copy command.
// TODO(DrChat): Stencil copies.
@ -1188,8 +1217,8 @@ void RenderCache::RawCopyToImage(VkCommandBuffer command_buffer,
region.imageSubresource = {0, 0, 0, 1};
region.imageSubresource.aspectMask =
color_or_depth ? VK_IMAGE_ASPECT_COLOR_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
vkCmdCopyBufferToImage(command_buffer, edram_buffer_, image, image_layout, 1,
&region);
dfn.vkCmdCopyBufferToImage(command_buffer, edram_buffer_, image, image_layout,
1, &region);
// Transition the image back into its previous layout.
if (image_layout != VK_IMAGE_LAYOUT_GENERAL &&
@ -1197,9 +1226,9 @@ void RenderCache::RawCopyToImage(VkCommandBuffer command_buffer,
image_barrier.srcAccessMask = image_barrier.dstAccessMask;
image_barrier.dstAccessMask = 0;
std::swap(image_barrier.oldLayout, image_barrier.newLayout);
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr,
0, nullptr, 1, &image_barrier);
}
}
@ -1210,6 +1239,8 @@ void RenderCache::BlitToImage(VkCommandBuffer command_buffer,
bool color_or_depth, uint32_t format,
VkFilter filter, VkOffset3D offset,
VkExtent3D extents) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (color_or_depth) {
// Adjust similar formats for easier matching.
format = static_cast<uint32_t>(
@ -1252,9 +1283,9 @@ void RenderCache::BlitToImage(VkCommandBuffer command_buffer,
color_or_depth ? VK_IMAGE_ASPECT_COLOR_BIT
: VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
// If we overflow we'll lose the device here.
// assert_true(extents.width <= key.tile_width * tile_width);
@ -1277,8 +1308,9 @@ void RenderCache::BlitToImage(VkCommandBuffer command_buffer,
image_blit.dstOffsets[1] = {offset.x + int32_t(extents.width),
offset.y + int32_t(extents.height),
offset.z + int32_t(extents.depth)};
vkCmdBlitImage(command_buffer, tile_view->image, VK_IMAGE_LAYOUT_GENERAL,
image, image_layout, 1, &image_blit, filter);
dfn.vkCmdBlitImage(command_buffer, tile_view->image,
VK_IMAGE_LAYOUT_GENERAL, image, image_layout, 1,
&image_blit, filter);
} else {
VkImageResolve image_resolve;
image_resolve.srcSubresource = {0, 0, 0, 1};
@ -1292,8 +1324,9 @@ void RenderCache::BlitToImage(VkCommandBuffer command_buffer,
image_resolve.dstOffset = offset;
image_resolve.extent = extents;
vkCmdResolveImage(command_buffer, tile_view->image, VK_IMAGE_LAYOUT_GENERAL,
image, image_layout, 1, &image_resolve);
dfn.vkCmdResolveImage(command_buffer, tile_view->image,
VK_IMAGE_LAYOUT_GENERAL, image, image_layout, 1,
&image_resolve);
}
// Add another barrier on the tile view.
@ -1302,9 +1335,9 @@ void RenderCache::BlitToImage(VkCommandBuffer command_buffer,
color_or_depth ? VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
: VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
std::swap(image_barrier.oldLayout, image_barrier.newLayout);
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
}
void RenderCache::ClearEDRAMColor(VkCommandBuffer command_buffer,
@ -1339,8 +1372,9 @@ void RenderCache::ClearEDRAMColor(VkCommandBuffer command_buffer,
std::memcpy(clear_value.float32, color, sizeof(float) * 4);
// Issue a clear command
vkCmdClearColorImage(command_buffer, tile_view->image,
VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdClearColorImage(command_buffer, tile_view->image,
VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range);
// Copy image back into EDRAM buffer
// UpdateTileView(command_buffer, tile_view, false, false);
@ -1378,16 +1412,19 @@ void RenderCache::ClearEDRAMDepthStencil(VkCommandBuffer command_buffer,
clear_value.stencil = stencil;
// Issue a clear command
vkCmdClearDepthStencilImage(command_buffer, tile_view->image,
VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdClearDepthStencilImage(command_buffer, tile_view->image,
VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1,
&range);
// Copy image back into EDRAM buffer
// UpdateTileView(command_buffer, tile_view, false, false);
}
void RenderCache::FillEDRAM(VkCommandBuffer command_buffer, uint32_t value) {
vkCmdFillBuffer(command_buffer, edram_buffer_, 0, kEdramBufferCapacity,
value);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdFillBuffer(command_buffer, edram_buffer_, 0, kEdramBufferCapacity,
value);
}
bool RenderCache::SetShadowRegister(uint32_t* dest, uint32_t register_name) {

View File

@ -22,6 +22,7 @@
#include "xenia/gpu/texture_info.h"
#include "xenia/gpu/vulkan/texture_config.h"
#include "xenia/gpu/vulkan/vulkan_gpu_flags.h"
#include "xenia/ui/vulkan/vulkan_instance.h"
#include "xenia/ui/vulkan/vulkan_mem_alloc.h"
DECLARE_bool(texture_dump);
@ -67,6 +68,7 @@ TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
TextureCache::~TextureCache() { Shutdown(); }
VkResult TextureCache::Initialize() {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Descriptor pool used for all of our cached descriptors.
@ -115,8 +117,8 @@ VkResult TextureCache::Initialize() {
static_cast<uint32_t>(xe::countof(bindings));
descriptor_set_layout_info.pBindings = bindings;
status =
vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
nullptr, &texture_descriptor_set_layout_);
dfn.vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
nullptr, &texture_descriptor_set_layout_);
if (status != VK_SUCCESS) {
return status;
}
@ -133,15 +135,15 @@ VkResult TextureCache::Initialize() {
// Create a memory allocator for textures.
VmaVulkanFunctions vulkan_funcs = {};
ui::vulkan::FillVMAVulkanFunctions(&vulkan_funcs);
ui::vulkan::FillVMAVulkanFunctions(&vulkan_funcs, *device_);
VmaAllocatorCreateInfo alloc_info = {
0, *device_, *device_, 0, 0, nullptr, nullptr, 0, nullptr, &vulkan_funcs,
};
status = vmaCreateAllocator(&alloc_info, &mem_allocator_);
if (status != VK_SUCCESS) {
vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
nullptr);
dfn.vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
nullptr);
return status;
}
@ -177,8 +179,9 @@ void TextureCache::Shutdown() {
vmaDestroyAllocator(mem_allocator_);
mem_allocator_ = nullptr;
}
vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
nullptr);
}
TextureCache::Texture* TextureCache::AllocateTexture(
@ -229,9 +232,12 @@ TextureCache::Texture* TextureCache::AllocateTexture(
image_info.usage =
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
const ui::vulkan::VulkanInstance* instance = device_->instance();
const ui::vulkan::VulkanInstance::InstanceFunctions& ifn = instance->ifn();
// Check the device limits for the format before we create it.
VkFormatProperties props;
vkGetPhysicalDeviceFormatProperties(*device_, format, &props);
ifn.vkGetPhysicalDeviceFormatProperties(*device_, format, &props);
if ((props.optimalTilingFeatures & required_flags) != required_flags) {
// Texture needs conversion on upload to a native format.
XELOGE(
@ -257,7 +263,7 @@ TextureCache::Texture* TextureCache::AllocateTexture(
}
VkImageFormatProperties image_props;
vkGetPhysicalDeviceImageFormatProperties(
ifn.vkGetPhysicalDeviceImageFormatProperties(
*device_, format, image_info.imageType, image_info.tiling,
image_info.usage, image_info.flags, &image_props);
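
Note the split visible here: vkGetPhysicalDeviceFormatProperties and vkGetPhysicalDeviceImageFormatProperties are instance-level commands, so they dispatch through VulkanInstance::InstanceFunctions (ifn()) rather than the device table, and per the Vulkan loader rules they must be resolved with vkGetInstanceProcAddr. A hedged sketch of that resolution, where get_instance_proc_addr stands for the PFN_vkGetInstanceProcAddr fetched from the dynamically loaded Vulkan library:

ifn_.vkGetPhysicalDeviceFormatProperties =
    reinterpret_cast<PFN_vkGetPhysicalDeviceFormatProperties>(
        get_instance_proc_addr(instance_,
                               "vkGetPhysicalDeviceFormatProperties"));
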
@ -308,8 +314,10 @@ TextureCache::Texture* TextureCache::AllocateTexture(
}
bool TextureCache::FreeTexture(Texture* texture) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (texture->in_flight_fence) {
VkResult status = vkGetFenceStatus(*device_, texture->in_flight_fence);
VkResult status = dfn.vkGetFenceStatus(*device_, texture->in_flight_fence);
if (status != VK_SUCCESS && status != VK_ERROR_DEVICE_LOST) {
// Texture still in flight.
return false;
@ -317,11 +325,11 @@ bool TextureCache::FreeTexture(Texture* texture) {
}
if (texture->framebuffer) {
vkDestroyFramebuffer(*device_, texture->framebuffer, nullptr);
dfn.vkDestroyFramebuffer(*device_, texture->framebuffer, nullptr);
}
for (auto it = texture->views.begin(); it != texture->views.end();) {
vkDestroyImageView(*device_, (*it)->view, nullptr);
dfn.vkDestroyImageView(*device_, (*it)->view, nullptr);
it = texture->views.erase(it);
}
@ -692,7 +700,8 @@ TextureCache::TextureView* TextureCache::DemandView(Texture* texture,
!is_cube ? 1 : 1 + texture->texture_info.depth;
VkImageView view;
auto status = vkCreateImageView(*device_, &view_info, nullptr, &view);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto status = dfn.vkCreateImageView(*device_, &view_info, nullptr, &view);
CheckResult(status, "vkCreateImageView");
if (status == VK_SUCCESS) {
auto texture_view = new TextureView();
@ -832,8 +841,9 @@ TextureCache::Sampler* TextureCache::Demand(const SamplerInfo& sampler_info) {
sampler_create_info.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
sampler_create_info.unnormalizedCoordinates = VK_FALSE;
VkSampler vk_sampler;
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
status =
vkCreateSampler(*device_, &sampler_create_info, nullptr, &vk_sampler);
dfn.vkCreateSampler(*device_, &sampler_create_info, nullptr, &vk_sampler);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return nullptr;
@ -947,7 +957,8 @@ TextureCache::Texture* TextureCache::LookupAddress(uint32_t guest_address,
void TextureCache::FlushPendingCommands(VkCommandBuffer command_buffer,
VkFence completion_fence) {
auto status = vkEndCommandBuffer(command_buffer);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto status = dfn.vkEndCommandBuffer(command_buffer);
CheckResult(status, "vkEndCommandBuffer");
VkSubmitInfo submit_info;
@ -958,27 +969,27 @@ void TextureCache::FlushPendingCommands(VkCommandBuffer command_buffer,
if (device_queue_) {
auto status =
vkQueueSubmit(device_queue_, 1, &submit_info, completion_fence);
dfn.vkQueueSubmit(device_queue_, 1, &submit_info, completion_fence);
CheckResult(status, "vkQueueSubmit");
} else {
std::lock_guard<std::mutex> lock(device_->primary_queue_mutex());
auto status = vkQueueSubmit(device_->primary_queue(), 1, &submit_info,
completion_fence);
auto status = dfn.vkQueueSubmit(device_->primary_queue(), 1, &submit_info,
completion_fence);
CheckResult(status, "vkQueueSubmit");
}
vkWaitForFences(*device_, 1, &completion_fence, VK_TRUE, -1);
dfn.vkWaitForFences(*device_, 1, &completion_fence, VK_TRUE, -1);
staging_buffer_.Scavenge();
vkResetFences(*device_, 1, &completion_fence);
dfn.vkResetFences(*device_, 1, &completion_fence);
// Reset the command buffer and put it back into the recording state.
vkResetCommandBuffer(command_buffer, 0);
dfn.vkResetCommandBuffer(command_buffer, 0);
VkCommandBufferBeginInfo begin_info;
std::memset(&begin_info, 0, sizeof(begin_info));
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(command_buffer, &begin_info);
dfn.vkBeginCommandBuffer(command_buffer, &begin_info);
}
bool TextureCache::ConvertTexture(uint8_t* dest, VkBufferImageCopy* copy_region,
@ -1155,6 +1166,8 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
TextureDump(src, unpack_buffer, unpack_length);
}
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Transition the texture into a transfer destination layout.
VkImageMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
@ -1181,9 +1194,9 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
barrier.subresourceRange.layerCount =
copy_regions[0].imageSubresource.layerCount;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
// Now move the converted texture into the destination.
if (dest->format == VK_FORMAT_D16_UNORM_S8_UINT ||
@ -1195,19 +1208,19 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
copy_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
}
vkCmdCopyBufferToImage(command_buffer, staging_buffer_.gpu_buffer(),
dest->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
copy_region_count, copy_regions.data());
dfn.vkCmdCopyBufferToImage(command_buffer, staging_buffer_.gpu_buffer(),
dest->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
copy_region_count, copy_regions.data());
// Now transition the texture into a shader readonly source.
barrier.srcAccessMask = barrier.dstAccessMask;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.oldLayout = barrier.newLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0, 0, nullptr, 0, nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0, 0, nullptr, 0, nullptr, 1, &barrier);
dest->image_layout = barrier.newLayout;
return true;
@ -1297,6 +1310,7 @@ uint32_t TextureCache::ComputeTextureStorage(const TextureInfo& src) {
}
void TextureCache::WritebackTexture(Texture* texture) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
VkFence fence = wb_command_pool_->BeginBatch();
auto alloc = wb_staging_buffer_.Acquire(texture->alloc_info.size, fence);
@ -1313,7 +1327,7 @@ void TextureCache::WritebackTexture(Texture* texture) {
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
nullptr,
};
vkBeginCommandBuffer(command_buffer, &begin_info);
dfn.vkBeginCommandBuffer(command_buffer, &begin_info);
// TODO: Transition the texture to a transfer source.
// TODO: copy depth/layers?
@ -1333,13 +1347,13 @@ void TextureCache::WritebackTexture(Texture* texture) {
region.imageExtent.height = texture->texture_info.height + 1;
region.imageExtent.depth = 1;
vkCmdCopyImageToBuffer(command_buffer, texture->image,
VK_IMAGE_LAYOUT_GENERAL,
wb_staging_buffer_.gpu_buffer(), 1, &region);
dfn.vkCmdCopyImageToBuffer(command_buffer, texture->image,
VK_IMAGE_LAYOUT_GENERAL,
wb_staging_buffer_.gpu_buffer(), 1, &region);
// TODO: Transition the texture back to a shader resource.
vkEndCommandBuffer(command_buffer);
dfn.vkEndCommandBuffer(command_buffer);
// Submit the command buffer.
// Submit commands and wait.
@ -1356,11 +1370,12 @@ void TextureCache::WritebackTexture(Texture* texture) {
0,
nullptr,
};
status = vkQueueSubmit(device_->primary_queue(), 1, &submit_info, fence);
status =
dfn.vkQueueSubmit(device_->primary_queue(), 1, &submit_info, fence);
CheckResult(status, "vkQueueSubmit");
if (status == VK_SUCCESS) {
status = vkQueueWaitIdle(device_->primary_queue());
status = dfn.vkQueueWaitIdle(device_->primary_queue());
CheckResult(status, "vkQueueWaitIdle");
}
}
@ -1453,8 +1468,9 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
// Update the descriptor set.
if (update_set_info->image_write_count > 0) {
vkUpdateDescriptorSets(*device_, update_set_info->image_write_count,
update_set_info->image_writes, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkUpdateDescriptorSets(*device_, update_set_info->image_write_count,
update_set_info->image_writes, 0, nullptr);
}
texture_sets_[hash] = descriptor_set;
@ -1613,8 +1629,9 @@ void TextureCache::ClearCache() {
textures_.clear();
COUNT_profile_set("gpu/texture_cache/textures", 0);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
for (auto it = samplers_.begin(); it != samplers_.end(); ++it) {
vkDestroySampler(*device_, it->second->sampler, nullptr);
dfn.vkDestroySampler(*device_, it->second->sampler, nullptr);
delete it->second;
}
samplers_.clear();

View File

@ -237,6 +237,8 @@ void VulkanCommandProcessor::WriteRegister(uint32_t index, uint32_t value) {
void VulkanCommandProcessor::CreateSwapImage(VkCommandBuffer setup_buffer,
VkExtent2D extents) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkImageCreateInfo image_info;
std::memset(&image_info, 0, sizeof(VkImageCreateInfo));
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
@ -255,16 +257,16 @@ void VulkanCommandProcessor::CreateSwapImage(VkCommandBuffer setup_buffer,
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage image_fb;
auto status = vkCreateImage(*device_, &image_info, nullptr, &image_fb);
auto status = dfn.vkCreateImage(*device_, &image_info, nullptr, &image_fb);
CheckResult(status, "vkCreateImage");
// Bind memory to image.
VkMemoryRequirements mem_requirements;
vkGetImageMemoryRequirements(*device_, image_fb, &mem_requirements);
dfn.vkGetImageMemoryRequirements(*device_, image_fb, &mem_requirements);
fb_memory_ = device_->AllocateMemory(mem_requirements, 0);
assert_not_null(fb_memory_);
status = vkBindImageMemory(*device_, image_fb, fb_memory_, 0);
status = dfn.vkBindImageMemory(*device_, image_fb, fb_memory_, 0);
CheckResult(status, "vkBindImageMemory");
std::lock_guard<std::mutex> lock(swap_state_.mutex);
@ -281,8 +283,8 @@ void VulkanCommandProcessor::CreateSwapImage(VkCommandBuffer setup_buffer,
VK_COMPONENT_SWIZZLE_A},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
};
status =
vkCreateImageView(*device_, &view_create_info, nullptr, &fb_image_view_);
status = dfn.vkCreateImageView(*device_, &view_create_info, nullptr,
&fb_image_view_);
CheckResult(status, "vkCreateImageView");
VkFramebufferCreateInfo framebuffer_create_info = {
@ -296,8 +298,8 @@ void VulkanCommandProcessor::CreateSwapImage(VkCommandBuffer setup_buffer,
extents.height,
1,
};
status = vkCreateFramebuffer(*device_, &framebuffer_create_info, nullptr,
&fb_framebuffer_);
status = dfn.vkCreateFramebuffer(*device_, &framebuffer_create_info, nullptr,
&fb_framebuffer_);
CheckResult(status, "vkCreateFramebuffer");
// Transition image to general layout.
@ -313,20 +315,22 @@ void VulkanCommandProcessor::CreateSwapImage(VkCommandBuffer setup_buffer,
barrier.image = image_fb;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(setup_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(setup_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
}
void VulkanCommandProcessor::DestroySwapImage() {
vkDestroyFramebuffer(*device_, fb_framebuffer_, nullptr);
vkDestroyImageView(*device_, fb_image_view_, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkDestroyFramebuffer(*device_, fb_framebuffer_, nullptr);
dfn.vkDestroyImageView(*device_, fb_image_view_, nullptr);
std::lock_guard<std::mutex> lock(swap_state_.mutex);
vkDestroyImage(*device_,
reinterpret_cast<VkImage>(swap_state_.front_buffer_texture),
nullptr);
vkFreeMemory(*device_, fb_memory_, nullptr);
dfn.vkDestroyImage(
*device_, reinterpret_cast<VkImage>(swap_state_.front_buffer_texture),
nullptr);
dfn.vkFreeMemory(*device_, fb_memory_, nullptr);
swap_state_.front_buffer_texture = 0;
fb_memory_ = nullptr;
@ -345,17 +349,19 @@ void VulkanCommandProcessor::BeginFrame() {
current_command_buffer_ = command_buffer_pool_->AcquireEntry();
current_setup_buffer_ = command_buffer_pool_->AcquireEntry();
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkCommandBufferBeginInfo command_buffer_begin_info;
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.pNext = nullptr;
command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
command_buffer_begin_info.pInheritanceInfo = nullptr;
auto status =
vkBeginCommandBuffer(current_command_buffer_, &command_buffer_begin_info);
auto status = dfn.vkBeginCommandBuffer(current_command_buffer_,
&command_buffer_begin_info);
CheckResult(status, "vkBeginCommandBuffer");
status =
vkBeginCommandBuffer(current_setup_buffer_, &command_buffer_begin_info);
status = dfn.vkBeginCommandBuffer(current_setup_buffer_,
&command_buffer_begin_info);
CheckResult(status, "vkBeginCommandBuffer");
// Flag renderdoc down to start a capture if requested.
@ -385,10 +391,11 @@ void VulkanCommandProcessor::EndFrame() {
current_render_state_ = nullptr;
}
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
status = vkEndCommandBuffer(current_setup_buffer_);
status = dfn.vkEndCommandBuffer(current_setup_buffer_);
CheckResult(status, "vkEndCommandBuffer");
status = vkEndCommandBuffer(current_command_buffer_);
status = dfn.vkEndCommandBuffer(current_command_buffer_);
CheckResult(status, "vkEndCommandBuffer");
current_command_buffer_ = nullptr;
@ -403,6 +410,8 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
uint32_t frontbuffer_height) {
SCOPE_profile_cpu_f("gpu");
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Build a final command buffer that copies the game's frontbuffer texture
// into our backbuffer texture.
VkCommandBuffer copy_commands = nullptr;
@ -420,7 +429,7 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
std::memset(&begin_info, 0, sizeof(begin_info));
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
auto status = vkBeginCommandBuffer(copy_commands, &begin_info);
auto status = dfn.vkBeginCommandBuffer(copy_commands, &begin_info);
CheckResult(status, "vkBeginCommandBuffer");
if (!frontbuffer_ptr) {
@ -463,20 +472,20 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
barrier.image = texture->image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(copy_commands,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
0, nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(copy_commands,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.image = swap_fb;
vkCmdPipelineBarrier(copy_commands, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(copy_commands, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0,
0, nullptr, 0, nullptr, 1, &barrier);
// Part of the source image that we want to blit from.
VkRect2D src_rect = {
@ -502,7 +511,7 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
std::swap(barrier.oldLayout, barrier.newLayout);
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
vkCmdPipelineBarrier(
dfn.vkCmdPipelineBarrier(
copy_commands, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
@ -511,7 +520,7 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
swap_state_.height = frontbuffer_height;
}
status = vkEndCommandBuffer(copy_commands);
status = dfn.vkEndCommandBuffer(copy_commands);
CheckResult(status, "vkEndCommandBuffer");
// Queue up current command buffers.
@ -549,7 +558,7 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
status = vkQueueSubmit(queue_, 1, &submit_info, current_batch_fence_);
status = dfn.vkQueueSubmit(queue_, 1, &submit_info, current_batch_fence_);
if (device_->is_renderdoc_attached() && capturing_) {
device_->EndRenderDocFrameCapture();
capturing_ = false;
@ -559,7 +568,7 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
}
}
vkWaitForFences(*device_, 1, &current_batch_fence_, VK_TRUE, -1);
dfn.vkWaitForFences(*device_, 1, &current_batch_fence_, VK_TRUE, -1);
if (cache_clear_requested_) {
cache_clear_requested_ = false;
@ -664,6 +673,8 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType primitive_type,
}
}
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Configure the pipeline for drawing.
// This encodes all render state (blend, depth, etc), our shader stages,
// and our vertex input layout.
@ -675,8 +686,8 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType primitive_type,
return false;
} else if (pipeline_status == PipelineCache::UpdateStatus::kMismatch ||
full_update) {
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);
dfn.vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);
}
pipeline_cache_->SetDynamicState(command_buffer, full_update);
@ -710,8 +721,8 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType primitive_type,
uint32_t first_vertex =
register_file_->values[XE_GPU_REG_VGT_INDX_OFFSET].u32;
uint32_t first_instance = 0;
vkCmdDraw(command_buffer, index_count, instance_count, first_vertex,
first_instance);
dfn.vkCmdDraw(command_buffer, index_count, instance_count, first_vertex,
first_instance);
} else {
// Index buffer draw.
uint32_t instance_count = 1;
@ -719,8 +730,8 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType primitive_type,
uint32_t vertex_offset =
register_file_->values[XE_GPU_REG_VGT_INDX_OFFSET].u32;
uint32_t first_instance = 0;
vkCmdDrawIndexed(command_buffer, index_count, instance_count, first_index,
vertex_offset, first_instance);
dfn.vkCmdDrawIndexed(command_buffer, index_count, instance_count,
first_index, vertex_offset, first_instance);
}
return true;
@ -754,7 +765,8 @@ bool VulkanCommandProcessor::PopulateConstants(VkCommandBuffer command_buffer,
uint32_t set_constant_offsets[2] = {
static_cast<uint32_t>(constant_offsets.first),
static_cast<uint32_t>(constant_offsets.second)};
vkCmdBindDescriptorSets(
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdBindDescriptorSets(
command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1,
&constant_descriptor_set,
static_cast<uint32_t>(xe::countof(set_constant_offsets)),
@ -806,8 +818,9 @@ bool VulkanCommandProcessor::PopulateIndexBuffer(
VkIndexType index_type = info.format == xenos::IndexFormat::kInt32
? VK_INDEX_TYPE_UINT32
: VK_INDEX_TYPE_UINT16;
vkCmdBindIndexBuffer(command_buffer, buffer_ref.first, buffer_ref.second,
index_type);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdBindIndexBuffer(command_buffer, buffer_ref.first, buffer_ref.second,
index_type);
return true;
}
@ -835,9 +848,10 @@ bool VulkanCommandProcessor::PopulateVertexBuffers(
return false;
}
vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_cache_->pipeline_layout(), 2, 1,
&descriptor_set, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_cache_->pipeline_layout(), 2, 1,
&descriptor_set, 0, nullptr);
return true;
}
@ -859,9 +873,10 @@ bool VulkanCommandProcessor::PopulateSamplers(VkCommandBuffer command_buffer,
return false;
}
vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_cache_->pipeline_layout(), 1, 1,
&descriptor_set, 0, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_cache_->pipeline_layout(), 1, 1,
&descriptor_set, 0, nullptr);
return true;
}
@ -1066,6 +1081,7 @@ bool VulkanCommandProcessor::IssueCopy() {
render_cache_->EndRenderPass();
current_render_state_ = nullptr;
}
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto command_buffer = current_command_buffer_;
if (texture->image_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
@ -1087,9 +1103,9 @@ bool VulkanCommandProcessor::IssueCopy() {
: VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
texture->image_layout = VK_IMAGE_LAYOUT_GENERAL;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &image_barrier);
}
// Transition the image into a transfer destination layout, if needed.
@ -1113,9 +1129,9 @@ bool VulkanCommandProcessor::IssueCopy() {
is_color_source ? VK_IMAGE_ASPECT_COLOR_BIT
: VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
// Ask the render cache to copy to the resolve texture.
auto edram_base = is_color_source ? color_edram_base : depth_edram_base;
@ -1176,10 +1192,11 @@ bool VulkanCommandProcessor::IssueCopy() {
is_color_source
? VK_IMAGE_ASPECT_COLOR_BIT
: VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0, 0, nullptr, 0, nullptr, 1, &tile_image_barrier);
dfn.vkCmdPipelineBarrier(
command_buffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0, 0, nullptr, 0, nullptr, 1, &tile_image_barrier);
auto render_pass =
blitter_->GetRenderPass(texture->format, is_color_source);
@ -1200,8 +1217,8 @@ bool VulkanCommandProcessor::IssueCopy() {
1,
};
VkResult res = vkCreateFramebuffer(*device_, &fb_create_info, nullptr,
&texture->framebuffer);
VkResult res = dfn.vkCreateFramebuffer(*device_, &fb_create_info,
nullptr, &texture->framebuffer);
CheckResult(res, "vkCreateFramebuffer");
}
@ -1268,11 +1285,11 @@ bool VulkanCommandProcessor::IssueCopy() {
std::swap(tile_image_barrier.srcAccessMask,
tile_image_barrier.dstAccessMask);
std::swap(tile_image_barrier.oldLayout, tile_image_barrier.newLayout);
vkCmdPipelineBarrier(command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0, nullptr, 0,
nullptr, 1, &tile_image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &tile_image_barrier);
} break;
case CopyCommand::kConstantOne:
@ -1286,14 +1303,14 @@ bool VulkanCommandProcessor::IssueCopy() {
image_barrier.dstAccessMask =
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT;
std::swap(image_barrier.newLayout, image_barrier.oldLayout);
vkCmdPipelineBarrier(command_buffer,
is_color_source
? VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
: VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, nullptr, 0, nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer,
is_color_source
? VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
: VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, nullptr, 0, nullptr, 1, &image_barrier);
// Perform any requested clears.
uint32_t copy_depth_clear = regs[XE_GPU_REG_RB_DEPTH_CLEAR].u32;
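
IssueCopy() issues several near-identical image barriers by hand. A hypothetical helper (not part of this commit) that fills the invariant fields makes the shape of each transition clearer:

#include <vulkan/vulkan.h>

// One full-subresource layout transition; queue family ownership is never
// transferred in these paths, so both indices stay IGNORED.
void TransitionImage(VkCommandBuffer cmd, VkImage image,
                     VkImageAspectFlags aspect, VkImageLayout old_layout,
                     VkImageLayout new_layout, VkAccessFlags src_access,
                     VkAccessFlags dst_access, VkPipelineStageFlags src_stage,
                     VkPipelineStageFlags dst_stage) {
  VkImageMemoryBarrier barrier = {};
  barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
  barrier.srcAccessMask = src_access;
  barrier.dstAccessMask = dst_access;
  barrier.oldLayout = old_layout;
  barrier.newLayout = new_layout;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.image = image;
  barrier.subresourceRange = {aspect, 0, 1, 0, 1};
  vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1,
                       &barrier);
}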

View File

@ -59,8 +59,9 @@ X_STATUS VulkanGraphicsSystem::Setup(cpu::Processor* processor,
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
device_->queue_family_index(),
};
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto status =
vkCreateCommandPool(*device_, &create_info, nullptr, &command_pool_);
dfn.vkCreateCommandPool(*device_, &create_info, nullptr, &command_pool_);
CheckResult(status, "vkCreateCommandPool");
return X_STATUS_SUCCESS;
@ -69,7 +70,11 @@ X_STATUS VulkanGraphicsSystem::Setup(cpu::Processor* processor,
void VulkanGraphicsSystem::Shutdown() {
GraphicsSystem::Shutdown();
vkDestroyCommandPool(*device_, command_pool_, nullptr);
if (command_pool_ != VK_NULL_HANDLE) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkDestroyCommandPool(*device_, command_pool_, nullptr);
command_pool_ = VK_NULL_HANDLE;
}
}
std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
@ -79,6 +84,7 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
return nullptr;
}
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
VkCommandBufferAllocateInfo alloc_info = {
@ -90,7 +96,7 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
};
VkCommandBuffer cmd = nullptr;
status = vkAllocateCommandBuffers(*device_, &alloc_info, &cmd);
status = dfn.vkAllocateCommandBuffers(*device_, &alloc_info, &cmd);
CheckResult(status, "vkAllocateCommandBuffers");
if (status != VK_SUCCESS) {
return nullptr;
@ -102,14 +108,14 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
nullptr,
};
vkBeginCommandBuffer(cmd, &begin_info);
dfn.vkBeginCommandBuffer(cmd, &begin_info);
auto front_buffer =
reinterpret_cast<VkImage>(swap_state.front_buffer_texture);
status = CreateCaptureBuffer(cmd, {swap_state.width, swap_state.height});
if (status != VK_SUCCESS) {
vkFreeCommandBuffers(*device_, command_pool_, 1, &cmd);
dfn.vkFreeCommandBuffers(*device_, command_pool_, 1, &cmd);
return nullptr;
}
@ -124,9 +130,9 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = front_buffer;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
// Copy front buffer into capture image.
VkBufferImageCopy region = {
@ -135,8 +141,8 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
{0, 0, 0}, {swap_state.width, swap_state.height, 1},
};
vkCmdCopyImageToBuffer(cmd, front_buffer, VK_IMAGE_LAYOUT_GENERAL,
capture_buffer_, 1, &region);
dfn.vkCmdCopyImageToBuffer(cmd, front_buffer, VK_IMAGE_LAYOUT_GENERAL,
capture_buffer_, 1, &region);
VkBufferMemoryBarrier memory_barrier = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
@ -149,11 +155,11 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
0,
VK_WHOLE_SIZE,
};
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 1,
&memory_barrier, 0, nullptr);
dfn.vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr,
1, &memory_barrier, 0, nullptr);
status = vkEndCommandBuffer(cmd);
status = dfn.vkEndCommandBuffer(cmd);
// Submit commands and wait.
if (status == VK_SUCCESS) {
@ -169,21 +175,22 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
0,
nullptr,
};
status = vkQueueSubmit(device_->primary_queue(), 1, &submit_info, nullptr);
status =
dfn.vkQueueSubmit(device_->primary_queue(), 1, &submit_info, nullptr);
CheckResult(status, "vkQueueSubmit");
if (status == VK_SUCCESS) {
status = vkQueueWaitIdle(device_->primary_queue());
status = dfn.vkQueueWaitIdle(device_->primary_queue());
CheckResult(status, "vkQueueWaitIdle");
}
}
vkFreeCommandBuffers(*device_, command_pool_, 1, &cmd);
dfn.vkFreeCommandBuffers(*device_, command_pool_, 1, &cmd);
void* data;
if (status == VK_SUCCESS) {
status = vkMapMemory(*device_, capture_buffer_memory_, 0, VK_WHOLE_SIZE, 0,
&data);
status = dfn.vkMapMemory(*device_, capture_buffer_memory_, 0, VK_WHOLE_SIZE,
0, &data);
CheckResult(status, "vkMapMemory");
}
@ -197,7 +204,7 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
std::memcpy(raw_image->data.data(), data,
raw_image->stride * raw_image->height);
vkUnmapMemory(*device_, capture_buffer_memory_);
dfn.vkUnmapMemory(*device_, capture_buffer_memory_);
DestroyCaptureBuffer();
return raw_image;
}
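
The readback at the end of Capture() works because the buffer memory was allocated HOST_VISIBLE | HOST_COHERENT, so mapping needs no explicit invalidation. That final step in isolation, assuming the transfer has already completed (queue wait or fence):

#include <vulkan/vulkan.h>

#include <cstdint>
#include <cstring>
#include <vector>

std::vector<uint8_t> ReadBackBufferMemory(VkDevice device,
                                          VkDeviceMemory memory,
                                          VkDeviceSize size) {
  std::vector<uint8_t> result(static_cast<size_t>(size));
  void* data = nullptr;
  if (vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &data) != VK_SUCCESS) {
    return {};
  }
  std::memcpy(result.data(), data, result.size());
  // HOST_COHERENT: no vkInvalidateMappedMemoryRanges needed before reading.
  vkUnmapMemory(device, memory);
  return result;
}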
@ -208,6 +215,7 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
VkResult VulkanGraphicsSystem::CreateCaptureBuffer(VkCommandBuffer cmd,
VkExtent2D extents) {
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
VkBufferCreateInfo buffer_info = {
@ -220,7 +228,8 @@ VkResult VulkanGraphicsSystem::CreateCaptureBuffer(VkCommandBuffer cmd,
0,
nullptr,
};
status = vkCreateBuffer(*device_, &buffer_info, nullptr, &capture_buffer_);
status =
dfn.vkCreateBuffer(*device_, &buffer_info, nullptr, &capture_buffer_);
if (status != VK_SUCCESS) {
return status;
}
@ -229,17 +238,18 @@ VkResult VulkanGraphicsSystem::CreateCaptureBuffer(VkCommandBuffer cmd,
// Bind memory to buffer.
VkMemoryRequirements mem_requirements;
vkGetBufferMemoryRequirements(*device_, capture_buffer_, &mem_requirements);
dfn.vkGetBufferMemoryRequirements(*device_, capture_buffer_,
&mem_requirements);
capture_buffer_memory_ = device_->AllocateMemory(
mem_requirements, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
assert_not_null(capture_buffer_memory_);
status =
vkBindBufferMemory(*device_, capture_buffer_, capture_buffer_memory_, 0);
status = dfn.vkBindBufferMemory(*device_, capture_buffer_,
capture_buffer_memory_, 0);
CheckResult(status, "vkBindImageMemory");
if (status != VK_SUCCESS) {
vkDestroyBuffer(*device_, capture_buffer_, nullptr);
dfn.vkDestroyBuffer(*device_, capture_buffer_, nullptr);
return status;
}
@ -247,8 +257,9 @@ VkResult VulkanGraphicsSystem::CreateCaptureBuffer(VkCommandBuffer cmd,
}
void VulkanGraphicsSystem::DestroyCaptureBuffer() {
vkDestroyBuffer(*device_, capture_buffer_, nullptr);
vkFreeMemory(*device_, capture_buffer_memory_, nullptr);
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkDestroyBuffer(*device_, capture_buffer_, nullptr);
dfn.vkFreeMemory(*device_, capture_buffer_memory_, nullptr);
capture_buffer_ = nullptr;
capture_buffer_memory_ = nullptr;
capture_buffer_size_ = 0;
@ -286,6 +297,7 @@ void VulkanGraphicsSystem::Swap(xe::ui::UIEvent* e) {
return;
}
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device_->dfn();
auto swap_chain = display_context_->swap_chain();
auto copy_cmd_buffer = swap_chain->copy_cmd_buffer();
auto front_buffer =
@ -302,9 +314,9 @@ void VulkanGraphicsSystem::Swap(xe::ui::UIEvent* e) {
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = front_buffer;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(copy_cmd_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
dfn.vkCmdPipelineBarrier(copy_cmd_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
VkImageBlit region;
region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
@ -317,10 +329,10 @@ void VulkanGraphicsSystem::Swap(xe::ui::UIEvent* e) {
region.dstOffsets[1] = {static_cast<int32_t>(swap_chain->surface_width()),
static_cast<int32_t>(swap_chain->surface_height()),
1};
vkCmdBlitImage(copy_cmd_buffer, front_buffer, VK_IMAGE_LAYOUT_GENERAL,
swap_chain->surface_image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region,
VK_FILTER_LINEAR);
dfn.vkCmdBlitImage(copy_cmd_buffer, front_buffer, VK_IMAGE_LAYOUT_GENERAL,
swap_chain->surface_image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region,
VK_FILTER_LINEAR);
}
} // namespace vulkan

View File

@ -30,7 +30,9 @@ VulkanShader::VulkanShader(ui::vulkan::VulkanDevice* device,
VulkanShader::VulkanTranslation::~VulkanTranslation() {
if (shader_module_) {
const VulkanShader& vulkan_shader = static_cast<VulkanShader&>(shader());
vkDestroyShaderModule(*vulkan_shader.device_, shader_module_, nullptr);
const ui::vulkan::VulkanDevice* device = vulkan_shader.device_;
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device->dfn();
dfn.vkDestroyShaderModule(*device, shader_module_, nullptr);
shader_module_ = nullptr;
}
}
@ -40,7 +42,8 @@ bool VulkanShader::VulkanTranslation::Prepare() {
assert_true(is_valid());
const VulkanShader& vulkan_shader = static_cast<VulkanShader&>(shader());
ui::vulkan::VulkanDevice* device = vulkan_shader.device_;
const ui::vulkan::VulkanDevice* device = vulkan_shader.device_;
const ui::vulkan::VulkanDevice::DeviceFunctions& dfn = device->dfn();
// Create the shader module.
VkShaderModuleCreateInfo shader_info;
@ -51,7 +54,7 @@ bool VulkanShader::VulkanTranslation::Prepare() {
shader_info.pCode =
reinterpret_cast<const uint32_t*>(translated_binary().data());
auto status =
vkCreateShaderModule(*device, &shader_info, nullptr, &shader_module_);
dfn.vkCreateShaderModule(*device, &shader_info, nullptr, &shader_module_);
CheckResult(status, "vkCreateShaderModule");
char type_char;

View File

@ -22,7 +22,6 @@ project("xenia-hid-demo")
links({
"fmt",
"imgui",
"volk",
"xenia-base",
"xenia-helper-sdl",
"xenia-hid",

View File

@ -26,6 +26,7 @@ Blitter::~Blitter() { Shutdown(); }
VkResult Blitter::Initialize(VulkanDevice* device) {
device_ = device;
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Shaders
@ -34,8 +35,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
shader_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
shader_create_info.codeSize = sizeof(blit_vert);
shader_create_info.pCode = reinterpret_cast<const uint32_t*>(blit_vert);
status = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
&blit_vertex_);
status = dfn.vkCreateShaderModule(*device_, &shader_create_info, nullptr,
&blit_vertex_);
CheckResult(status, "vkCreateShaderModule");
if (status != VK_SUCCESS) {
return status;
@ -46,8 +47,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
shader_create_info.codeSize = sizeof(blit_color_frag);
shader_create_info.pCode = reinterpret_cast<const uint32_t*>(blit_color_frag);
status = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
&blit_color_);
status = dfn.vkCreateShaderModule(*device_, &shader_create_info, nullptr,
&blit_color_);
CheckResult(status, "vkCreateShaderModule");
if (status != VK_SUCCESS) {
return status;
@ -58,8 +59,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
shader_create_info.codeSize = sizeof(blit_depth_frag);
shader_create_info.pCode = reinterpret_cast<const uint32_t*>(blit_depth_frag);
status = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
&blit_depth_);
status = dfn.vkCreateShaderModule(*device_, &shader_create_info, nullptr,
&blit_depth_);
CheckResult(status, "vkCreateShaderModule");
if (status != VK_SUCCESS) {
return status;
@ -83,8 +84,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
texture_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
texture_binding.pImmutableSamplers = nullptr;
texture_set_layout_info.pBindings = &texture_binding;
status = vkCreateDescriptorSetLayout(*device_, &texture_set_layout_info,
nullptr, &descriptor_set_layout_);
status = dfn.vkCreateDescriptorSetLayout(*device_, &texture_set_layout_info,
nullptr, &descriptor_set_layout_);
CheckResult(status, "vkCreateDescriptorSetLayout");
if (status != VK_SUCCESS) {
return status;
@ -119,8 +120,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
pipeline_layout_info.pushConstantRangeCount =
static_cast<uint32_t>(xe::countof(push_constant_ranges));
pipeline_layout_info.pPushConstantRanges = push_constant_ranges;
status = vkCreatePipelineLayout(*device_, &pipeline_layout_info, nullptr,
&pipeline_layout_);
status = dfn.vkCreatePipelineLayout(*device_, &pipeline_layout_info, nullptr,
&pipeline_layout_);
CheckResult(status, "vkCreatePipelineLayout");
if (status != VK_SUCCESS) {
return status;
@ -147,8 +148,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,
VK_FALSE,
};
status =
vkCreateSampler(*device_, &sampler_create_info, nullptr, &samp_nearest_);
status = dfn.vkCreateSampler(*device_, &sampler_create_info, nullptr,
&samp_nearest_);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return status;
@ -157,8 +158,8 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
sampler_create_info.minFilter = VK_FILTER_LINEAR;
sampler_create_info.magFilter = VK_FILTER_LINEAR;
sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
status =
vkCreateSampler(*device_, &sampler_create_info, nullptr, &samp_linear_);
status = dfn.vkCreateSampler(*device_, &sampler_create_info, nullptr,
&samp_linear_);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return status;
@ -168,49 +169,50 @@ VkResult Blitter::Initialize(VulkanDevice* device) {
}
void Blitter::Shutdown() {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (samp_nearest_) {
vkDestroySampler(*device_, samp_nearest_, nullptr);
dfn.vkDestroySampler(*device_, samp_nearest_, nullptr);
samp_nearest_ = nullptr;
}
if (samp_linear_) {
vkDestroySampler(*device_, samp_linear_, nullptr);
dfn.vkDestroySampler(*device_, samp_linear_, nullptr);
samp_linear_ = nullptr;
}
if (blit_vertex_) {
vkDestroyShaderModule(*device_, blit_vertex_, nullptr);
dfn.vkDestroyShaderModule(*device_, blit_vertex_, nullptr);
blit_vertex_ = nullptr;
}
if (blit_color_) {
vkDestroyShaderModule(*device_, blit_color_, nullptr);
dfn.vkDestroyShaderModule(*device_, blit_color_, nullptr);
blit_color_ = nullptr;
}
if (blit_depth_) {
vkDestroyShaderModule(*device_, blit_depth_, nullptr);
dfn.vkDestroyShaderModule(*device_, blit_depth_, nullptr);
blit_depth_ = nullptr;
}
if (pipeline_color_) {
vkDestroyPipeline(*device_, pipeline_color_, nullptr);
dfn.vkDestroyPipeline(*device_, pipeline_color_, nullptr);
pipeline_color_ = nullptr;
}
if (pipeline_depth_) {
vkDestroyPipeline(*device_, pipeline_depth_, nullptr);
dfn.vkDestroyPipeline(*device_, pipeline_depth_, nullptr);
pipeline_depth_ = nullptr;
}
if (pipeline_layout_) {
vkDestroyPipelineLayout(*device_, pipeline_layout_, nullptr);
dfn.vkDestroyPipelineLayout(*device_, pipeline_layout_, nullptr);
pipeline_layout_ = nullptr;
}
if (descriptor_set_layout_) {
vkDestroyDescriptorSetLayout(*device_, descriptor_set_layout_, nullptr);
dfn.vkDestroyDescriptorSetLayout(*device_, descriptor_set_layout_, nullptr);
descriptor_set_layout_ = nullptr;
}
for (auto& pipeline : pipelines_) {
vkDestroyPipeline(*device_, pipeline.second, nullptr);
dfn.vkDestroyPipeline(*device_, pipeline.second, nullptr);
}
pipelines_.clear();
for (auto& pass : render_passes_) {
vkDestroyRenderPass(*device_, pass.second, nullptr);
dfn.vkDestroyRenderPass(*device_, pass.second, nullptr);
}
render_passes_.clear();
}
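
Every branch in Shutdown() repeats the same guard-destroy-null triplet. A hypothetical helper (illustrative name and signature, not a definitive part of this code) would collapse each case to a single call such as DestroyAndNullHandle(dfn.vkDestroySampler, *device_, samp_nearest_):

#include <vulkan/vulkan.h>

// Destroys `handle` through the given device-level destroy function and
// resets it, so repeated shutdown calls are harmless.
template <typename F, typename T>
void DestroyAndNullHandle(F destroy_function, VkDevice device, T& handle) {
  if (handle != VK_NULL_HANDLE) {
    destroy_function(device, handle, nullptr);
    handle = VK_NULL_HANDLE;
  }
}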
@ -230,6 +232,8 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
VkFramebuffer dst_framebuffer, VkViewport viewport,
VkRect2D scissor, VkFilter filter,
bool color_or_depth, bool swap_channels) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Do we need a full draw, or can we cheap out with a blit command?
bool full_draw = swap_channels || true;  // Always true: the cheap blit path is not taken.
if (full_draw) {
@ -249,18 +253,18 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
nullptr,
};
vkCmdBeginRenderPass(command_buffer, &render_pass_info,
VK_SUBPASS_CONTENTS_INLINE);
dfn.vkCmdBeginRenderPass(command_buffer, &render_pass_info,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdSetViewport(command_buffer, 0, 1, &viewport);
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
dfn.vkCmdSetViewport(command_buffer, 0, 1, &viewport);
dfn.vkCmdSetScissor(command_buffer, 0, 1, &scissor);
// Acquire a pipeline.
auto pipeline =
GetPipeline(render_pass, color_or_depth ? blit_color_ : blit_depth_,
color_or_depth);
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);
dfn.vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);
// Acquire and update a descriptor set for this image.
auto set = descriptor_pool_->AcquireEntry(descriptor_set_layout_);
@ -287,10 +291,10 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
write.pImageInfo = &image;
write.pBufferInfo = nullptr;
write.pTexelBufferView = nullptr;
vkUpdateDescriptorSets(*device_, 1, &write, 0, nullptr);
dfn.vkUpdateDescriptorSets(*device_, 1, &write, 0, nullptr);
vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_layout_, 0, 1, &set, 0, nullptr);
dfn.vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_layout_, 0, 1, &set, 0, nullptr);
VtxPushConstants vtx_constants = {
{
@ -306,9 +310,9 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
float(dst_rect.extent.height) / dst_extents.height,
},
};
vkCmdPushConstants(command_buffer, pipeline_layout_,
VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(VtxPushConstants),
&vtx_constants);
dfn.vkCmdPushConstants(command_buffer, pipeline_layout_,
VK_SHADER_STAGE_VERTEX_BIT, 0,
sizeof(VtxPushConstants), &vtx_constants);
PixPushConstants pix_constants = {
0,
@ -316,12 +320,12 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
0,
swap_channels ? 1 : 0,
};
vkCmdPushConstants(command_buffer, pipeline_layout_,
VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(VtxPushConstants),
sizeof(PixPushConstants), &pix_constants);
dfn.vkCmdPushConstants(
command_buffer, pipeline_layout_, VK_SHADER_STAGE_FRAGMENT_BIT,
sizeof(VtxPushConstants), sizeof(PixPushConstants), &pix_constants);
vkCmdDraw(command_buffer, 4, 1, 0, 0);
vkCmdEndRenderPass(command_buffer);
dfn.vkCmdDraw(command_buffer, 4, 1, 0, 0);
dfn.vkCmdEndRenderPass(command_buffer);
}
}
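
The two vkCmdPushConstants calls above split one push-constant block across stages: the vertex payload at offset 0 and the fragment payload packed immediately after it. A sketch of the matching range declarations for pipeline-layout creation; the struct fields are illustrative guesses from the initializers above, not the exact definitions:

#include <vulkan/vulkan.h>

struct VtxPushConstants {
  float src_uv[4];  // source rectangle in normalized texture coordinates
  float dst_uv[4];  // destination rectangle in normalized coordinates
};
struct PixPushConstants {
  int unused[3];
  int swap_channels;  // nonzero swizzles channels during the blit
};

// Vertex range first, fragment range directly behind it; the offsets must
// match the vkCmdPushConstants calls exactly.
constexpr VkPushConstantRange kBlitPushConstantRanges[] = {
    {VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(VtxPushConstants)},
    {VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(VtxPushConstants),
     sizeof(PixPushConstants)},
};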
@ -421,8 +425,9 @@ VkRenderPass Blitter::CreateRenderPass(VkFormat output_format,
nullptr,
};
VkRenderPass renderpass = nullptr;
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult result =
vkCreateRenderPass(*device_, &renderpass_info, nullptr, &renderpass);
dfn.vkCreateRenderPass(*device_, &renderpass_info, nullptr, &renderpass);
CheckResult(result, "vkCreateRenderPass");
return renderpass;
@ -431,6 +436,7 @@ VkRenderPass Blitter::CreateRenderPass(VkFormat output_format,
VkPipeline Blitter::CreatePipeline(VkRenderPass render_pass,
VkShaderModule frag_shader,
bool color_or_depth) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult result = VK_SUCCESS;
// Pipeline
@ -576,8 +582,8 @@ VkPipeline Blitter::CreatePipeline(VkRenderPass render_pass,
pipeline_info.basePipelineIndex = -1;
VkPipeline pipeline = nullptr;
result = vkCreateGraphicsPipelines(*device_, nullptr, 1, &pipeline_info,
nullptr, &pipeline);
result = dfn.vkCreateGraphicsPipelines(*device_, nullptr, 1, &pipeline_info,
nullptr, &pipeline);
CheckResult(result, "vkCreateGraphicsPipelines");
return pipeline;

View File

@ -22,6 +22,7 @@ namespace vulkan {
CircularBuffer::CircularBuffer(VulkanDevice* device, VkBufferUsageFlags usage,
VkDeviceSize capacity, VkDeviceSize alignment)
: device_(device), capacity_(capacity) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Create our internal buffer.
@ -34,14 +35,14 @@ CircularBuffer::CircularBuffer(VulkanDevice* device, VkBufferUsageFlags usage,
buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buffer_info.queueFamilyIndexCount = 0;
buffer_info.pQueueFamilyIndices = nullptr;
status = vkCreateBuffer(*device_, &buffer_info, nullptr, &gpu_buffer_);
status = dfn.vkCreateBuffer(*device_, &buffer_info, nullptr, &gpu_buffer_);
CheckResult(status, "vkCreateBuffer");
if (status != VK_SUCCESS) {
assert_always();
}
VkMemoryRequirements reqs;
vkGetBufferMemoryRequirements(*device_, gpu_buffer_, &reqs);
dfn.vkGetBufferMemoryRequirements(*device_, gpu_buffer_, &reqs);
alignment_ = xe::round_up(alignment, reqs.alignment);
}
CircularBuffer::~CircularBuffer() { Shutdown(); }
@ -52,10 +53,12 @@ VkResult CircularBuffer::Initialize(VkDeviceMemory memory,
gpu_memory_ = memory;
gpu_base_ = offset;
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
// Bind the buffer to its backing memory.
status = vkBindBufferMemory(*device_, gpu_buffer_, gpu_memory_, gpu_base_);
status =
dfn.vkBindBufferMemory(*device_, gpu_buffer_, gpu_memory_, gpu_base_);
CheckResult(status, "vkBindBufferMemory");
if (status != VK_SUCCESS) {
XELOGE("CircularBuffer::Initialize - Failed to bind memory!");
@ -64,8 +67,8 @@ VkResult CircularBuffer::Initialize(VkDeviceMemory memory,
}
// Map the memory so we can access it.
status = vkMapMemory(*device_, gpu_memory_, gpu_base_, capacity_, 0,
reinterpret_cast<void**>(&host_base_));
status = dfn.vkMapMemory(*device_, gpu_memory_, gpu_base_, capacity_, 0,
reinterpret_cast<void**>(&host_base_));
CheckResult(status, "vkMapMemory");
if (status != VK_SUCCESS) {
XELOGE("CircularBuffer::Initialize - Failed to map memory!");
@ -77,10 +80,11 @@ VkResult CircularBuffer::Initialize(VkDeviceMemory memory,
}
VkResult CircularBuffer::Initialize() {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status = VK_SUCCESS;
VkMemoryRequirements reqs;
vkGetBufferMemoryRequirements(*device_, gpu_buffer_, &reqs);
dfn.vkGetBufferMemoryRequirements(*device_, gpu_buffer_, &reqs);
// Allocate memory from the device to back the buffer.
owns_gpu_memory_ = true;
@ -95,7 +99,8 @@ VkResult CircularBuffer::Initialize() {
gpu_base_ = 0;
// Bind the buffer to its backing memory.
status = vkBindBufferMemory(*device_, gpu_buffer_, gpu_memory_, gpu_base_);
status =
dfn.vkBindBufferMemory(*device_, gpu_buffer_, gpu_memory_, gpu_base_);
CheckResult(status, "vkBindBufferMemory");
if (status != VK_SUCCESS) {
XELOGE("CircularBuffer::Initialize - Failed to bind memory!");
@ -104,8 +109,8 @@ VkResult CircularBuffer::Initialize() {
}
// Map the memory so we can access it.
status = vkMapMemory(*device_, gpu_memory_, gpu_base_, capacity_, 0,
reinterpret_cast<void**>(&host_base_));
status = dfn.vkMapMemory(*device_, gpu_memory_, gpu_base_, capacity_, 0,
reinterpret_cast<void**>(&host_base_));
CheckResult(status, "vkMapMemory");
if (status != VK_SUCCESS) {
XELOGE("CircularBuffer::Initialize - Failed to map memory!");
@ -118,22 +123,24 @@ VkResult CircularBuffer::Initialize() {
void CircularBuffer::Shutdown() {
Clear();
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (host_base_) {
vkUnmapMemory(*device_, gpu_memory_);
dfn.vkUnmapMemory(*device_, gpu_memory_);
host_base_ = nullptr;
}
if (gpu_buffer_) {
vkDestroyBuffer(*device_, gpu_buffer_, nullptr);
dfn.vkDestroyBuffer(*device_, gpu_buffer_, nullptr);
gpu_buffer_ = nullptr;
}
if (gpu_memory_ && owns_gpu_memory_) {
vkFreeMemory(*device_, gpu_memory_, nullptr);
dfn.vkFreeMemory(*device_, gpu_memory_, nullptr);
gpu_memory_ = nullptr;
}
}
void CircularBuffer::GetBufferMemoryRequirements(VkMemoryRequirements* reqs) {
vkGetBufferMemoryRequirements(*device_, gpu_buffer_, reqs);
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
dfn.vkGetBufferMemoryRequirements(*device_, gpu_buffer_, reqs);
}
bool CircularBuffer::CanAcquire(VkDeviceSize length) {
@ -224,23 +231,25 @@ CircularBuffer::Allocation* CircularBuffer::Acquire(VkDeviceSize length,
}
void CircularBuffer::Flush(Allocation* allocation) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkMappedMemoryRange range;
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.pNext = nullptr;
range.memory = gpu_memory_;
range.offset = gpu_base_ + allocation->offset;
range.size = allocation->length;
vkFlushMappedMemoryRanges(*device_, 1, &range);
dfn.vkFlushMappedMemoryRanges(*device_, 1, &range);
}
void CircularBuffer::Flush(VkDeviceSize offset, VkDeviceSize length) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkMappedMemoryRange range;
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.pNext = nullptr;
range.memory = gpu_memory_;
range.offset = gpu_base_ + offset;
range.size = length;
vkFlushMappedMemoryRanges(*device_, 1, &range);
dfn.vkFlushMappedMemoryRanges(*device_, 1, &range);
}
void CircularBuffer::Clear() {
@ -249,12 +258,14 @@ void CircularBuffer::Clear() {
}
void CircularBuffer::Scavenge() {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Stash the last signalled fence
VkFence fence = nullptr;
while (!allocations_.empty()) {
Allocation& alloc = allocations_.front();
if (fence != alloc.fence &&
vkGetFenceStatus(*device_, alloc.fence) != VK_SUCCESS) {
dfn.vkGetFenceStatus(*device_, alloc.fence) != VK_SUCCESS) {
// Don't bother freeing following allocations to ensure proper ordering.
break;
}
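
Scavenge() above releases ring-buffer space strictly in submission order, stopping at the first fence the GPU has not yet signalled. The same idea in isolation, over an assumed deque of {fence, length} records:

#include <vulkan/vulkan.h>

#include <deque>

struct InFlightAllocation {
  VkFence fence;
  VkDeviceSize length;
};

// Returns how many bytes became reusable. VK_NOT_READY on the oldest fence
// stops the walk so space is never reclaimed out of order.
VkDeviceSize ReclaimCompleted(VkDevice device,
                              std::deque<InFlightAllocation>& pending) {
  VkDeviceSize reclaimed = 0;
  while (!pending.empty()) {
    if (vkGetFenceStatus(device, pending.front().fence) != VK_SUCCESS) {
      break;
    }
    reclaimed += pending.front().length;
    pending.pop_front();
  }
  return reclaimed;
}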

View File

@ -19,9 +19,11 @@ namespace vulkan {
using xe::ui::vulkan::CheckResult;
CommandBufferPool::CommandBufferPool(VkDevice device,
CommandBufferPool::CommandBufferPool(const VulkanDevice& device,
uint32_t queue_family_index)
: BaseFencedPool(device) {
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
// Create the pool used for allocating buffers.
// They are marked as transient (short-lived) and cycled frequently.
VkCommandPoolCreateInfo cmd_pool_info;
@ -31,7 +33,7 @@ CommandBufferPool::CommandBufferPool(VkDevice device,
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
cmd_pool_info.queueFamilyIndex = queue_family_index;
auto err =
vkCreateCommandPool(device_, &cmd_pool_info, nullptr, &command_pool_);
dfn.vkCreateCommandPool(device_, &cmd_pool_info, nullptr, &command_pool_);
CheckResult(err, "vkCreateCommandPool");
// Allocate a bunch of command buffers to start.
@ -43,8 +45,8 @@ CommandBufferPool::CommandBufferPool(VkDevice device,
command_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
command_buffer_info.commandBufferCount = kDefaultCount;
VkCommandBuffer command_buffers[kDefaultCount];
err =
vkAllocateCommandBuffers(device_, &command_buffer_info, command_buffers);
err = dfn.vkAllocateCommandBuffers(device_, &command_buffer_info,
command_buffers);
CheckResult(err, "vkCreateCommandBuffer");
for (size_t i = 0; i < xe::countof(command_buffers); ++i) {
PushEntry(command_buffers[i], nullptr);
@ -53,7 +55,8 @@ CommandBufferPool::CommandBufferPool(VkDevice device,
CommandBufferPool::~CommandBufferPool() {
FreeAllEntries();
vkDestroyCommandPool(device_, command_pool_, nullptr);
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
dfn.vkDestroyCommandPool(device_, command_pool_, nullptr);
command_pool_ = nullptr;
}
@ -67,17 +70,19 @@ VkCommandBuffer CommandBufferPool::AllocateEntry(void* data) {
VkCommandBufferLevel(reinterpret_cast<uintptr_t>(data));
command_buffer_info.commandBufferCount = 1;
VkCommandBuffer command_buffer;
auto err =
vkAllocateCommandBuffers(device_, &command_buffer_info, &command_buffer);
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
auto err = dfn.vkAllocateCommandBuffers(device_, &command_buffer_info,
&command_buffer);
CheckResult(err, "vkCreateCommandBuffer");
return command_buffer;
}
void CommandBufferPool::FreeEntry(VkCommandBuffer handle) {
vkFreeCommandBuffers(device_, command_pool_, 1, &handle);
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
dfn.vkFreeCommandBuffers(device_, command_pool_, 1, &handle);
}
DescriptorPool::DescriptorPool(VkDevice device, uint32_t max_count,
DescriptorPool::DescriptorPool(const VulkanDevice& device, uint32_t max_count,
std::vector<VkDescriptorPoolSize> pool_sizes)
: BaseFencedPool(device) {
VkDescriptorPoolCreateInfo descriptor_pool_info;
@ -88,13 +93,15 @@ DescriptorPool::DescriptorPool(VkDevice device, uint32_t max_count,
descriptor_pool_info.maxSets = max_count;
descriptor_pool_info.poolSizeCount = uint32_t(pool_sizes.size());
descriptor_pool_info.pPoolSizes = pool_sizes.data();
auto err = vkCreateDescriptorPool(device, &descriptor_pool_info, nullptr,
&descriptor_pool_);
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
auto err = dfn.vkCreateDescriptorPool(device, &descriptor_pool_info, nullptr,
&descriptor_pool_);
CheckResult(err, "vkCreateDescriptorPool");
}
DescriptorPool::~DescriptorPool() {
FreeAllEntries();
vkDestroyDescriptorPool(device_, descriptor_pool_, nullptr);
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
dfn.vkDestroyDescriptorPool(device_, descriptor_pool_, nullptr);
descriptor_pool_ = nullptr;
}
@ -108,15 +115,17 @@ VkDescriptorSet DescriptorPool::AllocateEntry(void* data) {
set_alloc_info.descriptorPool = descriptor_pool_;
set_alloc_info.descriptorSetCount = 1;
set_alloc_info.pSetLayouts = &layout;
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
auto err =
vkAllocateDescriptorSets(device_, &set_alloc_info, &descriptor_set);
dfn.vkAllocateDescriptorSets(device_, &set_alloc_info, &descriptor_set);
CheckResult(err, "vkAllocateDescriptorSets");
return descriptor_set;
}
void DescriptorPool::FreeEntry(VkDescriptorSet handle) {
vkFreeDescriptorSets(device_, descriptor_pool_, 1, &handle);
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
dfn.vkFreeDescriptorSets(device_, descriptor_pool_, 1, &handle);
}
} // namespace vulkan
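
For context, a hedged usage sketch of the fenced-pool lifecycle these classes implement; BeginBatch, AcquireEntry, and Scavenge appear above, while the header path and the EndBatch() closer are assumptions:

#include <vulkan/vulkan.h>

#include "xenia/ui/vulkan/fenced_pools.h"  // assumed header name

void RecordAndSubmit(xe::ui::vulkan::CommandBufferPool& pool, VkQueue queue) {
  VkFence fence = pool.BeginBatch();  // creates or reuses an owned fence
  VkCommandBuffer cmd = pool.AcquireEntry();
  // ... vkBeginCommandBuffer / record commands / vkEndCommandBuffer ...
  pool.EndBatch();  // assumed counterpart of BeginBatch
  VkSubmitInfo submit = {};
  submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
  submit.commandBufferCount = 1;
  submit.pCommandBuffers = &cmd;
  vkQueueSubmit(queue, 1, &submit, fence);  // dfn.vkQueueSubmit in Xenia
  // Once the fence signals, a later Scavenge() call recycles the batch and
  // its command buffer.
  pool.Scavenge();
}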

View File

@ -14,6 +14,7 @@
#include "xenia/base/assert.h"
#include "xenia/ui/vulkan/vulkan.h"
#include "xenia/ui/vulkan/vulkan_device.h"
#include "xenia/ui/vulkan/vulkan_util.h"
namespace xe {
@ -28,7 +29,7 @@ namespace vulkan {
template <typename T, typename HANDLE>
class BaseFencedPool {
public:
BaseFencedPool(VkDevice device) : device_(device) {}
BaseFencedPool(const VulkanDevice& device) : device_(device) {}
virtual ~BaseFencedPool() {
// TODO(benvanik): wait on fence until done.
@ -47,11 +48,12 @@ class BaseFencedPool {
// Checks all pending batches for completion and scavenges their entries.
// This should be called as frequently as reasonable.
void Scavenge() {
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
while (pending_batch_list_head_) {
auto batch = pending_batch_list_head_;
assert_not_null(batch->fence);
VkResult status = vkGetFenceStatus(device_, batch->fence);
VkResult status = dfn.vkGetFenceStatus(device_, batch->fence);
if (status == VK_SUCCESS || status == VK_ERROR_DEVICE_LOST) {
// Batch has completed. Reclaim.
pending_batch_list_head_ = batch->next;
@ -80,6 +82,7 @@ class BaseFencedPool {
VkFence BeginBatch(VkFence fence = nullptr) {
assert_null(open_batch_);
Batch* batch = nullptr;
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
if (free_batch_list_head_) {
// Reuse a batch.
batch = free_batch_list_head_;
@ -88,10 +91,10 @@ class BaseFencedPool {
if (batch->flags & kBatchOwnsFence && !fence) {
// Reset owned fence.
vkResetFences(device_, 1, &batch->fence);
dfn.vkResetFences(device_, 1, &batch->fence);
} else if ((batch->flags & kBatchOwnsFence) && fence) {
// Transfer owned -> external
vkDestroyFence(device_, batch->fence, nullptr);
dfn.vkDestroyFence(device_, batch->fence, nullptr);
batch->fence = fence;
batch->flags &= ~kBatchOwnsFence;
} else if (!(batch->flags & kBatchOwnsFence) && !fence) {
@ -100,7 +103,8 @@ class BaseFencedPool {
info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
info.pNext = nullptr;
info.flags = 0;
VkResult res = vkCreateFence(device_, &info, nullptr, &batch->fence);
VkResult res =
dfn.vkCreateFence(device_, &info, nullptr, &batch->fence);
if (res != VK_SUCCESS) {
assert_always();
}
@ -121,7 +125,8 @@ class BaseFencedPool {
info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
info.pNext = nullptr;
info.flags = 0;
VkResult res = vkCreateFence(device_, &info, nullptr, &batch->fence);
VkResult res =
dfn.vkCreateFence(device_, &info, nullptr, &batch->fence);
if (res != VK_SUCCESS) {
assert_always();
}
@ -239,13 +244,14 @@ class BaseFencedPool {
}
void FreeAllEntries() {
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
// Run down free lists.
while (free_batch_list_head_) {
auto batch = free_batch_list_head_;
free_batch_list_head_ = batch->next;
if (batch->flags & kBatchOwnsFence) {
vkDestroyFence(device_, batch->fence, nullptr);
dfn.vkDestroyFence(device_, batch->fence, nullptr);
batch->fence = nullptr;
}
delete batch;
@ -258,7 +264,7 @@ class BaseFencedPool {
}
}
VkDevice device_ = nullptr;
const VulkanDevice& device_;
private:
struct Entry {
@ -288,7 +294,7 @@ class CommandBufferPool
public:
typedef BaseFencedPool<CommandBufferPool, VkCommandBuffer> Base;
CommandBufferPool(VkDevice device, uint32_t queue_family_index);
CommandBufferPool(const VulkanDevice& device, uint32_t queue_family_index);
~CommandBufferPool() override;
VkCommandBuffer AcquireEntry(
@ -308,7 +314,7 @@ class DescriptorPool : public BaseFencedPool<DescriptorPool, VkDescriptorSet> {
public:
typedef BaseFencedPool<DescriptorPool, VkDescriptorSet> Base;
DescriptorPool(VkDevice device, uint32_t max_count,
DescriptorPool(const VulkanDevice& device, uint32_t max_count,
std::vector<VkDescriptorPoolSize> pool_sizes);
~DescriptorPool() override;

View File

@ -0,0 +1,83 @@
// Vulkan 1.0 core device functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkAllocateCommandBuffers)
XE_UI_VULKAN_FUNCTION(vkAllocateDescriptorSets)
XE_UI_VULKAN_FUNCTION(vkAllocateMemory)
XE_UI_VULKAN_FUNCTION(vkBeginCommandBuffer)
XE_UI_VULKAN_FUNCTION(vkBindBufferMemory)
XE_UI_VULKAN_FUNCTION(vkBindImageMemory)
XE_UI_VULKAN_FUNCTION(vkCmdBeginRenderPass)
XE_UI_VULKAN_FUNCTION(vkCmdBindDescriptorSets)
XE_UI_VULKAN_FUNCTION(vkCmdBindIndexBuffer)
XE_UI_VULKAN_FUNCTION(vkCmdBindPipeline)
XE_UI_VULKAN_FUNCTION(vkCmdBindVertexBuffers)
XE_UI_VULKAN_FUNCTION(vkCmdBlitImage)
XE_UI_VULKAN_FUNCTION(vkCmdClearColorImage)
XE_UI_VULKAN_FUNCTION(vkCmdClearDepthStencilImage)
XE_UI_VULKAN_FUNCTION(vkCmdCopyBufferToImage)
XE_UI_VULKAN_FUNCTION(vkCmdCopyImageToBuffer)
XE_UI_VULKAN_FUNCTION(vkCmdDraw)
XE_UI_VULKAN_FUNCTION(vkCmdDrawIndexed)
XE_UI_VULKAN_FUNCTION(vkCmdEndRenderPass)
XE_UI_VULKAN_FUNCTION(vkCmdExecuteCommands)
XE_UI_VULKAN_FUNCTION(vkCmdFillBuffer)
XE_UI_VULKAN_FUNCTION(vkCmdPipelineBarrier)
XE_UI_VULKAN_FUNCTION(vkCmdPushConstants)
XE_UI_VULKAN_FUNCTION(vkCmdResolveImage)
XE_UI_VULKAN_FUNCTION(vkCmdSetBlendConstants)
XE_UI_VULKAN_FUNCTION(vkCmdSetDepthBias)
XE_UI_VULKAN_FUNCTION(vkCmdSetDepthBounds)
XE_UI_VULKAN_FUNCTION(vkCmdSetLineWidth)
XE_UI_VULKAN_FUNCTION(vkCmdSetScissor)
XE_UI_VULKAN_FUNCTION(vkCmdSetStencilCompareMask)
XE_UI_VULKAN_FUNCTION(vkCmdSetStencilReference)
XE_UI_VULKAN_FUNCTION(vkCmdSetStencilWriteMask)
XE_UI_VULKAN_FUNCTION(vkCmdSetViewport)
XE_UI_VULKAN_FUNCTION(vkCreateBuffer)
XE_UI_VULKAN_FUNCTION(vkCreateCommandPool)
XE_UI_VULKAN_FUNCTION(vkCreateDescriptorPool)
XE_UI_VULKAN_FUNCTION(vkCreateDescriptorSetLayout)
XE_UI_VULKAN_FUNCTION(vkCreateFence)
XE_UI_VULKAN_FUNCTION(vkCreateFramebuffer)
XE_UI_VULKAN_FUNCTION(vkCreateGraphicsPipelines)
XE_UI_VULKAN_FUNCTION(vkCreateImage)
XE_UI_VULKAN_FUNCTION(vkCreateImageView)
XE_UI_VULKAN_FUNCTION(vkCreatePipelineCache)
XE_UI_VULKAN_FUNCTION(vkCreatePipelineLayout)
XE_UI_VULKAN_FUNCTION(vkCreateRenderPass)
XE_UI_VULKAN_FUNCTION(vkCreateSampler)
XE_UI_VULKAN_FUNCTION(vkCreateSemaphore)
XE_UI_VULKAN_FUNCTION(vkCreateShaderModule)
XE_UI_VULKAN_FUNCTION(vkDestroyBuffer)
XE_UI_VULKAN_FUNCTION(vkDestroyCommandPool)
XE_UI_VULKAN_FUNCTION(vkDestroyDescriptorPool)
XE_UI_VULKAN_FUNCTION(vkDestroyDescriptorSetLayout)
XE_UI_VULKAN_FUNCTION(vkDestroyFence)
XE_UI_VULKAN_FUNCTION(vkDestroyFramebuffer)
XE_UI_VULKAN_FUNCTION(vkDestroyImage)
XE_UI_VULKAN_FUNCTION(vkDestroyImageView)
XE_UI_VULKAN_FUNCTION(vkDestroyPipeline)
XE_UI_VULKAN_FUNCTION(vkDestroyPipelineCache)
XE_UI_VULKAN_FUNCTION(vkDestroyPipelineLayout)
XE_UI_VULKAN_FUNCTION(vkDestroyRenderPass)
XE_UI_VULKAN_FUNCTION(vkDestroySampler)
XE_UI_VULKAN_FUNCTION(vkDestroySemaphore)
XE_UI_VULKAN_FUNCTION(vkDestroyShaderModule)
XE_UI_VULKAN_FUNCTION(vkEndCommandBuffer)
XE_UI_VULKAN_FUNCTION(vkFlushMappedMemoryRanges)
XE_UI_VULKAN_FUNCTION(vkFreeCommandBuffers)
XE_UI_VULKAN_FUNCTION(vkFreeDescriptorSets)
XE_UI_VULKAN_FUNCTION(vkFreeMemory)
XE_UI_VULKAN_FUNCTION(vkGetBufferMemoryRequirements)
XE_UI_VULKAN_FUNCTION(vkGetDeviceQueue)
XE_UI_VULKAN_FUNCTION(vkGetFenceStatus)
XE_UI_VULKAN_FUNCTION(vkGetImageMemoryRequirements)
XE_UI_VULKAN_FUNCTION(vkGetImageSubresourceLayout)
XE_UI_VULKAN_FUNCTION(vkGetPipelineCacheData)
XE_UI_VULKAN_FUNCTION(vkMapMemory)
XE_UI_VULKAN_FUNCTION(vkQueueSubmit)
XE_UI_VULKAN_FUNCTION(vkQueueWaitIdle)
XE_UI_VULKAN_FUNCTION(vkResetCommandBuffer)
XE_UI_VULKAN_FUNCTION(vkResetFences)
XE_UI_VULKAN_FUNCTION(vkUnmapMemory)
XE_UI_VULKAN_FUNCTION(vkUpdateDescriptorSets)
XE_UI_VULKAN_FUNCTION(vkWaitForFences)
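
This list is consumed as an X-macro: every includer defines XE_UI_VULKAN_FUNCTION first, so one file yields both the struct members and the loading code. A sketch of the two expansion sites, mirroring the VulkanDevice::Initialize loading loop later in this diff (include paths assumed to be on the project's search path):

#include <vulkan/vulkan.h>

// Expansion site 1: declare one PFN_* member per listed function.
struct DeviceFunctions {
#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name;
#include "xenia/ui/vulkan/functions/device_1_0.inc"
#undef XE_UI_VULKAN_FUNCTION
};

// Expansion site 2: resolve every member through vkGetDeviceProcAddr,
// accumulating failures instead of stopping at the first missing entry point.
bool LoadDeviceFunctions(PFN_vkGetDeviceProcAddr vk_get_device_proc_addr,
                         VkDevice device, DeviceFunctions& dfn) {
  bool ok = true;
#define XE_UI_VULKAN_FUNCTION(name)                                        \
  ok &= (dfn.name = PFN_##name(vk_get_device_proc_addr(device, #name))) != \
        nullptr;
#include "xenia/ui/vulkan/functions/device_1_0.inc"
#undef XE_UI_VULKAN_FUNCTION
  return ok;
}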

View File

@ -0,0 +1,2 @@
// VK_AMD_shader_info functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkGetShaderInfoAMD)

View File

@ -0,0 +1,5 @@
// VK_EXT_debug_marker functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkDebugMarkerSetObjectNameEXT)
XE_UI_VULKAN_FUNCTION(vkCmdDebugMarkerBeginEXT)
XE_UI_VULKAN_FUNCTION(vkCmdDebugMarkerEndEXT)
XE_UI_VULKAN_FUNCTION(vkCmdDebugMarkerInsertEXT)

View File

@ -0,0 +1,6 @@
// VK_KHR_swapchain functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkAcquireNextImageKHR)
XE_UI_VULKAN_FUNCTION(vkCreateSwapchainKHR)
XE_UI_VULKAN_FUNCTION(vkDestroySwapchainKHR)
XE_UI_VULKAN_FUNCTION(vkGetSwapchainImagesKHR)
XE_UI_VULKAN_FUNCTION(vkQueuePresentKHR)

View File

@ -0,0 +1,13 @@
// Vulkan 1.0 core instance functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkCreateDevice)
XE_UI_VULKAN_FUNCTION(vkDestroyDevice)
XE_UI_VULKAN_FUNCTION(vkEnumerateDeviceExtensionProperties)
XE_UI_VULKAN_FUNCTION(vkEnumerateDeviceLayerProperties)
XE_UI_VULKAN_FUNCTION(vkEnumeratePhysicalDevices)
XE_UI_VULKAN_FUNCTION(vkGetDeviceProcAddr)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceFeatures)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceFormatProperties)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceImageFormatProperties)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceMemoryProperties)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceProperties)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)

View File

@ -0,0 +1,3 @@
// VK_EXT_debug_report functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkCreateDebugReportCallbackEXT)
XE_UI_VULKAN_FUNCTION(vkDestroyDebugReportCallbackEXT)

View File

@ -0,0 +1,2 @@
// VK_KHR_android_surface functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkCreateAndroidSurfaceKHR)

View File

@ -0,0 +1,6 @@
// VK_KHR_surface functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkDestroySurfaceKHR)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceSurfaceCapabilitiesKHR)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR)
XE_UI_VULKAN_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR)

View File

@ -0,0 +1,2 @@
// VK_KHR_win32_surface functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkCreateWin32SurfaceKHR)

View File

@ -0,0 +1,2 @@
// VK_KHR_xcb_surface functions used in Xenia.
XE_UI_VULKAN_FUNCTION(vkCreateXcbSurfaceKHR)

View File

@ -18,6 +18,7 @@ project("xenia-ui-vulkan")
project_root.."/third_party/vulkan/",
})
local_platform_files()
local_platform_files("functions")
files({
"shaders/bin/*.h",
})
@ -31,7 +32,6 @@ project("xenia-ui-window-vulkan-demo")
links({
"fmt",
"imgui",
"volk",
"xenia-base",
"xenia-ui",
"xenia-ui-spirv",

View File

@ -10,20 +10,30 @@
#ifndef XENIA_UI_VULKAN_VULKAN_H_
#define XENIA_UI_VULKAN_VULKAN_H_
#include "xenia/base/cvar.h"
#include "xenia/base/platform.h"
#if XE_PLATFORM_WIN32
#define VK_USE_PLATFORM_WIN32_KHR 1
#elif XE_PLATFORM_LINUX
#if XE_PLATFORM_ANDROID
#ifndef VK_USE_PLATFORM_ANDROID_KHR
#define VK_USE_PLATFORM_ANDROID_KHR 1
#endif
#elif XE_PLATFORM_GNU_LINUX
#ifndef VK_USE_PLATFORM_XCB_KHR
#define VK_USE_PLATFORM_XCB_KHR 1
#else
#error Platform not yet supported.
#endif // XE_PLATFORM_WIN32
#endif
#elif XE_PLATFORM_WIN32
// Must be included before vulkan.h with VK_USE_PLATFORM_WIN32_KHR because it
// includes Windows.h too.
#include "xenia/base/platform_win.h"
#ifndef VK_USE_PLATFORM_WIN32_KHR
#define VK_USE_PLATFORM_WIN32_KHR 1
#endif
#endif
// We use a loader with its own function prototypes.
#include "third_party/volk/volk.h"
#ifndef VK_NO_PROTOTYPES
#define VK_NO_PROTOTYPES 1
#endif
#include "third_party/vulkan/vulkan.h"
#include "xenia/base/cvar.h"
#define XELOGVK XELOGI
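
Defining VK_NO_PROTOTYPES before including vulkan.h strips the global function declarations, leaving only the PFN_* typedefs; nothing links against the loader directly, and every entry point must be fetched at runtime. A minimal sketch of the first step of that chain (the vkGetInstanceProcAddr pointer itself would come from LoadLibrary/dlopen on the loader module):

#include <vulkan/vulkan.h>

// With VK_NO_PROTOTYPES, vkCreateInstance(...) is not declared; it has to be
// pulled through vkGetInstanceProcAddr with a null instance handle.
PFN_vkCreateInstance ResolveCreateInstance(
    PFN_vkGetInstanceProcAddr vk_get_instance_proc_addr) {
  return PFN_vkCreateInstance(
      vk_get_instance_proc_addr(VK_NULL_HANDLE, "vkCreateInstance"));
}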

View File

@ -41,10 +41,11 @@ VulkanContext::VulkanContext(VulkanProvider* provider, Window* target_window)
VulkanContext::~VulkanContext() {
VkResult status;
auto provider = static_cast<VulkanProvider*>(provider_);
auto device = provider->device();
VulkanDevice* device = provider->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
{
std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
status = vkQueueWaitIdle(device->primary_queue());
status = dfn.vkQueueWaitIdle(device->primary_queue());
}
immediate_drawer_.reset();
swap_chain_.reset();
@ -52,7 +53,8 @@ VulkanContext::~VulkanContext() {
bool VulkanContext::Initialize() {
auto provider = static_cast<VulkanProvider*>(provider_);
auto device = provider->device();
VulkanInstance* instance = provider->instance();
const VulkanInstance::InstanceFunctions& ifn = instance->ifn();
if (target_window_) {
// Create swap chain used to present to the window.
@ -66,8 +68,8 @@ bool VulkanContext::Initialize() {
create_info.hinstance =
static_cast<HINSTANCE>(target_window_->native_platform_handle());
create_info.hwnd = static_cast<HWND>(target_window_->native_handle());
status = vkCreateWin32SurfaceKHR(*provider->instance(), &create_info,
nullptr, &surface);
status = ifn.vkCreateWin32SurfaceKHR(*provider->instance(), &create_info,
nullptr, &surface);
CheckResult(status, "vkCreateWin32SurfaceKHR");
#elif XE_PLATFORM_LINUX
#ifdef GDK_WINDOWING_X11
@ -86,8 +88,8 @@ bool VulkanContext::Initialize() {
create_info.connection = static_cast<xcb_connection_t*>(
target_window_->native_platform_handle());
create_info.window = static_cast<xcb_window_t>(window);
status = vkCreateXcbSurfaceKHR(*provider->instance(), &create_info, nullptr,
&surface);
status = ifn.vkCreateXcbSurfaceKHR(*provider->instance(), &create_info,
nullptr, &surface);
CheckResult(status, "vkCreateXcbSurfaceKHR");
#else
#error Unsupported GDK Backend on Linux.
@ -100,8 +102,8 @@ bool VulkanContext::Initialize() {
return false;
}
swap_chain_ = std::make_unique<VulkanSwapChain>(provider->instance(),
provider->device());
swap_chain_ =
std::make_unique<VulkanSwapChain>(instance, provider->device());
if (swap_chain_->Initialize(surface) != VK_SUCCESS) {
XELOGE("Unable to initialize swap chain");
return false;
@ -144,7 +146,8 @@ void VulkanContext::ClearCurrent() {}
bool VulkanContext::BeginSwap() {
SCOPE_profile_cpu_f("gpu");
auto provider = static_cast<VulkanProvider*>(provider_);
auto device = provider->device();
VulkanDevice* device = provider->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
VkResult status;
@ -169,15 +172,16 @@ bool VulkanContext::BeginSwap() {
// TODO(benvanik): use a fence instead? May not be possible with target image.
std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
status = vkQueueWaitIdle(device->primary_queue());
status = dfn.vkQueueWaitIdle(device->primary_queue());
return true;
}
void VulkanContext::EndSwap() {
SCOPE_profile_cpu_f("gpu");
auto provider = static_cast<VulkanProvider*>(provider_);
auto device = provider->device();
auto provider = static_cast<const VulkanProvider*>(provider_);
VulkanDevice* device = provider->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
VkResult status;
@ -193,7 +197,7 @@ void VulkanContext::EndSwap() {
// Wait until the queue is idle.
// TODO(benvanik): is this required?
std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
status = vkQueueWaitIdle(device->primary_queue());
status = dfn.vkQueueWaitIdle(device->primary_queue());
}
std::unique_ptr<RawImage> VulkanContext::Capture() {

View File

@ -11,6 +11,7 @@
#include <cinttypes>
#include <climits>
#include <cstring>
#include <mutex>
#include <string>
@ -68,7 +69,8 @@ VulkanDevice::VulkanDevice(VulkanInstance* instance) : instance_(instance) {
VulkanDevice::~VulkanDevice() {
if (handle) {
vkDestroyDevice(handle, nullptr);
const VulkanInstance::InstanceFunctions& ifn = instance_->ifn();
ifn.vkDestroyDevice(handle, nullptr);
handle = nullptr;
}
}
@ -91,9 +93,11 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
return false;
}
const VulkanInstance::InstanceFunctions& ifn = instance_->ifn();
// Query supported features so we can make sure we have what we need.
VkPhysicalDeviceFeatures supported_features;
vkGetPhysicalDeviceFeatures(device_info.handle, &supported_features);
ifn.vkGetPhysicalDeviceFeatures(device_info.handle, &supported_features);
VkPhysicalDeviceFeatures enabled_features = {0};
bool any_features_missing = false;
#define ENABLE_AND_EXPECT(name) \
@ -189,7 +193,8 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
create_info.ppEnabledExtensionNames = enabled_extensions_.data();
create_info.pEnabledFeatures = &enabled_features;
auto err = vkCreateDevice(device_info.handle, &create_info, nullptr, &handle);
auto err =
ifn.vkCreateDevice(device_info.handle, &create_info, nullptr, &handle);
switch (err) {
case VK_SUCCESS:
// Ok!
@ -211,30 +216,34 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
return false;
}
// Set flags so we can track enabled extensions easily.
for (auto& ext : enabled_extensions_) {
if (!std::strcmp(ext, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
debug_marker_ena_ = true;
pfn_vkDebugMarkerSetObjectNameEXT_ =
(PFN_vkDebugMarkerSetObjectNameEXT)vkGetDeviceProcAddr(
*this, "vkDebugMarkerSetObjectNameEXT");
pfn_vkCmdDebugMarkerBeginEXT_ =
(PFN_vkCmdDebugMarkerBeginEXT)vkGetDeviceProcAddr(
*this, "vkCmdDebugMarkerBeginEXT");
pfn_vkCmdDebugMarkerEndEXT_ =
(PFN_vkCmdDebugMarkerEndEXT)vkGetDeviceProcAddr(
*this, "vkCmdDebugMarkerEndEXT");
pfn_vkCmdDebugMarkerInsertEXT_ =
(PFN_vkCmdDebugMarkerInsertEXT)vkGetDeviceProcAddr(
*this, "vkCmdDebugMarkerInsertEXT");
}
// Get device functions.
std::memset(&dfn_, 0, sizeof(dfn_));
bool device_functions_loaded = true;
debug_marker_ena_ = false;
#define XE_UI_VULKAN_FUNCTION(name) \
device_functions_loaded &= \
(dfn_.name = PFN_##name(ifn.vkGetDeviceProcAddr(handle, #name))) != \
nullptr;
#include "xenia/ui/vulkan/functions/device_1_0.inc"
#include "xenia/ui/vulkan/functions/device_khr_swapchain.inc"
if (HasEnabledExtension(VK_AMD_SHADER_INFO_EXTENSION_NAME)) {
#include "xenia/ui/vulkan/functions/device_amd_shader_info.inc"
}
debug_marker_ena_ = HasEnabledExtension(VK_EXT_DEBUG_MARKER_EXTENSION_NAME);
if (debug_marker_ena_) {
#include "xenia/ui/vulkan/functions/device_ext_debug_marker.inc"
}
#undef XE_UI_VULKAN_FUNCTION
if (!device_functions_loaded) {
XELOGE("Failed to get Vulkan device function pointers");
return false;
}
device_info_ = std::move(device_info);
queue_family_index_ = ideal_queue_family_index;
// Get the primary queue used for most submissions/etc.
vkGetDeviceQueue(handle, queue_family_index_, 0, &primary_queue_);
dfn_.vkGetDeviceQueue(handle, queue_family_index_, 0, &primary_queue_);
if (!primary_queue_) {
XELOGE("vkGetDeviceQueue returned nullptr!");
return false;
@ -253,7 +262,7 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
continue;
}
vkGetDeviceQueue(handle, i, j, &queue);
dfn_.vkGetDeviceQueue(handle, i, j, &queue);
if (queue) {
free_queues_[i].push_back(queue);
}
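The new loading scheme is an X-macro: each functions/*.inc file lists entry points as XE_UI_VULKAN_FUNCTION(name) invocations, and the including code decides what the macro expands to, a PFN_ member declaration in the header and a vkGetDeviceProcAddr load here. The .inc files themselves are not part of these hunks; a plausible excerpt of device_1_0.inc, using only names that this diff loads through dfn_:

// Hypothetical excerpt of xenia/ui/vulkan/functions/device_1_0.inc: one
// invocation per core 1.0 entry point, deliberately without an include
// guard so the list can be re-expanded under different macro definitions.
XE_UI_VULKAN_FUNCTION(vkAllocateMemory)
XE_UI_VULKAN_FUNCTION(vkCreateBuffer)
XE_UI_VULKAN_FUNCTION(vkGetDeviceQueue)
XE_UI_VULKAN_FUNCTION(vkQueueWaitIdle)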
@ -292,8 +301,8 @@ void VulkanDevice::ReleaseQueue(VkQueue queue, uint32_t queue_family_index) {
void VulkanDevice::DbgSetObjectName(uint64_t object,
VkDebugReportObjectTypeEXT object_type,
std::string name) {
if (!debug_marker_ena_ || pfn_vkDebugMarkerSetObjectNameEXT_ == nullptr) {
const std::string& name) const {
if (!debug_marker_ena_) {
// Extension disabled.
return;
}
@ -304,13 +313,13 @@ void VulkanDevice::DbgSetObjectName(uint64_t object,
info.objectType = object_type;
info.object = object;
info.pObjectName = name.c_str();
pfn_vkDebugMarkerSetObjectNameEXT_(*this, &info);
dfn_.vkDebugMarkerSetObjectNameEXT(*this, &info);
}
void VulkanDevice::DbgMarkerBegin(VkCommandBuffer command_buffer,
std::string name, float r, float g, float b,
float a) {
if (!debug_marker_ena_ || pfn_vkCmdDebugMarkerBeginEXT_ == nullptr) {
float a) const {
if (!debug_marker_ena_) {
// Extension disabled.
return;
}
@ -323,22 +332,22 @@ void VulkanDevice::DbgMarkerBegin(VkCommandBuffer command_buffer,
info.color[1] = g;
info.color[2] = b;
info.color[3] = a;
pfn_vkCmdDebugMarkerBeginEXT_(command_buffer, &info);
dfn_.vkCmdDebugMarkerBeginEXT(command_buffer, &info);
}
void VulkanDevice::DbgMarkerEnd(VkCommandBuffer command_buffer) {
if (!debug_marker_ena_ || pfn_vkCmdDebugMarkerEndEXT_ == nullptr) {
void VulkanDevice::DbgMarkerEnd(VkCommandBuffer command_buffer) const {
if (!debug_marker_ena_) {
// Extension disabled.
return;
}
pfn_vkCmdDebugMarkerEndEXT_(command_buffer);
dfn_.vkCmdDebugMarkerEndEXT(command_buffer);
}
void VulkanDevice::DbgMarkerInsert(VkCommandBuffer command_buffer,
std::string name, float r, float g, float b,
float a) {
if (!debug_marker_ena_ || pfn_vkCmdDebugMarkerInsertEXT_ == nullptr) {
float a) const {
if (!debug_marker_ena_) {
// Extension disabled.
return;
}
@ -351,7 +360,7 @@ void VulkanDevice::DbgMarkerInsert(VkCommandBuffer command_buffer,
info.color[1] = g;
info.color[2] = b;
info.color[3] = a;
pfn_vkCmdDebugMarkerInsertEXT_(command_buffer, &info);
dfn_.vkCmdDebugMarkerInsertEXT(command_buffer, &info);
}
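Call sites for the marker wrappers are unchanged by this commit; a usage sketch given the signatures in vulkan_device.h (cmd_buffer is a hypothetical VkCommandBuffer being recorded):

// Sketch: group and annotate commands for tools such as RenderDoc.
device->DbgMarkerBegin(cmd_buffer, "Swap blit", 0.0f, 1.0f, 0.0f, 1.0f);
// ... record commands ...
device->DbgMarkerInsert(cmd_buffer, "midpoint", 1.0f, 1.0f, 0.0f, 1.0f);
device->DbgMarkerEnd(cmd_buffer);
// With debug_marker_ena_ false (extension not enabled), all three no-op.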
bool VulkanDevice::is_renderdoc_attached() const {
@ -379,7 +388,8 @@ void VulkanDevice::EndRenderDocFrameCapture() {
}
VkDeviceMemory VulkanDevice::AllocateMemory(
const VkMemoryRequirements& requirements, VkFlags required_properties) {
const VkMemoryRequirements& requirements,
VkFlags required_properties) const {
// Search memory types to find one matching our requirements and our
// properties.
uint32_t type_index = UINT_MAX;
@ -407,7 +417,7 @@ VkDeviceMemory VulkanDevice::AllocateMemory(
memory_info.allocationSize = requirements.size;
memory_info.memoryTypeIndex = type_index;
VkDeviceMemory memory = nullptr;
auto err = vkAllocateMemory(handle, &memory_info, nullptr, &memory);
auto err = dfn_.vkAllocateMemory(handle, &memory_info, nullptr, &memory);
CheckResult(err, "vkAllocateMemory");
return memory;
}


@ -32,11 +32,23 @@ class VulkanDevice {
VulkanDevice(VulkanInstance* instance);
~VulkanDevice();
VulkanInstance* instance() const { return instance_; }
VkDevice handle = nullptr;
operator VkDevice() const { return handle; }
operator VkPhysicalDevice() const { return device_info_.handle; }
struct DeviceFunctions {
#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name;
#include "xenia/ui/vulkan/functions/device_1_0.inc"
#include "xenia/ui/vulkan/functions/device_amd_shader_info.inc"
#include "xenia/ui/vulkan/functions/device_ext_debug_marker.inc"
#include "xenia/ui/vulkan/functions/device_khr_swapchain.inc"
#undef XE_UI_VULKAN_FUNCTION
};
const DeviceFunctions& dfn() const { return dfn_; }
// Declares a layer to verify and enable upon initialization.
// Must be called before Initialize.
void DeclareRequiredLayer(std::string name, uint32_t min_version,
@ -78,16 +90,16 @@ class VulkanDevice {
void ReleaseQueue(VkQueue queue, uint32_t queue_family_index);
void DbgSetObjectName(uint64_t object, VkDebugReportObjectTypeEXT object_type,
std::string name);
const std::string& name) const;
void DbgMarkerBegin(VkCommandBuffer command_buffer, std::string name,
float r = 0.0f, float g = 0.0f, float b = 0.0f,
float a = 0.0f);
void DbgMarkerEnd(VkCommandBuffer command_buffer);
float a = 0.0f) const;
void DbgMarkerEnd(VkCommandBuffer command_buffer) const;
void DbgMarkerInsert(VkCommandBuffer command_buffer, std::string name,
float r = 0.0f, float g = 0.0f, float b = 0.0f,
float a = 0.0f);
float a = 0.0f) const;
// True if RenderDoc is attached and available for use.
bool is_renderdoc_attached() const;
@ -101,7 +113,7 @@ class VulkanDevice {
// Allocates memory of the given size matching the required properties.
VkDeviceMemory AllocateMemory(
const VkMemoryRequirements& requirements,
VkFlags required_properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
VkFlags required_properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) const;
private:
VulkanInstance* instance_ = nullptr;
@ -110,6 +122,8 @@ class VulkanDevice {
std::vector<Requirement> required_extensions_;
std::vector<const char*> enabled_extensions_;
DeviceFunctions dfn_ = {};
bool debug_marker_ena_ = false;
PFN_vkDebugMarkerSetObjectNameEXT pfn_vkDebugMarkerSetObjectNameEXT_ =
nullptr;


@ -29,9 +29,11 @@ constexpr uint32_t kCircularBufferCapacity = 2 * 1024 * 1024;
class LightweightCircularBuffer {
public:
LightweightCircularBuffer(VulkanDevice* device) : device_(*device) {
LightweightCircularBuffer(const VulkanDevice* device) : device_(*device) {
buffer_capacity_ = xe::round_up(kCircularBufferCapacity, 4096);
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
// Index buffer.
VkBufferCreateInfo index_buffer_info;
index_buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
@ -42,8 +44,8 @@ class LightweightCircularBuffer {
index_buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
index_buffer_info.queueFamilyIndexCount = 0;
index_buffer_info.pQueueFamilyIndices = nullptr;
auto status =
vkCreateBuffer(device_, &index_buffer_info, nullptr, &index_buffer_);
auto status = dfn.vkCreateBuffer(device_, &index_buffer_info, nullptr,
&index_buffer_);
CheckResult(status, "vkCreateBuffer");
// Vertex buffer.
@ -56,34 +58,37 @@ class LightweightCircularBuffer {
vertex_buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
vertex_buffer_info.queueFamilyIndexCount = 0;
vertex_buffer_info.pQueueFamilyIndices = nullptr;
status =
vkCreateBuffer(*device, &vertex_buffer_info, nullptr, &vertex_buffer_);
status = dfn.vkCreateBuffer(*device, &vertex_buffer_info, nullptr,
&vertex_buffer_);
CheckResult(status, "vkCreateBuffer");
// Allocate underlying buffer.
// We alias it for both vertices and indices.
VkMemoryRequirements buffer_requirements;
vkGetBufferMemoryRequirements(device_, index_buffer_, &buffer_requirements);
dfn.vkGetBufferMemoryRequirements(device_, index_buffer_,
&buffer_requirements);
buffer_memory_ = device->AllocateMemory(
buffer_requirements, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
vkBindBufferMemory(*device, index_buffer_, buffer_memory_, 0);
vkBindBufferMemory(*device, vertex_buffer_, buffer_memory_, 0);
dfn.vkBindBufferMemory(*device, index_buffer_, buffer_memory_, 0);
dfn.vkBindBufferMemory(*device, vertex_buffer_, buffer_memory_, 0);
// Persistent mapping.
status = vkMapMemory(device_, buffer_memory_, 0, VK_WHOLE_SIZE, 0,
&buffer_data_);
status = dfn.vkMapMemory(device_, buffer_memory_, 0, VK_WHOLE_SIZE, 0,
&buffer_data_);
CheckResult(status, "vkMapMemory");
}
~LightweightCircularBuffer() {
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
if (buffer_memory_) {
vkUnmapMemory(device_, buffer_memory_);
dfn.vkUnmapMemory(device_, buffer_memory_);
buffer_memory_ = nullptr;
}
VK_SAFE_DESTROY(vkDestroyBuffer, device_, index_buffer_, nullptr);
VK_SAFE_DESTROY(vkDestroyBuffer, device_, vertex_buffer_, nullptr);
VK_SAFE_DESTROY(vkFreeMemory, device_, buffer_memory_, nullptr);
DestroyAndNullHandle(dfn.vkDestroyBuffer, device_, index_buffer_);
DestroyAndNullHandle(dfn.vkDestroyBuffer, device_, vertex_buffer_);
DestroyAndNullHandle(dfn.vkFreeMemory, device_, buffer_memory_);
}
VkBuffer vertex_buffer() const { return vertex_buffer_; }
@ -118,18 +123,19 @@ class LightweightCircularBuffer {
// Flush memory.
// TODO(benvanik): do only in large batches? can barrier it.
const VulkanDevice::DeviceFunctions& dfn = device_.dfn();
VkMappedMemoryRange dirty_range;
dirty_range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
dirty_range.pNext = nullptr;
dirty_range.memory = buffer_memory_;
dirty_range.offset = offset;
dirty_range.size = source_length;
vkFlushMappedMemoryRanges(device_, 1, &dirty_range);
dfn.vkFlushMappedMemoryRanges(device_, 1, &dirty_range);
return offset;
}
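One caveat on the flush above: the memory is only requested as HOST_VISIBLE, not HOST_COHERENT, and for non-coherent memory the spec requires flushed ranges to be aligned to nonCoherentAtomSize. A sketch of an aligned flush, where 'limits' stands in for the device's VkPhysicalDeviceLimits (hypothetical here):

// Sketch: vkFlushMappedMemoryRanges requires offset to be a multiple of
// VkPhysicalDeviceLimits::nonCoherentAtomSize, and size likewise unless
// the range reaches the end of the allocation.
VkDeviceSize atom = limits.nonCoherentAtomSize;
VkDeviceSize aligned_offset = offset / atom * atom;
dirty_range.offset = aligned_offset;
dirty_range.size =
    xe::round_up(offset + source_length, atom) - aligned_offset;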
private:
VkDevice device_ = nullptr;
const VulkanDevice& device_;
VkBuffer index_buffer_ = nullptr;
VkBuffer vertex_buffer_ = nullptr;
@ -153,6 +159,7 @@ class VulkanImmediateTexture : public ImmediateTexture {
VkResult Initialize(VkDescriptorSetLayout descriptor_set_layout,
VkImageView image_view) {
image_view_ = image_view;
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
// Create descriptor set used just for this texture.
@ -163,8 +170,8 @@ class VulkanImmediateTexture : public ImmediateTexture {
set_alloc_info.descriptorPool = descriptor_pool_;
set_alloc_info.descriptorSetCount = 1;
set_alloc_info.pSetLayouts = &descriptor_set_layout;
status =
vkAllocateDescriptorSets(*device_, &set_alloc_info, &descriptor_set_);
status = dfn.vkAllocateDescriptorSets(*device_, &set_alloc_info,
&descriptor_set_);
CheckResult(status, "vkAllocateDescriptorSets");
if (status != VK_SUCCESS) {
return status;
@ -184,12 +191,13 @@ class VulkanImmediateTexture : public ImmediateTexture {
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &texture_info;
vkUpdateDescriptorSets(*device_, 1, &descriptor_write, 0, nullptr);
dfn.vkUpdateDescriptorSets(*device_, 1, &descriptor_write, 0, nullptr);
return VK_SUCCESS;
}
VkResult Initialize(VkDescriptorSetLayout descriptor_set_layout) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
// Create image object.
@ -209,7 +217,7 @@ class VulkanImmediateTexture : public ImmediateTexture {
image_info.queueFamilyIndexCount = 0;
image_info.pQueueFamilyIndices = nullptr;
image_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
status = vkCreateImage(*device_, &image_info, nullptr, &image_);
status = dfn.vkCreateImage(*device_, &image_info, nullptr, &image_);
CheckResult(status, "vkCreateImage");
if (status != VK_SUCCESS) {
return status;
@ -217,7 +225,7 @@ class VulkanImmediateTexture : public ImmediateTexture {
// Allocate memory for the image.
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(*device_, image_, &memory_requirements);
dfn.vkGetImageMemoryRequirements(*device_, image_, &memory_requirements);
device_memory_ = device_->AllocateMemory(
memory_requirements, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!device_memory_) {
@ -225,7 +233,7 @@ class VulkanImmediateTexture : public ImmediateTexture {
}
// Bind memory and the image together.
status = vkBindImageMemory(*device_, image_, device_memory_, 0);
status = dfn.vkBindImageMemory(*device_, image_, device_memory_, 0);
CheckResult(status, "vkBindImageMemory");
if (status != VK_SUCCESS) {
return status;
@ -246,7 +254,7 @@ class VulkanImmediateTexture : public ImmediateTexture {
VK_COMPONENT_SWIZZLE_A,
};
view_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
status = vkCreateImageView(*device_, &view_info, nullptr, &image_view_);
status = dfn.vkCreateImageView(*device_, &view_info, nullptr, &image_view_);
CheckResult(status, "vkCreateImageView");
if (status != VK_SUCCESS) {
return status;
@ -260,8 +268,8 @@ class VulkanImmediateTexture : public ImmediateTexture {
set_alloc_info.descriptorPool = descriptor_pool_;
set_alloc_info.descriptorSetCount = 1;
set_alloc_info.pSetLayouts = &descriptor_set_layout;
status =
vkAllocateDescriptorSets(*device_, &set_alloc_info, &descriptor_set_);
status = dfn.vkAllocateDescriptorSets(*device_, &set_alloc_info,
&descriptor_set_);
CheckResult(status, "vkAllocateDescriptorSets");
if (status != VK_SUCCESS) {
return status;
@ -281,44 +289,48 @@ class VulkanImmediateTexture : public ImmediateTexture {
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &texture_info;
vkUpdateDescriptorSets(*device_, 1, &descriptor_write, 0, nullptr);
dfn.vkUpdateDescriptorSets(*device_, 1, &descriptor_write, 0, nullptr);
return VK_SUCCESS;
}
void Shutdown() {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (descriptor_set_) {
vkFreeDescriptorSets(*device_, descriptor_pool_, 1, &descriptor_set_);
dfn.vkFreeDescriptorSets(*device_, descriptor_pool_, 1, &descriptor_set_);
descriptor_set_ = nullptr;
}
VK_SAFE_DESTROY(vkDestroyImageView, *device_, image_view_, nullptr);
VK_SAFE_DESTROY(vkDestroyImage, *device_, image_, nullptr);
VK_SAFE_DESTROY(vkFreeMemory, *device_, device_memory_, nullptr);
DestroyAndNullHandle(dfn.vkDestroyImageView, *device_, image_view_);
DestroyAndNullHandle(dfn.vkDestroyImage, *device_, image_);
DestroyAndNullHandle(dfn.vkFreeMemory, *device_, device_memory_);
}
VkResult Upload(const uint8_t* src_data) {
// TODO(benvanik): assert not in use? textures aren't dynamic right now.
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
// Get device image layout.
VkImageSubresource subresource;
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresource.mipLevel = 0;
subresource.arrayLayer = 0;
VkSubresourceLayout layout;
vkGetImageSubresourceLayout(*device_, image_, &subresource, &layout);
dfn.vkGetImageSubresourceLayout(*device_, image_, &subresource, &layout);
// Map memory for upload.
uint8_t* gpu_data = nullptr;
auto status = vkMapMemory(*device_, device_memory_, 0, layout.size, 0,
reinterpret_cast<void**>(&gpu_data));
auto status = dfn.vkMapMemory(*device_, device_memory_, 0, layout.size, 0,
reinterpret_cast<void**>(&gpu_data));
CheckResult(status, "vkMapMemory");
if (status == VK_SUCCESS) {
// Copy the entire texture, hoping its layout matches what we expect.
std::memcpy(gpu_data + layout.offset, src_data, layout.size);
vkUnmapMemory(*device_, device_memory_);
dfn.vkUnmapMemory(*device_, device_memory_);
}
return status;
@ -328,6 +340,8 @@ class VulkanImmediateTexture : public ImmediateTexture {
// the command buffer WILL be queued and executed by the device.
void TransitionLayout(VkCommandBuffer command_buffer,
VkImageLayout new_layout) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkImageMemoryBarrier image_barrier;
image_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_barrier.pNext = nullptr;
@ -342,16 +356,16 @@ class VulkanImmediateTexture : public ImmediateTexture {
image_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
image_layout_ = new_layout;
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &image_barrier);
dfn.vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr,
0, nullptr, 1, &image_barrier);
}
VkDescriptorSet descriptor_set() const { return descriptor_set_; }
VkImageLayout layout() const { return image_layout_; }
private:
ui::vulkan::VulkanDevice* device_ = nullptr;
VulkanDevice* device_ = nullptr;
VkDescriptorPool descriptor_pool_ = nullptr;
VkSampler sampler_ = nullptr; // Not owned.
VkImage image_ = nullptr;
@ -367,7 +381,8 @@ VulkanImmediateDrawer::VulkanImmediateDrawer(VulkanContext* graphics_context)
VulkanImmediateDrawer::~VulkanImmediateDrawer() { Shutdown(); }
VkResult VulkanImmediateDrawer::Initialize() {
auto device = context_->device();
const VulkanDevice* device = context_->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
// NEAREST + CLAMP
VkSamplerCreateInfo sampler_info;
@ -389,8 +404,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
sampler_info.maxLod = 0.0f;
sampler_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
sampler_info.unnormalizedCoordinates = VK_FALSE;
auto status = vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.nearest_clamp);
auto status = dfn.vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.nearest_clamp);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return status;
@ -400,8 +415,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
status = vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.nearest_repeat);
status = dfn.vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.nearest_repeat);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return status;
@ -413,8 +428,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
status =
vkCreateSampler(*device, &sampler_info, nullptr, &samplers_.linear_clamp);
status = dfn.vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.linear_clamp);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return status;
@ -424,8 +439,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
status = vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.linear_repeat);
status = dfn.vkCreateSampler(*device, &sampler_info, nullptr,
&samplers_.linear_repeat);
CheckResult(status, "vkCreateSampler");
if (status != VK_SUCCESS) {
return status;
@ -447,8 +462,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
texture_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
texture_binding.pImmutableSamplers = nullptr;
texture_set_layout_info.pBindings = &texture_binding;
status = vkCreateDescriptorSetLayout(*device, &texture_set_layout_info,
nullptr, &texture_set_layout_);
status = dfn.vkCreateDescriptorSetLayout(*device, &texture_set_layout_info,
nullptr, &texture_set_layout_);
CheckResult(status, "vkCreateDescriptorSetLayout");
if (status != VK_SUCCESS) {
return status;
@ -468,8 +483,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
pool_sizes[0].descriptorCount = 128;
descriptor_pool_info.poolSizeCount = 1;
descriptor_pool_info.pPoolSizes = pool_sizes;
status = vkCreateDescriptorPool(*device, &descriptor_pool_info, nullptr,
&descriptor_pool_);
status = dfn.vkCreateDescriptorPool(*device, &descriptor_pool_info, nullptr,
&descriptor_pool_);
CheckResult(status, "vkCreateDescriptorPool");
if (status != VK_SUCCESS) {
return status;
@ -495,8 +510,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
pipeline_layout_info.pushConstantRangeCount =
static_cast<uint32_t>(xe::countof(push_constant_ranges));
pipeline_layout_info.pPushConstantRanges = push_constant_ranges;
status = vkCreatePipelineLayout(*device, &pipeline_layout_info, nullptr,
&pipeline_layout_);
status = dfn.vkCreatePipelineLayout(*device, &pipeline_layout_info, nullptr,
&pipeline_layout_);
CheckResult(status, "vkCreatePipelineLayout");
if (status != VK_SUCCESS) {
return status;
@ -510,8 +525,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
vertex_shader_info.codeSize = sizeof(immediate_vert);
vertex_shader_info.pCode = reinterpret_cast<const uint32_t*>(immediate_vert);
VkShaderModule vertex_shader;
status = vkCreateShaderModule(*device, &vertex_shader_info, nullptr,
&vertex_shader);
status = dfn.vkCreateShaderModule(*device, &vertex_shader_info, nullptr,
&vertex_shader);
CheckResult(status, "vkCreateShaderModule");
VkShaderModuleCreateInfo fragment_shader_info;
fragment_shader_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
@ -521,8 +536,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
fragment_shader_info.pCode =
reinterpret_cast<const uint32_t*>(immediate_frag);
VkShaderModule fragment_shader;
status = vkCreateShaderModule(*device, &fragment_shader_info, nullptr,
&fragment_shader);
status = dfn.vkCreateShaderModule(*device, &fragment_shader_info, nullptr,
&fragment_shader);
CheckResult(status, "vkCreateShaderModule");
// Pipeline used when rendering triangles.
@ -667,8 +682,8 @@ VkResult VulkanImmediateDrawer::Initialize() {
pipeline_info.basePipelineHandle = nullptr;
pipeline_info.basePipelineIndex = -1;
if (status == VK_SUCCESS) {
status = vkCreateGraphicsPipelines(*device, nullptr, 1, &pipeline_info,
nullptr, &triangle_pipeline_);
status = dfn.vkCreateGraphicsPipelines(*device, nullptr, 1, &pipeline_info,
nullptr, &triangle_pipeline_);
CheckResult(status, "vkCreateGraphicsPipelines");
}
@ -678,13 +693,13 @@ VkResult VulkanImmediateDrawer::Initialize() {
pipeline_info.basePipelineHandle = triangle_pipeline_;
pipeline_info.basePipelineIndex = -1;
if (status == VK_SUCCESS) {
status = vkCreateGraphicsPipelines(*device, nullptr, 1, &pipeline_info,
nullptr, &line_pipeline_);
status = dfn.vkCreateGraphicsPipelines(*device, nullptr, 1, &pipeline_info,
nullptr, &line_pipeline_);
CheckResult(status, "vkCreateGraphicsPipelines");
}
VK_SAFE_DESTROY(vkDestroyShaderModule, *device, vertex_shader, nullptr);
VK_SAFE_DESTROY(vkDestroyShaderModule, *device, fragment_shader, nullptr);
DestroyAndNullHandle(dfn.vkDestroyShaderModule, *device, vertex_shader);
DestroyAndNullHandle(dfn.vkDestroyShaderModule, *device, fragment_shader);
// Allocate the buffer we'll use for our vertex and index data.
circular_buffer_ = std::make_unique<LightweightCircularBuffer>(device);
@ -693,22 +708,23 @@ VkResult VulkanImmediateDrawer::Initialize() {
}
void VulkanImmediateDrawer::Shutdown() {
auto device = context_->device();
const VulkanDevice* device = context_->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
circular_buffer_.reset();
VK_SAFE_DESTROY(vkDestroyPipeline, *device, line_pipeline_, nullptr);
VK_SAFE_DESTROY(vkDestroyPipeline, *device, triangle_pipeline_, nullptr);
VK_SAFE_DESTROY(vkDestroyPipelineLayout, *device, pipeline_layout_, nullptr);
DestroyAndNullHandle(dfn.vkDestroyPipeline, *device, line_pipeline_);
DestroyAndNullHandle(dfn.vkDestroyPipeline, *device, triangle_pipeline_);
DestroyAndNullHandle(dfn.vkDestroyPipelineLayout, *device, pipeline_layout_);
VK_SAFE_DESTROY(vkDestroyDescriptorPool, *device, descriptor_pool_, nullptr);
VK_SAFE_DESTROY(vkDestroyDescriptorSetLayout, *device, texture_set_layout_,
nullptr);
DestroyAndNullHandle(dfn.vkDestroyDescriptorPool, *device, descriptor_pool_);
DestroyAndNullHandle(dfn.vkDestroyDescriptorSetLayout, *device,
texture_set_layout_);
VK_SAFE_DESTROY(vkDestroySampler, *device, samplers_.nearest_clamp, nullptr);
VK_SAFE_DESTROY(vkDestroySampler, *device, samplers_.nearest_repeat, nullptr);
VK_SAFE_DESTROY(vkDestroySampler, *device, samplers_.linear_clamp, nullptr);
VK_SAFE_DESTROY(vkDestroySampler, *device, samplers_.linear_repeat, nullptr);
DestroyAndNullHandle(dfn.vkDestroySampler, *device, samplers_.nearest_clamp);
DestroyAndNullHandle(dfn.vkDestroySampler, *device, samplers_.nearest_repeat);
DestroyAndNullHandle(dfn.vkDestroySampler, *device, samplers_.linear_clamp);
DestroyAndNullHandle(dfn.vkDestroySampler, *device, samplers_.linear_repeat);
}
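DestroyAndNullHandle, which replaces the old VK_SAFE_DESTROY macro throughout this commit, comes from vulkan_util.h; its definition does not appear in these hunks, so this is an assumed shape, inferred from the three-argument call sites above:

// Assumed shape of the helper (actual definition lives in vulkan_util.h):
// guard against null, destroy through the given function pointer, clear.
template <typename F, typename P, typename T>
void DestroyAndNullHandle(F destroy_function, P parent, T& handle) {
  if (handle != VK_NULL_HANDLE) {
    destroy_function(parent, handle, nullptr);
    handle = VK_NULL_HANDLE;
  }
}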
std::unique_ptr<ImmediateTexture> VulkanImmediateDrawer::CreateTexture(
@ -751,7 +767,8 @@ std::unique_ptr<ImmediateTexture> VulkanImmediateDrawer::WrapTexture(
void VulkanImmediateDrawer::Begin(int render_target_width,
int render_target_height) {
auto device = context_->device();
const VulkanDevice* device = context_->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
auto swap_chain = context_->swap_chain();
assert_null(current_cmd_buffer_);
current_cmd_buffer_ = swap_chain->render_cmd_buffer();
@ -766,7 +783,7 @@ void VulkanImmediateDrawer::Begin(int render_target_width,
viewport.height = static_cast<float>(render_target_height);
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
vkCmdSetViewport(current_cmd_buffer_, 0, 1, &viewport);
dfn.vkCmdSetViewport(current_cmd_buffer_, 0, 1, &viewport);
// Update projection matrix.
const float ortho_projection[4][4] = {
@ -775,13 +792,14 @@ void VulkanImmediateDrawer::Begin(int render_target_width,
{0.0f, 0.0f, -1.0f, 0.0f},
{-1.0f, 1.0f, 0.0f, 1.0f},
};
vkCmdPushConstants(current_cmd_buffer_, pipeline_layout_,
VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(float) * 16,
ortho_projection);
dfn.vkCmdPushConstants(current_cmd_buffer_, pipeline_layout_,
VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(float) * 16,
ortho_projection);
}
void VulkanImmediateDrawer::BeginDrawBatch(const ImmediateDrawBatch& batch) {
auto device = context_->device();
const VulkanDevice* device = context_->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
// Upload vertices.
VkDeviceSize vertices_offset = circular_buffer_->Emplace(
@ -791,8 +809,8 @@ void VulkanImmediateDrawer::BeginDrawBatch(const ImmediateDrawBatch& batch) {
return;
}
auto vertex_buffer = circular_buffer_->vertex_buffer();
vkCmdBindVertexBuffers(current_cmd_buffer_, 0, 1, &vertex_buffer,
&vertices_offset);
dfn.vkCmdBindVertexBuffers(current_cmd_buffer_, 0, 1, &vertex_buffer,
&vertices_offset);
// Upload indices.
if (batch.indices) {
@ -802,22 +820,27 @@ void VulkanImmediateDrawer::BeginDrawBatch(const ImmediateDrawBatch& batch) {
// TODO(benvanik): die?
return;
}
vkCmdBindIndexBuffer(current_cmd_buffer_, circular_buffer_->index_buffer(),
indices_offset, VK_INDEX_TYPE_UINT16);
dfn.vkCmdBindIndexBuffer(current_cmd_buffer_,
circular_buffer_->index_buffer(), indices_offset,
VK_INDEX_TYPE_UINT16);
}
batch_has_index_buffer_ = !!batch.indices;
}
void VulkanImmediateDrawer::Draw(const ImmediateDraw& draw) {
const VulkanDevice* device = context_->device();
const VulkanDevice::DeviceFunctions& dfn = device->dfn();
switch (draw.primitive_type) {
case ImmediatePrimitiveType::kLines:
vkCmdBindPipeline(current_cmd_buffer_, VK_PIPELINE_BIND_POINT_GRAPHICS,
line_pipeline_);
dfn.vkCmdBindPipeline(current_cmd_buffer_,
VK_PIPELINE_BIND_POINT_GRAPHICS, line_pipeline_);
break;
case ImmediatePrimitiveType::kTriangles:
vkCmdBindPipeline(current_cmd_buffer_, VK_PIPELINE_BIND_POINT_GRAPHICS,
triangle_pipeline_);
dfn.vkCmdBindPipeline(current_cmd_buffer_,
VK_PIPELINE_BIND_POINT_GRAPHICS,
triangle_pipeline_);
break;
}
@ -833,18 +856,18 @@ void VulkanImmediateDrawer::Draw(const ImmediateDraw& draw) {
XELOGW("Failed to acquire texture descriptor set for immediate drawer!");
}
vkCmdBindDescriptorSets(current_cmd_buffer_,
VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout_,
0, 1, &texture_set, 0, nullptr);
dfn.vkCmdBindDescriptorSets(
current_cmd_buffer_, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout_,
0, 1, &texture_set, 0, nullptr);
}
// Use push constants for our per-draw changes.
// Here, the restrict_texture_samples uniform (used before September 26,
// 2020, now deleted) is pushed as a zero placeholder.
int restrict_texture_samples = 0;
vkCmdPushConstants(current_cmd_buffer_, pipeline_layout_,
VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(float) * 16,
sizeof(int), &restrict_texture_samples);
dfn.vkCmdPushConstants(current_cmd_buffer_, pipeline_layout_,
VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(float) * 16,
sizeof(int), &restrict_texture_samples);
// Scissor, if enabled.
// Scissor can be disabled by making it the full screen.
@ -861,14 +884,14 @@ void VulkanImmediateDrawer::Draw(const ImmediateDraw& draw) {
scissor.extent.width = current_render_target_width_;
scissor.extent.height = current_render_target_height_;
}
vkCmdSetScissor(current_cmd_buffer_, 0, 1, &scissor);
dfn.vkCmdSetScissor(current_cmd_buffer_, 0, 1, &scissor);
// Issue draw.
if (batch_has_index_buffer_) {
vkCmdDrawIndexed(current_cmd_buffer_, draw.count, 1, draw.index_offset,
draw.base_vertex, 0);
dfn.vkCmdDrawIndexed(current_cmd_buffer_, draw.count, 1, draw.index_offset,
draw.base_vertex, 0);
} else {
vkCmdDraw(current_cmd_buffer_, draw.count, 1, draw.base_vertex, 0);
dfn.vkCmdDraw(current_cmd_buffer_, draw.count, 1, draw.base_vertex, 0);
}
}


@ -10,21 +10,28 @@
#include "xenia/ui/vulkan/vulkan_instance.h"
#include <cinttypes>
#include <cstring>
#include <mutex>
#include <string>
#include "third_party/renderdoc/renderdoc_app.h"
#include "third_party/volk/volk.h"
#include "xenia/base/assert.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/platform.h"
#include "xenia/base/profiling.h"
#include "xenia/ui/vulkan/vulkan.h"
#include "xenia/ui/vulkan/vulkan_immediate_drawer.h"
#include "xenia/ui/vulkan/vulkan_util.h"
#include "xenia/ui/window.h"
#if XE_PLATFORM_LINUX
#include <dlfcn.h>
#elif XE_PLATFORM_WIN32
#include "xenia/base/platform_win.h"
#endif
#if XE_PLATFORM_LINUX
#include "xenia/ui/window_gtk.h"
#endif
@ -71,8 +78,61 @@ VulkanInstance::~VulkanInstance() { DestroyInstance(); }
bool VulkanInstance::Initialize() {
auto version = Version::Parse(VK_API_VERSION);
XELOGVK("Initializing Vulkan {}...", version.pretty_string);
if (volkInitialize() != VK_SUCCESS) {
XELOGE("volkInitialize() failed!");
// Load the library.
bool library_functions_loaded = true;
#if XE_PLATFORM_LINUX
#if XE_PLATFORM_ANDROID
const char* libvulkan_name = "libvulkan.so";
#else
const char* libvulkan_name = "libvulkan.so.1";
#endif
// http://developer.download.nvidia.com/mobile/shield/assets/Vulkan/UsingtheVulkanAPI.pdf
library_ = dlopen(libvulkan_name, RTLD_NOW | RTLD_LOCAL);
if (!library_) {
XELOGE("Failed to load {}", libvulkan_name);
return false;
}
#define XE_VULKAN_LOAD_MODULE_LFN(name) \
library_functions_loaded &= \
(lfn_.name = PFN_##name(dlsym(library_, #name))) != nullptr;
#elif XE_PLATFORM_WIN32
library_ = LoadLibraryA("vulkan-1.dll");
if (!library_) {
XELOGE("Failed to load vulkan-1.dll");
return false;
}
#define XE_VULKAN_LOAD_MODULE_LFN(name) \
library_functions_loaded &= \
(lfn_.name = PFN_##name(GetProcAddress(library_, #name))) != nullptr;
#else
#error No Vulkan library loading provided for the target platform.
#endif
XE_VULKAN_LOAD_MODULE_LFN(vkGetInstanceProcAddr);
XE_VULKAN_LOAD_MODULE_LFN(vkDestroyInstance);
#undef XE_VULKAN_LOAD_MODULE_LFN
if (!library_functions_loaded) {
XELOGE("Failed to get Vulkan library function pointers");
return false;
}
library_functions_loaded &=
(lfn_.vkCreateInstance = PFN_vkCreateInstance(lfn_.vkGetInstanceProcAddr(
VK_NULL_HANDLE, "vkCreateInstance"))) != nullptr;
library_functions_loaded &=
(lfn_.vkEnumerateInstanceExtensionProperties =
PFN_vkEnumerateInstanceExtensionProperties(
lfn_.vkGetInstanceProcAddr(
VK_NULL_HANDLE,
"vkEnumerateInstanceExtensionProperties"))) != nullptr;
library_functions_loaded &=
(lfn_.vkEnumerateInstanceLayerProperties =
PFN_vkEnumerateInstanceLayerProperties(lfn_.vkGetInstanceProcAddr(
VK_NULL_HANDLE, "vkEnumerateInstanceLayerProperties"))) !=
nullptr;
if (!library_functions_loaded) {
XELOGE(
"Failed to get Vulkan library function pointers via "
"vkGetInstanceProcAddr");
return false;
}
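A condensed sketch of the bootstrap order the code above implements (Linux path shown, error handling omitted): only vkGetInstanceProcAddr and vkDestroyInstance come from the module's exports, global-level commands are queried with a null instance, and instance-level commands can be queried only after vkCreateInstance succeeds.

// Sketch only; mirrors the loading stages above.
void* library = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL);
auto vkGetInstanceProcAddr =
    PFN_vkGetInstanceProcAddr(dlsym(library, "vkGetInstanceProcAddr"));
// Global-level commands: queried with VK_NULL_HANDLE as the instance.
auto vkCreateInstance = PFN_vkCreateInstance(
    vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkCreateInstance"));
// Instance-level commands are queried later, in CreateInstance(), once a
// VkInstance handle exists.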
@ -149,11 +209,11 @@ bool VulkanInstance::QueryGlobals() {
std::vector<VkLayerProperties> global_layer_properties;
VkResult err;
do {
err = vkEnumerateInstanceLayerProperties(&count, nullptr);
err = lfn_.vkEnumerateInstanceLayerProperties(&count, nullptr);
CheckResult(err, "vkEnumerateInstanceLayerProperties");
global_layer_properties.resize(count);
err = vkEnumerateInstanceLayerProperties(&count,
global_layer_properties.data());
err = lfn_.vkEnumerateInstanceLayerProperties(
&count, global_layer_properties.data());
} while (err == VK_INCOMPLETE);
CheckResult(err, "vkEnumerateInstanceLayerProperties");
global_layers_.resize(count);
@ -163,11 +223,11 @@ bool VulkanInstance::QueryGlobals() {
// Get all extensions available for the layer.
do {
err = vkEnumerateInstanceExtensionProperties(
err = lfn_.vkEnumerateInstanceExtensionProperties(
global_layer.properties.layerName, &count, nullptr);
CheckResult(err, "vkEnumerateInstanceExtensionProperties");
global_layer.extensions.resize(count);
err = vkEnumerateInstanceExtensionProperties(
err = lfn_.vkEnumerateInstanceExtensionProperties(
global_layer.properties.layerName, &count,
global_layer.extensions.data());
} while (err == VK_INCOMPLETE);
@ -190,11 +250,11 @@ bool VulkanInstance::QueryGlobals() {
// Scan global extensions.
do {
err = vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
err = lfn_.vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
CheckResult(err, "vkEnumerateInstanceExtensionProperties");
global_extensions_.resize(count);
err = vkEnumerateInstanceExtensionProperties(nullptr, &count,
global_extensions_.data());
err = lfn_.vkEnumerateInstanceExtensionProperties(
nullptr, &count, global_extensions_.data());
} while (err == VK_INCOMPLETE);
CheckResult(err, "vkEnumerateInstanceExtensionProperties");
XELOGVK("Found {} global extensions:", global_extensions_.size());
@ -246,7 +306,7 @@ bool VulkanInstance::CreateInstance() {
static_cast<uint32_t>(enabled_extensions.size());
instance_info.ppEnabledExtensionNames = enabled_extensions.data();
auto err = vkCreateInstance(&instance_info, nullptr, &handle);
auto err = lfn_.vkCreateInstance(&instance_info, nullptr, &handle);
if (err != VK_SUCCESS) {
XELOGE("vkCreateInstance returned {}", to_string(err));
}
@ -273,8 +333,39 @@ bool VulkanInstance::CreateInstance() {
return false;
}
// Load Vulkan entrypoints and extensions.
volkLoadInstance(handle);
// Check if extensions are enabled.
dbg_report_ena_ = false;
for (const char* enabled_extension : enabled_extensions) {
if (!dbg_report_ena_ &&
!std::strcmp(enabled_extension, VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
dbg_report_ena_ = true;
}
}
// Get instance functions.
std::memset(&ifn_, 0, sizeof(ifn_));
bool instance_functions_loaded = true;
#define XE_UI_VULKAN_FUNCTION(name) \
instance_functions_loaded &= \
(ifn_.name = PFN_##name(lfn_.vkGetInstanceProcAddr(handle, #name))) != \
nullptr;
#include "xenia/ui/vulkan/functions/instance_1_0.inc"
#include "xenia/ui/vulkan/functions/instance_khr_surface.inc"
#if XE_PLATFORM_ANDROID
#include "xenia/ui/vulkan/functions/instance_khr_android_surface.inc"
#elif XE_PLATFORM_GNULINUX
#include "xenia/ui/vulkan/functions/instance_khr_xcb_surface.inc"
#elif XE_PLATFORM_WIN32
#include "xenia/ui/vulkan/functions/instance_khr_win32_surface.inc"
#endif
if (dbg_report_ena_) {
#include "xenia/ui/vulkan/functions/instance_ext_debug_report.inc"
}
#undef XE_UI_VULKAN_FUNCTION
if (!instance_functions_loaded) {
XELOGE("Failed to get Vulkan instance function pointers");
return false;
}
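For reference, one expansion of the macro above, XE_UI_VULKAN_FUNCTION(vkEnumeratePhysicalDevices), an entry point instance_1_0.inc evidently lists since ifn_.vkEnumeratePhysicalDevices is called below:

// What the loader macro expands to for a single entry point.
instance_functions_loaded &=
    (ifn_.vkEnumeratePhysicalDevices = PFN_vkEnumeratePhysicalDevices(
         lfn_.vkGetInstanceProcAddr(handle, "vkEnumeratePhysicalDevices"))) !=
    nullptr;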
// Enable debug validation, if needed.
EnableDebugValidation();
@ -283,12 +374,23 @@ bool VulkanInstance::CreateInstance() {
}
void VulkanInstance::DestroyInstance() {
if (!handle) {
return;
if (handle) {
DisableDebugValidation();
lfn_.vkDestroyInstance(handle, nullptr);
handle = nullptr;
}
DisableDebugValidation();
vkDestroyInstance(handle, nullptr);
handle = nullptr;
#if XE_PLATFORM_LINUX
if (library_) {
dlclose(library_);
library_ = nullptr;
}
#elif XE_PLATFORM_WIN32
if (library_) {
FreeLibrary(library_);
library_ = nullptr;
}
#endif
}
VkBool32 VKAPI_PTR DebugMessageCallback(VkDebugReportFlagsEXT flags,
@ -329,16 +431,13 @@ VkBool32 VKAPI_PTR DebugMessageCallback(VkDebugReportFlagsEXT flags,
}
void VulkanInstance::EnableDebugValidation() {
if (dbg_report_callback_) {
DisableDebugValidation();
}
auto vk_create_debug_report_callback_ext =
reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
vkGetInstanceProcAddr(handle, "vkCreateDebugReportCallbackEXT"));
if (!vk_create_debug_report_callback_ext) {
if (!dbg_report_ena_) {
XELOGVK("Debug validation layer not installed; ignoring");
return;
}
if (dbg_report_callback_) {
DisableDebugValidation();
}
VkDebugReportCallbackCreateInfoEXT create_info;
create_info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
create_info.pNext = nullptr;
@ -349,7 +448,7 @@ void VulkanInstance::EnableDebugValidation() {
VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_DEBUG_BIT_EXT;
create_info.pfnCallback = &DebugMessageCallback;
create_info.pUserData = this;
auto status = vk_create_debug_report_callback_ext(
auto status = ifn_.vkCreateDebugReportCallbackEXT(
handle, &create_info, nullptr, &dbg_report_callback_);
if (status == VK_SUCCESS) {
XELOGVK("Debug validation layer enabled");
@ -360,16 +459,10 @@ void VulkanInstance::EnableDebugValidation() {
}
void VulkanInstance::DisableDebugValidation() {
if (!dbg_report_callback_) {
if (!dbg_report_ena_ || !dbg_report_callback_) {
return;
}
auto vk_destroy_debug_report_callback_ext =
reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
vkGetInstanceProcAddr(handle, "vkDestroyDebugReportCallbackEXT"));
if (!vk_destroy_debug_report_callback_ext) {
return;
}
vk_destroy_debug_report_callback_ext(handle, dbg_report_callback_, nullptr);
ifn_.vkDestroyDebugReportCallbackEXT(handle, dbg_report_callback_, nullptr);
dbg_report_callback_ = nullptr;
}
@ -377,11 +470,11 @@ bool VulkanInstance::QueryDevices() {
// Get handles to all devices.
uint32_t count = 0;
std::vector<VkPhysicalDevice> device_handles;
auto err = vkEnumeratePhysicalDevices(handle, &count, nullptr);
auto err = ifn_.vkEnumeratePhysicalDevices(handle, &count, nullptr);
CheckResult(err, "vkEnumeratePhysicalDevices");
device_handles.resize(count);
err = vkEnumeratePhysicalDevices(handle, &count, device_handles.data());
err = ifn_.vkEnumeratePhysicalDevices(handle, &count, device_handles.data());
CheckResult(err, "vkEnumeratePhysicalDevices");
// Query device info.
@ -391,33 +484,34 @@ bool VulkanInstance::QueryDevices() {
device_info.handle = device_handle;
// Query general attributes.
vkGetPhysicalDeviceProperties(device_handle, &device_info.properties);
vkGetPhysicalDeviceFeatures(device_handle, &device_info.features);
vkGetPhysicalDeviceMemoryProperties(device_handle,
&device_info.memory_properties);
ifn_.vkGetPhysicalDeviceProperties(device_handle, &device_info.properties);
ifn_.vkGetPhysicalDeviceFeatures(device_handle, &device_info.features);
ifn_.vkGetPhysicalDeviceMemoryProperties(device_handle,
&device_info.memory_properties);
// Gather queue family properties.
vkGetPhysicalDeviceQueueFamilyProperties(device_handle, &count, nullptr);
ifn_.vkGetPhysicalDeviceQueueFamilyProperties(device_handle, &count,
nullptr);
device_info.queue_family_properties.resize(count);
vkGetPhysicalDeviceQueueFamilyProperties(
ifn_.vkGetPhysicalDeviceQueueFamilyProperties(
device_handle, &count, device_info.queue_family_properties.data());
// Gather layers.
std::vector<VkLayerProperties> layer_properties;
err = vkEnumerateDeviceLayerProperties(device_handle, &count, nullptr);
err = ifn_.vkEnumerateDeviceLayerProperties(device_handle, &count, nullptr);
CheckResult(err, "vkEnumerateDeviceLayerProperties");
layer_properties.resize(count);
err = vkEnumerateDeviceLayerProperties(device_handle, &count,
layer_properties.data());
err = ifn_.vkEnumerateDeviceLayerProperties(device_handle, &count,
layer_properties.data());
CheckResult(err, "vkEnumerateDeviceLayerProperties");
for (size_t j = 0; j < layer_properties.size(); ++j) {
LayerInfo layer_info;
layer_info.properties = layer_properties[j];
err = vkEnumerateDeviceExtensionProperties(
err = ifn_.vkEnumerateDeviceExtensionProperties(
device_handle, layer_info.properties.layerName, &count, nullptr);
CheckResult(err, "vkEnumerateDeviceExtensionProperties");
layer_info.extensions.resize(count);
err = vkEnumerateDeviceExtensionProperties(
err = ifn_.vkEnumerateDeviceExtensionProperties(
device_handle, layer_info.properties.layerName, &count,
layer_info.extensions.data());
CheckResult(err, "vkEnumerateDeviceExtensionProperties");
@ -425,12 +519,12 @@ bool VulkanInstance::QueryDevices() {
}
// Gather extensions.
err = vkEnumerateDeviceExtensionProperties(device_handle, nullptr, &count,
nullptr);
err = ifn_.vkEnumerateDeviceExtensionProperties(device_handle, nullptr,
&count, nullptr);
CheckResult(err, "vkEnumerateDeviceExtensionProperties");
device_info.extensions.resize(count);
err = vkEnumerateDeviceExtensionProperties(device_handle, nullptr, &count,
device_info.extensions.data());
err = ifn_.vkEnumerateDeviceExtensionProperties(
device_handle, nullptr, &count, device_info.extensions.data());
CheckResult(err, "vkEnumerateDeviceExtensionProperties");
available_devices_.push_back(std::move(device_info));


@ -14,6 +14,7 @@
#include <string>
#include <vector>
#include "xenia/base/platform.h"
#include "xenia/ui/vulkan/vulkan.h"
#include "xenia/ui/vulkan/vulkan_util.h"
#include "xenia/ui/window.h"
@ -28,10 +29,38 @@ class VulkanInstance {
VulkanInstance();
~VulkanInstance();
struct LibraryFunctions {
// From the module.
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
PFN_vkDestroyInstance vkDestroyInstance;
// From vkGetInstanceProcAddr.
PFN_vkCreateInstance vkCreateInstance;
PFN_vkEnumerateInstanceExtensionProperties
vkEnumerateInstanceExtensionProperties;
PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;
};
const LibraryFunctions& lfn() const { return lfn_; }
VkInstance handle = nullptr;
operator VkInstance() const { return handle; }
struct InstanceFunctions {
#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name;
#include "xenia/ui/vulkan/functions/instance_1_0.inc"
#include "xenia/ui/vulkan/functions/instance_ext_debug_report.inc"
#include "xenia/ui/vulkan/functions/instance_khr_surface.inc"
#if XE_PLATFORM_ANDROID
#include "xenia/ui/vulkan/functions/instance_khr_android_surface.inc"
#elif XE_PLATFORM_GNULINUX
#include "xenia/ui/vulkan/functions/instance_khr_xcb_surface.inc"
#elif XE_PLATFORM_WIN32
#include "xenia/ui/vulkan/functions/instance_khr_win32_surface.inc"
#endif
#undef XE_UI_VULKAN_FUNCTION
};
const InstanceFunctions& ifn() const { return ifn_; }
// Declares a layer to verify and enable upon initialization.
// Must be called before Initialize.
void DeclareRequiredLayer(std::string name, uint32_t min_version,
@ -85,13 +114,24 @@ class VulkanInstance {
const char* indent);
void DumpDeviceInfo(const DeviceInfo& device_info);
#if XE_PLATFORM_LINUX
void* library_ = nullptr;
#elif XE_PLATFORM_WIN32
HMODULE library_ = nullptr;
#endif
LibraryFunctions lfn_ = {};
std::vector<Requirement> required_layers_;
std::vector<Requirement> required_extensions_;
InstanceFunctions ifn_ = {};
std::vector<LayerInfo> global_layers_;
std::vector<VkExtensionProperties> global_extensions_;
std::vector<DeviceInfo> available_devices_;
bool dbg_report_ena_ = false;
VkDebugReportCallbackEXT dbg_report_callback_ = nullptr;
void* renderdoc_api_ = nullptr;


@ -10,31 +10,35 @@
#ifndef XENIA_UI_VULKAN_VULKAN_MEM_ALLOC_H_
#define XENIA_UI_VULKAN_VULKAN_MEM_ALLOC_H_
#include "third_party/volk/volk.h"
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#include "third_party/vulkan/vk_mem_alloc.h"
#include "xenia/ui/vulkan/vulkan_device.h"
#include "xenia/ui/vulkan/vulkan_instance.h"
namespace xe {
namespace ui {
namespace vulkan {
inline void FillVMAVulkanFunctions(VmaVulkanFunctions* vma_funcs) {
vma_funcs->vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
inline void FillVMAVulkanFunctions(VmaVulkanFunctions* vma_funcs,
const VulkanDevice& device) {
const VulkanInstance::InstanceFunctions& ifn = device.instance()->ifn();
const VulkanDevice::DeviceFunctions& dfn = device.dfn();
vma_funcs->vkGetPhysicalDeviceProperties = ifn.vkGetPhysicalDeviceProperties;
vma_funcs->vkGetPhysicalDeviceMemoryProperties =
vkGetPhysicalDeviceMemoryProperties;
vma_funcs->vkAllocateMemory = vkAllocateMemory;
vma_funcs->vkFreeMemory = vkFreeMemory;
vma_funcs->vkMapMemory = vkMapMemory;
vma_funcs->vkUnmapMemory = vkUnmapMemory;
vma_funcs->vkBindBufferMemory = vkBindBufferMemory;
vma_funcs->vkBindImageMemory = vkBindImageMemory;
vma_funcs->vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
vma_funcs->vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
vma_funcs->vkCreateBuffer = vkCreateBuffer;
vma_funcs->vkDestroyBuffer = vkDestroyBuffer;
vma_funcs->vkCreateImage = vkCreateImage;
vma_funcs->vkDestroyImage = vkDestroyImage;
ifn.vkGetPhysicalDeviceMemoryProperties;
vma_funcs->vkAllocateMemory = dfn.vkAllocateMemory;
vma_funcs->vkFreeMemory = dfn.vkFreeMemory;
vma_funcs->vkMapMemory = dfn.vkMapMemory;
vma_funcs->vkUnmapMemory = dfn.vkUnmapMemory;
vma_funcs->vkBindBufferMemory = dfn.vkBindBufferMemory;
vma_funcs->vkBindImageMemory = dfn.vkBindImageMemory;
vma_funcs->vkGetBufferMemoryRequirements = dfn.vkGetBufferMemoryRequirements;
vma_funcs->vkGetImageMemoryRequirements = dfn.vkGetImageMemoryRequirements;
vma_funcs->vkCreateBuffer = dfn.vkCreateBuffer;
vma_funcs->vkDestroyBuffer = dfn.vkDestroyBuffer;
vma_funcs->vkCreateImage = dfn.vkCreateImage;
vma_funcs->vkDestroyImage = dfn.vkDestroyImage;
}
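With VMA_STATIC_VULKAN_FUNCTIONS defined to 0, vk_mem_alloc.h no longer references statically linked entry points, so every pointer must be supplied through VmaVulkanFunctions. A minimal usage sketch, assuming a VulkanDevice& device is available and field names per the bundled VMA version:

// Sketch: feed the manually loaded function pointers to VMA.
VmaVulkanFunctions vma_funcs = {};
FillVMAVulkanFunctions(&vma_funcs, device);
VmaAllocatorCreateInfo allocator_info = {};
allocator_info.physicalDevice = device;  // via operator VkPhysicalDevice()
allocator_info.device = device;          // via operator VkDevice()
allocator_info.pVulkanFunctions = &vma_funcs;
VmaAllocator allocator;
vmaCreateAllocator(&allocator_info, &allocator);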
} // namespace vulkan


@ -35,6 +35,8 @@ VulkanSwapChain::~VulkanSwapChain() { Shutdown(); }
VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
surface_ = surface;
const VulkanInstance::InstanceFunctions& ifn = instance_->ifn();
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
// Find a queue family that supports presentation.
@ -49,8 +51,8 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
continue;
}
status = vkGetPhysicalDeviceSurfaceSupportKHR(*device_, i, surface,
&surface_supported);
status = ifn.vkGetPhysicalDeviceSurfaceSupportKHR(*device_, i, surface,
&surface_supported);
if (status == VK_SUCCESS && surface_supported == VK_TRUE) {
queue_family_index = i;
break;
@ -80,13 +82,13 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
// Query supported target formats.
uint32_t count = 0;
status =
vkGetPhysicalDeviceSurfaceFormatsKHR(*device_, surface_, &count, nullptr);
status = ifn.vkGetPhysicalDeviceSurfaceFormatsKHR(*device_, surface_, &count,
nullptr);
CheckResult(status, "vkGetPhysicalDeviceSurfaceFormatsKHR");
std::vector<VkSurfaceFormatKHR> surface_formats;
surface_formats.resize(count);
status = vkGetPhysicalDeviceSurfaceFormatsKHR(*device_, surface_, &count,
surface_formats.data());
status = ifn.vkGetPhysicalDeviceSurfaceFormatsKHR(*device_, surface_, &count,
surface_formats.data());
CheckResult(status, "vkGetPhysicalDeviceSurfaceFormatsKHR");
if (status != VK_SUCCESS) {
return status;
@ -107,8 +109,8 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
// Query surface min/max/caps.
VkSurfaceCapabilitiesKHR surface_caps;
status = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(*device_, surface_,
&surface_caps);
status = ifn.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(*device_, surface_,
&surface_caps);
CheckResult(status, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
if (status != VK_SUCCESS) {
return status;
@ -116,16 +118,16 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
// Query surface properties so we can configure ourselves within bounds.
std::vector<VkPresentModeKHR> present_modes;
status = vkGetPhysicalDeviceSurfacePresentModesKHR(*device_, surface_, &count,
nullptr);
status = ifn.vkGetPhysicalDeviceSurfacePresentModesKHR(*device_, surface_,
&count, nullptr);
CheckResult(status, "vkGetPhysicalDeviceSurfacePresentModesKHR");
if (status != VK_SUCCESS) {
return status;
}
present_modes.resize(count);
status = vkGetPhysicalDeviceSurfacePresentModesKHR(*device_, surface_, &count,
present_modes.data());
status = ifn.vkGetPhysicalDeviceSurfacePresentModesKHR(
*device_, surface_, &count, present_modes.data());
CheckResult(status, "vkGetPhysicalDeviceSurfacePresentModesKHR");
if (status != VK_SUCCESS) {
return status;
@ -210,7 +212,7 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
XELOGVK(" imageSharingMode = {}", to_string(create_info.imageSharingMode));
XELOGVK(" queueFamilyCount = {}", create_info.queueFamilyIndexCount);
status = vkCreateSwapchainKHR(*device_, &create_info, nullptr, &handle);
status = dfn.vkCreateSwapchainKHR(*device_, &create_info, nullptr, &handle);
if (status != VK_SUCCESS) {
XELOGE("Failed to create swapchain: {}", to_string(status));
return status;
@ -223,7 +225,8 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
cmd_pool_info.pNext = nullptr;
cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
cmd_pool_info.queueFamilyIndex = presentation_queue_family_;
status = vkCreateCommandPool(*device_, &cmd_pool_info, nullptr, &cmd_pool_);
status =
dfn.vkCreateCommandPool(*device_, &cmd_pool_info, nullptr, &cmd_pool_);
CheckResult(status, "vkCreateCommandPool");
if (status != VK_SUCCESS) {
return status;
@ -236,7 +239,8 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
cmd_buffer_info.commandPool = cmd_pool_;
cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd_buffer_info.commandBufferCount = 2;
status = vkAllocateCommandBuffers(*device_, &cmd_buffer_info, &cmd_buffer_);
status =
dfn.vkAllocateCommandBuffers(*device_, &cmd_buffer_info, &cmd_buffer_);
CheckResult(status, "vkCreateCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -247,7 +251,7 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
cmd_buffer_info.commandBufferCount = 2;
status =
vkAllocateCommandBuffers(*device_, &cmd_buffer_info, command_buffers);
dfn.vkAllocateCommandBuffers(*device_, &cmd_buffer_info, command_buffers);
CheckResult(status, "vkCreateCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -296,8 +300,8 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
render_pass_info.pSubpasses = &render_subpass;
render_pass_info.dependencyCount = 0;
render_pass_info.pDependencies = nullptr;
status =
vkCreateRenderPass(*device_, &render_pass_info, nullptr, &render_pass_);
status = dfn.vkCreateRenderPass(*device_, &render_pass_info, nullptr,
&render_pass_);
CheckResult(status, "vkCreateRenderPass");
if (status != VK_SUCCESS) {
return status;
@ -308,16 +312,16 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_info.pNext = nullptr;
semaphore_info.flags = 0;
status = vkCreateSemaphore(*device_, &semaphore_info, nullptr,
&image_available_semaphore_);
status = dfn.vkCreateSemaphore(*device_, &semaphore_info, nullptr,
&image_available_semaphore_);
CheckResult(status, "vkCreateSemaphore");
if (status != VK_SUCCESS) {
return status;
}
// Create another semaphore used to synchronize writes to the swap image.
status = vkCreateSemaphore(*device_, &semaphore_info, nullptr,
&image_usage_semaphore_);
status = dfn.vkCreateSemaphore(*device_, &semaphore_info, nullptr,
&image_usage_semaphore_);
CheckResult(status, "vkCreateSemaphore");
if (status != VK_SUCCESS) {
return status;
@ -327,16 +331,16 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
// Note that this may differ from our requested amount.
uint32_t actual_image_count = 0;
std::vector<VkImage> images;
status =
vkGetSwapchainImagesKHR(*device_, handle, &actual_image_count, nullptr);
status = dfn.vkGetSwapchainImagesKHR(*device_, handle, &actual_image_count,
nullptr);
CheckResult(status, "vkGetSwapchainImagesKHR");
if (status != VK_SUCCESS) {
return status;
}
images.resize(actual_image_count);
status = vkGetSwapchainImagesKHR(*device_, handle, &actual_image_count,
images.data());
status = dfn.vkGetSwapchainImagesKHR(*device_, handle, &actual_image_count,
images.data());
CheckResult(status, "vkGetSwapchainImagesKHR");
if (status != VK_SUCCESS) {
return status;
@ -360,8 +364,8 @@ VkResult VulkanSwapChain::Initialize(VkSurfaceKHR surface) {
nullptr,
VK_FENCE_CREATE_SIGNALED_BIT,
};
status = vkCreateFence(*device_, &fence_create_info, nullptr,
&synchronization_fence_);
status = dfn.vkCreateFence(*device_, &fence_create_info, nullptr,
&synchronization_fence_);
CheckResult(status, "vkGetSwapchainImagesKHR");
if (status != VK_SUCCESS) {
return status;
@ -376,6 +380,7 @@ VkResult VulkanSwapChain::InitializeBuffer(Buffer* buffer,
DestroyBuffer(buffer);
buffer->image = target_image;
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
// Create an image view for the presentation image.
@ -396,8 +401,8 @@ VkResult VulkanSwapChain::InitializeBuffer(Buffer* buffer,
image_view_info.subresourceRange.levelCount = 1;
image_view_info.subresourceRange.baseArrayLayer = 0;
image_view_info.subresourceRange.layerCount = 1;
status = vkCreateImageView(*device_, &image_view_info, nullptr,
&buffer->image_view);
status = dfn.vkCreateImageView(*device_, &image_view_info, nullptr,
&buffer->image_view);
CheckResult(status, "vkCreateImageView");
if (status != VK_SUCCESS) {
return status;
@ -416,8 +421,8 @@ VkResult VulkanSwapChain::InitializeBuffer(Buffer* buffer,
framebuffer_info.width = surface_width_;
framebuffer_info.height = surface_height_;
framebuffer_info.layers = 1;
status = vkCreateFramebuffer(*device_, &framebuffer_info, nullptr,
&buffer->framebuffer);
status = dfn.vkCreateFramebuffer(*device_, &framebuffer_info, nullptr,
&buffer->framebuffer);
CheckResult(status, "vkCreateFramebuffer");
if (status != VK_SUCCESS) {
return status;
@ -427,12 +432,13 @@ VkResult VulkanSwapChain::InitializeBuffer(Buffer* buffer,
}
void VulkanSwapChain::DestroyBuffer(Buffer* buffer) {
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
if (buffer->framebuffer) {
vkDestroyFramebuffer(*device_, buffer->framebuffer, nullptr);
dfn.vkDestroyFramebuffer(*device_, buffer->framebuffer, nullptr);
buffer->framebuffer = nullptr;
}
if (buffer->image_view) {
vkDestroyImageView(*device_, buffer->image_view, nullptr);
dfn.vkDestroyImageView(*device_, buffer->image_view, nullptr);
buffer->image_view = nullptr;
}
// Image is taken care of by the presentation engine.
@ -458,19 +464,21 @@ void VulkanSwapChain::Shutdown() {
}
buffers_.clear();
VK_SAFE_DESTROY(vkDestroySemaphore, *device_, image_available_semaphore_,
nullptr);
VK_SAFE_DESTROY(vkDestroyRenderPass, *device_, render_pass_, nullptr);
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
DestroyAndNullHandle(dfn.vkDestroySemaphore, *device_,
image_available_semaphore_);
DestroyAndNullHandle(dfn.vkDestroyRenderPass, *device_, render_pass_);
if (copy_cmd_buffer_) {
vkFreeCommandBuffers(*device_, cmd_pool_, 1, &copy_cmd_buffer_);
dfn.vkFreeCommandBuffers(*device_, cmd_pool_, 1, &copy_cmd_buffer_);
copy_cmd_buffer_ = nullptr;
}
if (render_cmd_buffer_) {
vkFreeCommandBuffers(*device_, cmd_pool_, 1, &render_cmd_buffer_);
dfn.vkFreeCommandBuffers(*device_, cmd_pool_, 1, &render_cmd_buffer_);
render_cmd_buffer_ = nullptr;
}
VK_SAFE_DESTROY(vkDestroyCommandPool, *device_, cmd_pool_, nullptr);
DestroyAndNullHandle(dfn.vkDestroyCommandPool, *device_, cmd_pool_);
if (presentation_queue_) {
if (!presentation_queue_mutex_) {
@ -482,33 +490,36 @@ void VulkanSwapChain::Shutdown() {
presentation_queue_family_ = -1;
}
VK_SAFE_DESTROY(vkDestroyFence, *device_, synchronization_fence_, nullptr);
DestroyAndNullHandle(dfn.vkDestroyFence, *device_, synchronization_fence_);
// images_ doesn't need to be cleaned up as the swapchain does it implicitly.
VK_SAFE_DESTROY(vkDestroySwapchainKHR, *device_, handle, nullptr);
VK_SAFE_DESTROY(vkDestroySurfaceKHR, *instance_, surface_, nullptr);
DestroyAndNullHandle(dfn.vkDestroySwapchainKHR, *device_, handle);
const VulkanInstance::InstanceFunctions& ifn = instance_->ifn();
DestroyAndNullHandle(ifn.vkDestroySurfaceKHR, *instance_, surface_);
}
VkResult VulkanSwapChain::Begin() {
wait_semaphores_.clear();
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
// Wait for the last swap to finish.
status = vkWaitForFences(*device_, 1, &synchronization_fence_, VK_TRUE, -1);
status =
dfn.vkWaitForFences(*device_, 1, &synchronization_fence_, VK_TRUE, -1);
if (status != VK_SUCCESS) {
return status;
}
status = vkResetFences(*device_, 1, &synchronization_fence_);
status = dfn.vkResetFences(*device_, 1, &synchronization_fence_);
if (status != VK_SUCCESS) {
return status;
}
// Get the index of the next available swapchain image.
status =
vkAcquireNextImageKHR(*device_, handle, 0, image_available_semaphore_,
nullptr, &current_buffer_index_);
dfn.vkAcquireNextImageKHR(*device_, handle, 0, image_available_semaphore_,
nullptr, &current_buffer_index_);
if (status != VK_SUCCESS) {
return status;
}
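// Note on the zero timeout above: vkAcquireNextImageKHR returns immediately
// with VK_NOT_READY if no image is available rather than blocking, so a
// non-success status here is not necessarily a fatal error for the caller.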
@ -531,7 +542,8 @@ VkResult VulkanSwapChain::Begin() {
if (presentation_queue_mutex_) {
presentation_queue_mutex_->lock();
}
status = vkQueueSubmit(presentation_queue_, 1, &wait_submit_info, nullptr);
status =
dfn.vkQueueSubmit(presentation_queue_, 1, &wait_submit_info, nullptr);
if (presentation_queue_mutex_) {
presentation_queue_mutex_->unlock();
}
@ -540,8 +552,8 @@ VkResult VulkanSwapChain::Begin() {
}
// Reset all command buffers.
vkResetCommandBuffer(render_cmd_buffer_, 0);
vkResetCommandBuffer(copy_cmd_buffer_, 0);
dfn.vkResetCommandBuffer(render_cmd_buffer_, 0);
dfn.vkResetCommandBuffer(copy_cmd_buffer_, 0);
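// Resetting individual command buffers requires the pool to have been created
// with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; cmd_pool_ is assumed
// to set that flag where it is created earlier in this file.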
auto& current_buffer = buffers_[current_buffer_index_];
// Build the command buffer that will execute all queued rendering buffers.
@ -561,7 +573,7 @@ VkResult VulkanSwapChain::Begin() {
begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
begin_info.pInheritanceInfo = &inherit_info;
status = vkBeginCommandBuffer(render_cmd_buffer_, &begin_info);
status = dfn.vkBeginCommandBuffer(render_cmd_buffer_, &begin_info);
CheckResult(status, "vkBeginCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -569,7 +581,7 @@ VkResult VulkanSwapChain::Begin() {
// Start recording the copy command buffer as well.
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
status = vkBeginCommandBuffer(copy_cmd_buffer_, &begin_info);
status = dfn.vkBeginCommandBuffer(copy_cmd_buffer_, &begin_info);
CheckResult(status, "vkBeginCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -588,31 +600,32 @@ VkResult VulkanSwapChain::Begin() {
clear_color.float32[1] = 1.0f;
clear_color.float32[2] = 0.0f;
}
vkCmdClearColorImage(copy_cmd_buffer_, current_buffer.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1,
&clear_range);
dfn.vkCmdClearColorImage(copy_cmd_buffer_, current_buffer.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color,
1, &clear_range);
return VK_SUCCESS;
}
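// Illustrative caller-side flow (hypothetical names; the real driver lives in
// the windowing code that owns this swap chain):
//   swap_chain->Begin();   // acquire the image, open the command buffers
//   ...record UI draws into the render command buffer...
//   swap_chain->End();     // copy, render pass, submit, then present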
VkResult VulkanSwapChain::End() {
auto& current_buffer = buffers_[current_buffer_index_];
const VulkanDevice::DeviceFunctions& dfn = device_->dfn();
VkResult status;
status = vkEndCommandBuffer(render_cmd_buffer_);
status = dfn.vkEndCommandBuffer(render_cmd_buffer_);
CheckResult(status, "vkEndCommandBuffer");
if (status != VK_SUCCESS) {
return status;
}
status = vkEndCommandBuffer(copy_cmd_buffer_);
status = dfn.vkEndCommandBuffer(copy_cmd_buffer_);
CheckResult(status, "vkEndCommandBuffer");
if (status != VK_SUCCESS) {
return status;
}
// Build primary command buffer.
status = vkResetCommandBuffer(cmd_buffer_, 0);
status = dfn.vkResetCommandBuffer(cmd_buffer_, 0);
CheckResult(status, "vkResetCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -623,7 +636,7 @@ VkResult VulkanSwapChain::End() {
begin_info.pNext = nullptr;
begin_info.flags = 0;
begin_info.pInheritanceInfo = nullptr;
status = vkBeginCommandBuffer(cmd_buffer_, &begin_info);
status = dfn.vkBeginCommandBuffer(cmd_buffer_, &begin_info);
CheckResult(status, "vkBeginCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -642,14 +655,14 @@ VkResult VulkanSwapChain::End() {
pre_image_copy_barrier.image = current_buffer.image;
pre_image_copy_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0,
1};
vkCmdPipelineBarrier(cmd_buffer_, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &pre_image_copy_barrier);
dfn.vkCmdPipelineBarrier(cmd_buffer_, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &pre_image_copy_barrier);
current_buffer.image_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
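// image_layout is tracked CPU-side after every barrier so the next transition
// can supply an accurate oldLayout; passing VK_IMAGE_LAYOUT_UNDEFINED instead
// would permit the implementation to discard the image contents.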
// Execute copy commands.
vkCmdExecuteCommands(cmd_buffer_, 1, &copy_cmd_buffer_);
dfn.vkCmdExecuteCommands(cmd_buffer_, 1, &copy_cmd_buffer_);
// Transition the image to a color attachment target for drawing.
VkImageMemoryBarrier pre_image_memory_barrier;
@ -669,9 +682,9 @@ VkResult VulkanSwapChain::End() {
pre_image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
pre_image_memory_barrier.oldLayout = current_buffer.image_layout;
pre_image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
vkCmdPipelineBarrier(cmd_buffer_, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
nullptr, 0, nullptr, 1, &pre_image_memory_barrier);
dfn.vkCmdPipelineBarrier(cmd_buffer_, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
nullptr, 0, nullptr, 1, &pre_image_memory_barrier);
current_buffer.image_layout = pre_image_memory_barrier.newLayout;
@ -687,14 +700,14 @@ VkResult VulkanSwapChain::End() {
render_pass_begin_info.renderArea.extent.height = surface_height_;
render_pass_begin_info.clearValueCount = 0;
render_pass_begin_info.pClearValues = nullptr;
vkCmdBeginRenderPass(cmd_buffer_, &render_pass_begin_info,
VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
dfn.vkCmdBeginRenderPass(cmd_buffer_, &render_pass_begin_info,
VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
// Render commands.
vkCmdExecuteCommands(cmd_buffer_, 1, &render_cmd_buffer_);
dfn.vkCmdExecuteCommands(cmd_buffer_, 1, &render_cmd_buffer_);
// End render pass.
vkCmdEndRenderPass(cmd_buffer_);
dfn.vkCmdEndRenderPass(cmd_buffer_);
// Transition the image to a format the presentation engine can source from.
// FIXME: Do we need more synchronization here with the copy buffer?
@ -715,14 +728,14 @@ VkResult VulkanSwapChain::End() {
post_image_memory_barrier.subresourceRange.levelCount = 1;
post_image_memory_barrier.subresourceRange.baseArrayLayer = 0;
post_image_memory_barrier.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(cmd_buffer_,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &post_image_memory_barrier);
dfn.vkCmdPipelineBarrier(cmd_buffer_,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &post_image_memory_barrier);
current_buffer.image_layout = post_image_memory_barrier.newLayout;
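// The presentation engine requires the image to be in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR before vkQueuePresentKHR; the barrier above
// performs that final transition.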
status = vkEndCommandBuffer(cmd_buffer_);
status = dfn.vkEndCommandBuffer(cmd_buffer_);
CheckResult(status, "vkEndCommandBuffer");
if (status != VK_SUCCESS) {
return status;
@ -752,8 +765,8 @@ VkResult VulkanSwapChain::End() {
if (presentation_queue_mutex_) {
presentation_queue_mutex_->lock();
}
status = vkQueueSubmit(presentation_queue_, 1, &render_submit_info,
synchronization_fence_);
status = dfn.vkQueueSubmit(presentation_queue_, 1, &render_submit_info,
synchronization_fence_);
if (presentation_queue_mutex_) {
presentation_queue_mutex_->unlock();
}
@ -777,7 +790,7 @@ VkResult VulkanSwapChain::End() {
if (presentation_queue_mutex_) {
presentation_queue_mutex_->lock();
}
status = vkQueuePresentKHR(presentation_queue_, &present_info);
status = dfn.vkQueuePresentKHR(presentation_queue_, &present_info);
if (presentation_queue_mutex_) {
presentation_queue_mutex_->unlock();
}

View File

@ -26,39 +26,37 @@ namespace xe {
namespace ui {
namespace vulkan {
#define VK_SAFE_DESTROY(fn, dev, obj, alloc) \
  do {                                       \
    if (obj) {                               \
      fn(dev, obj, alloc);                   \
      obj = nullptr;                         \
    }                                        \
  } while (0)

class Fence {
 public:
  Fence(VkDevice device) : device_(device) {
    VkFenceCreateInfo fence_info;
    fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fence_info.pNext = nullptr;
    fence_info.flags = 0;
    vkCreateFence(device, &fence_info, nullptr, &fence_);
  }
  ~Fence() {
    vkDestroyFence(device_, fence_, nullptr);
    fence_ = nullptr;
  }

  VkResult status() const { return vkGetFenceStatus(device_, fence_); }

  VkFence fence() const { return fence_; }
  operator VkFence() const { return fence_; }

 private:
  VkDevice device_;
  VkFence fence_ = nullptr;
};

template <typename F, typename T>
inline bool DestroyAndNullHandle(F* destroy_function, T& handle) {
  if (handle != VK_NULL_HANDLE) {
    destroy_function(handle, nullptr);
    handle = VK_NULL_HANDLE;
    return true;
  }
  return false;
}

template <typename F, typename T>
inline bool DestroyAndNullHandle(F* destroy_function, VkInstance parent,
                                 T& handle) {
  if (handle != VK_NULL_HANDLE) {
    destroy_function(parent, handle, nullptr);
    handle = VK_NULL_HANDLE;
    return true;
  }
  return false;
}

template <typename F, typename T>
inline bool DestroyAndNullHandle(F* destroy_function, VkDevice parent,
                                 T& handle) {
  if (handle != VK_NULL_HANDLE) {
    destroy_function(parent, handle, nullptr);
    handle = VK_NULL_HANDLE;
    return true;
  }
  return false;
}
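// Call sites in this commit migrate from the variadic macro to the typed
// helpers, e.g. (from VulkanSwapChain::Shutdown above):
//   Before: VK_SAFE_DESTROY(vkDestroyFence, *device_, synchronization_fence_, nullptr);
//   After:  DestroyAndNullHandle(dfn.vkDestroyFence, *device_, synchronization_fence_);
// Unlike the macro, the helpers take the loaded function pointer from the
// instance/device function tables and report whether a live handle was
// actually destroyed.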
struct Version {
uint32_t major;

1 third_party/volk vendored

@ -1 +0,0 @@
Subproject commit 30a851b67e129a3d91f191b2e9dcdad65ba98438

30 third_party/volk.lua vendored
View File

@ -1,30 +0,0 @@
group("third_party")
project("volk")
uuid("C9781C93-2DF5-47A2-94EE-2C5EBED61239")
kind("StaticLib")
language("C")
defines({
"_LIB",
"API_NAME=\"vulkan\"",
})
removedefines({
"_UNICODE",
"UNICODE",
})
includedirs({
"volk",
})
files({
"volk/volk.c",
"volk/volk.h",
})
filter("platforms:Windows")
defines({
"VK_USE_PLATFORM_WIN32_KHR",
})
filter("platforms:Linux")
defines({
"VK_USE_PLATFORM_XCB_KHR",
})