[Vulkan] Robustify error handling during initialization

DrChat 2017-12-16 15:14:48 -06:00
parent 293878cd14
commit 49287579ff
13 changed files with 339 additions and 136 deletions
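Every subsystem touched by this commit converges on the same shape: fallible Vulkan calls move out of the constructor into a VkResult-returning Initialize(), the destructor delegates to Shutdown(), and Shutdown() null-checks and clears each handle so it is safe to run on a partially initialized object (and safe to run twice). A minimal sketch of that pattern, using a hypothetical ExampleCache and a bare VkDevice — illustration only, not code from this diff:

#include <vulkan/vulkan.h>

class ExampleCache {
 public:
  // The constructor can no longer fail; it only stores state.
  explicit ExampleCache(VkDevice device) : device_(device) {}
  // The destructor delegates to Shutdown(), as BufferCache et al. do below.
  ~ExampleCache() { Shutdown(); }

  VkResult Initialize() {
    VkEventCreateInfo info = {
        VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0,
    };
    // Propagate the raw VkResult instead of asserting on failure.
    VkResult status = vkCreateEvent(device_, &info, nullptr, &event_);
    if (status != VK_SUCCESS) {
      return status;  // Caller logs, calls Shutdown(), and bails out.
    }
    return VK_SUCCESS;
  }

  // Idempotent: every handle is null-checked and nulled out, so this is
  // safe after a failed Initialize() and safe to call more than once.
  void Shutdown() {
    if (event_) {
      vkDestroyEvent(device_, event_, nullptr);
      event_ = nullptr;
    }
  }

 private:
  VkDevice device_ = nullptr;
  VkEvent event_ = nullptr;
};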

src/xenia/gpu/vulkan/buffer_cache.cc

@@ -27,19 +27,24 @@ constexpr VkDeviceSize kConstantRegisterUniformRange =
 BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
                          ui::vulkan::VulkanDevice* device, size_t capacity)
-    : register_file_(register_file), memory_(memory), device_(*device) {
+    : register_file_(register_file), memory_(memory), device_(device) {
   transient_buffer_ = std::make_unique<ui::vulkan::CircularBuffer>(
-      device,
+      device_,
       VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
           VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
       capacity);
+}
+
+BufferCache::~BufferCache() { Shutdown(); }
+
+VkResult BufferCache::Initialize() {
   VkMemoryRequirements pool_reqs;
   transient_buffer_->GetBufferMemoryRequirements(&pool_reqs);
-  gpu_memory_pool_ = device->AllocateMemory(pool_reqs);
-  if (!transient_buffer_->Initialize(gpu_memory_pool_, 0)) {
-    assert_always();
+  gpu_memory_pool_ = device_->AllocateMemory(pool_reqs);
+  VkResult status = transient_buffer_->Initialize(gpu_memory_pool_, 0);
+  if (status != VK_SUCCESS) {
+    return status;
   }
 
   // Descriptor pool used for all of our cached descriptors.
@@ -56,9 +61,11 @@ BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
   pool_sizes[0].descriptorCount = 2;
   descriptor_pool_info.poolSizeCount = 1;
   descriptor_pool_info.pPoolSizes = pool_sizes;
-  auto err = vkCreateDescriptorPool(device_, &descriptor_pool_info, nullptr,
-                                    &descriptor_pool_);
-  CheckResult(err, "vkCreateDescriptorPool");
+  status = vkCreateDescriptorPool(*device_, &descriptor_pool_info, nullptr,
+                                  &descriptor_pool_);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Create the descriptor set layout used for our uniform buffer.
   // As it is a static binding that uses dynamic offsets during draws we can
@@ -83,14 +90,17 @@ BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
   descriptor_set_layout_info.pNext = nullptr;
   descriptor_set_layout_info.flags = 0;
   VkDescriptorSetLayoutBinding uniform_bindings[] = {
-      vertex_uniform_binding, fragment_uniform_binding,
+      vertex_uniform_binding,
+      fragment_uniform_binding,
   };
   descriptor_set_layout_info.bindingCount =
       static_cast<uint32_t>(xe::countof(uniform_bindings));
   descriptor_set_layout_info.pBindings = uniform_bindings;
-  err = vkCreateDescriptorSetLayout(device_, &descriptor_set_layout_info,
-                                    nullptr, &descriptor_set_layout_);
-  CheckResult(err, "vkCreateDescriptorSetLayout");
+  status = vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
+                                       nullptr, &descriptor_set_layout_);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Create the descriptor we'll use for the uniform buffer.
   // This is what we hand out to everyone (who then also needs to use our
@@ -101,9 +111,11 @@ BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
   set_alloc_info.descriptorPool = descriptor_pool_;
   set_alloc_info.descriptorSetCount = 1;
   set_alloc_info.pSetLayouts = &descriptor_set_layout_;
-  err = vkAllocateDescriptorSets(device_, &set_alloc_info,
-                                 &transient_descriptor_set_);
-  CheckResult(err, "vkAllocateDescriptorSets");
+  status = vkAllocateDescriptorSets(*device_, &set_alloc_info,
+                                    &transient_descriptor_set_);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Initialize descriptor set with our buffers.
   VkDescriptorBufferInfo buffer_info;
@@ -132,18 +144,33 @@ BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
   fragment_uniform_binding_write.descriptorType =
       VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
   fragment_uniform_binding_write.pBufferInfo = &buffer_info;
-  vkUpdateDescriptorSets(device_, 2, descriptor_writes, 0, nullptr);
+  vkUpdateDescriptorSets(*device_, 2, descriptor_writes, 0, nullptr);
+
+  return VK_SUCCESS;
 }
 
-BufferCache::~BufferCache() {
-  vkFreeDescriptorSets(device_, descriptor_pool_, 1,
-                       &transient_descriptor_set_);
-  vkDestroyDescriptorSetLayout(device_, descriptor_set_layout_, nullptr);
-  vkDestroyDescriptorPool(device_, descriptor_pool_, nullptr);
+void BufferCache::Shutdown() {
+  if (transient_descriptor_set_) {
+    vkFreeDescriptorSets(*device_, descriptor_pool_, 1,
+                         &transient_descriptor_set_);
+    transient_descriptor_set_ = nullptr;
+  }
+  if (descriptor_set_layout_) {
+    vkDestroyDescriptorSetLayout(*device_, descriptor_set_layout_, nullptr);
+    descriptor_set_layout_ = nullptr;
+  }
+  if (descriptor_pool_) {
+    vkDestroyDescriptorPool(*device_, descriptor_pool_, nullptr);
+    descriptor_pool_ = nullptr;
+  }
   transient_buffer_->Shutdown();
   if (gpu_memory_pool_) {
-    vkFreeMemory(device_, gpu_memory_pool_, nullptr);
+    vkFreeMemory(*device_, gpu_memory_pool_, nullptr);
+    gpu_memory_pool_ = nullptr;
   }
 }
@@ -416,7 +443,7 @@ void BufferCache::Flush(VkCommandBuffer command_buffer) {
   dirty_range.memory = transient_buffer_->gpu_memory();
   dirty_range.offset = 0;
   dirty_range.size = transient_buffer_->capacity();
-  vkFlushMappedMemoryRanges(device_, 1, &dirty_range);
+  vkFlushMappedMemoryRanges(*device_, 1, &dirty_range);
 }
 
 void BufferCache::InvalidateCache() { transient_cache_.clear(); }

src/xenia/gpu/vulkan/buffer_cache.h

@@ -33,6 +33,9 @@ class BufferCache {
               ui::vulkan::VulkanDevice* device, size_t capacity);
   ~BufferCache();
 
+  VkResult Initialize();
+  void Shutdown();
+
   // Descriptor set containing the dynamic uniform buffer used for constant
   // uploads. Used in conjunction with a dynamic offset returned by
   // UploadConstantRegisters.
@@ -109,7 +112,7 @@ class BufferCache {
   RegisterFile* register_file_ = nullptr;
   Memory* memory_ = nullptr;
-  VkDevice device_ = nullptr;
+  ui::vulkan::VulkanDevice* device_ = nullptr;
 
   VkDeviceMemory gpu_memory_pool_ = nullptr;

src/xenia/gpu/vulkan/pipeline_cache.cc

@@ -33,11 +33,20 @@ using xe::ui::vulkan::CheckResult;
 #include "xenia/gpu/vulkan/shaders/bin/quad_list_geom.h"
 #include "xenia/gpu/vulkan/shaders/bin/rect_list_geom.h"
 
-PipelineCache::PipelineCache(
-    RegisterFile* register_file, ui::vulkan::VulkanDevice* device,
-    VkDescriptorSetLayout uniform_descriptor_set_layout,
-    VkDescriptorSetLayout texture_descriptor_set_layout)
+PipelineCache::PipelineCache(RegisterFile* register_file,
+                             ui::vulkan::VulkanDevice* device)
     : register_file_(register_file), device_(*device) {
+  // We can also use the GLSL translator with a Vulkan dialect.
+  shader_translator_.reset(new SpirvShaderTranslator());
+}
+
+PipelineCache::~PipelineCache() { Shutdown(); }
+
+VkResult PipelineCache::Initialize(
+    VkDescriptorSetLayout uniform_descriptor_set_layout,
+    VkDescriptorSetLayout texture_descriptor_set_layout) {
+  VkResult status;
+
   // Initialize the shared driver pipeline cache.
   // We'll likely want to serialize this and reuse it, if that proves to be
   // useful. If the shaders are expensive and this helps we could do it per
@@ -48,9 +57,11 @@ PipelineCache::PipelineCache(
   pipeline_cache_info.flags = 0;
   pipeline_cache_info.initialDataSize = 0;
   pipeline_cache_info.pInitialData = nullptr;
-  auto err = vkCreatePipelineCache(device_, &pipeline_cache_info, nullptr,
-                                   &pipeline_cache_);
-  CheckResult(err, "vkCreatePipelineCache");
+  status = vkCreatePipelineCache(device_, &pipeline_cache_info, nullptr,
+                                 &pipeline_cache_);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Descriptors used by the pipelines.
   // These are the only ones we can ever bind.
@@ -82,9 +93,11 @@ PipelineCache::PipelineCache(
   pipeline_layout_info.pushConstantRangeCount =
       static_cast<uint32_t>(xe::countof(push_constant_ranges));
   pipeline_layout_info.pPushConstantRanges = push_constant_ranges;
-  err = vkCreatePipelineLayout(*device, &pipeline_layout_info, nullptr,
-                               &pipeline_layout_);
-  CheckResult(err, "vkCreatePipelineLayout");
+  status = vkCreatePipelineLayout(device_, &pipeline_layout_info, nullptr,
+                                  &pipeline_layout_);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Initialize our shared geometry shaders.
   // These will be used as needed to emulate primitive types Vulkan doesn't
@@ -97,34 +110,48 @@ PipelineCache::PipelineCache(
       static_cast<uint32_t>(sizeof(line_quad_list_geom));
   shader_module_info.pCode =
       reinterpret_cast<const uint32_t*>(line_quad_list_geom);
-  err = vkCreateShaderModule(device_, &shader_module_info, nullptr,
-                             &geometry_shaders_.line_quad_list);
-  CheckResult(err, "vkCreateShaderModule");
-  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(point_list_geom));
-  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(point_list_geom);
-  err = vkCreateShaderModule(device_, &shader_module_info, nullptr,
-                             &geometry_shaders_.point_list);
-  CheckResult(err, "vkCreateShaderModule");
-  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(quad_list_geom));
-  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(quad_list_geom);
-  err = vkCreateShaderModule(device_, &shader_module_info, nullptr,
-                             &geometry_shaders_.quad_list);
-  CheckResult(err, "vkCreateShaderModule");
-  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(rect_list_geom));
-  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(rect_list_geom);
-  err = vkCreateShaderModule(device_, &shader_module_info, nullptr,
-                             &geometry_shaders_.rect_list);
-  CheckResult(err, "vkCreateShaderModule");
-  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(dummy_frag));
-  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(dummy_frag);
-  err = vkCreateShaderModule(device_, &shader_module_info, nullptr,
-                             &dummy_pixel_shader_);
-
-  // We can also use the GLSL translator with a Vulkan dialect.
-  shader_translator_.reset(new SpirvShaderTranslator());
-}
+  status = vkCreateShaderModule(device_, &shader_module_info, nullptr,
+                                &geometry_shaders_.line_quad_list);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
+  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(point_list_geom));
+  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(point_list_geom);
+  status = vkCreateShaderModule(device_, &shader_module_info, nullptr,
+                                &geometry_shaders_.point_list);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
+  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(quad_list_geom));
+  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(quad_list_geom);
+  status = vkCreateShaderModule(device_, &shader_module_info, nullptr,
+                                &geometry_shaders_.quad_list);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
+  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(rect_list_geom));
+  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(rect_list_geom);
+  status = vkCreateShaderModule(device_, &shader_module_info, nullptr,
+                                &geometry_shaders_.rect_list);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
+  shader_module_info.codeSize = static_cast<uint32_t>(sizeof(dummy_frag));
+  shader_module_info.pCode = reinterpret_cast<const uint32_t*>(dummy_frag);
+  status = vkCreateShaderModule(device_, &shader_module_info, nullptr,
+                                &dummy_pixel_shader_);
+  if (status != VK_SUCCESS) {
+    return status;
+  }
+
+  return VK_SUCCESS;
+}
 
-PipelineCache::~PipelineCache() {
+void PipelineCache::Shutdown() {
   // Destroy all pipelines.
   for (auto it : cached_pipelines_) {
     vkDestroyPipeline(device_, it.second, nullptr);
@@ -132,14 +159,35 @@ PipelineCache::~PipelineCache() {
   cached_pipelines_.clear();
 
   // Destroy geometry shaders.
-  vkDestroyShaderModule(device_, geometry_shaders_.line_quad_list, nullptr);
-  vkDestroyShaderModule(device_, geometry_shaders_.point_list, nullptr);
-  vkDestroyShaderModule(device_, geometry_shaders_.quad_list, nullptr);
-  vkDestroyShaderModule(device_, geometry_shaders_.rect_list, nullptr);
-  vkDestroyShaderModule(device_, dummy_pixel_shader_, nullptr);
-  vkDestroyPipelineLayout(device_, pipeline_layout_, nullptr);
-  vkDestroyPipelineCache(device_, pipeline_cache_, nullptr);
+  if (geometry_shaders_.line_quad_list) {
+    vkDestroyShaderModule(device_, geometry_shaders_.line_quad_list, nullptr);
+    geometry_shaders_.line_quad_list = nullptr;
+  }
+  if (geometry_shaders_.point_list) {
+    vkDestroyShaderModule(device_, geometry_shaders_.point_list, nullptr);
+    geometry_shaders_.point_list = nullptr;
+  }
+  if (geometry_shaders_.quad_list) {
+    vkDestroyShaderModule(device_, geometry_shaders_.quad_list, nullptr);
+    geometry_shaders_.quad_list = nullptr;
+  }
+  if (geometry_shaders_.rect_list) {
+    vkDestroyShaderModule(device_, geometry_shaders_.rect_list, nullptr);
+    geometry_shaders_.rect_list = nullptr;
+  }
+  if (dummy_pixel_shader_) {
+    vkDestroyShaderModule(device_, dummy_pixel_shader_, nullptr);
+    dummy_pixel_shader_ = nullptr;
+  }
+  if (pipeline_layout_) {
+    vkDestroyPipelineLayout(device_, pipeline_layout_, nullptr);
+    pipeline_layout_ = nullptr;
+  }
+  if (pipeline_cache_) {
+    vkDestroyPipelineCache(device_, pipeline_cache_, nullptr);
+    pipeline_cache_ = nullptr;
+  }
 
   // Destroy all shaders.
   for (auto it : shader_map_) {
@@ -334,20 +382,20 @@ void PipelineCache::DumpShaderDisasmNV(
   pipeline_cache_info.flags = 0;
   pipeline_cache_info.initialDataSize = 0;
   pipeline_cache_info.pInitialData = nullptr;
-  auto err = vkCreatePipelineCache(device_, &pipeline_cache_info, nullptr,
-                                   &dummy_pipeline_cache);
-  CheckResult(err, "vkCreatePipelineCache");
+  auto status = vkCreatePipelineCache(device_, &pipeline_cache_info, nullptr,
+                                      &dummy_pipeline_cache);
+  CheckResult(status, "vkCreatePipelineCache");
 
   // Create a pipeline on the dummy cache and dump it.
   VkPipeline dummy_pipeline;
-  err = vkCreateGraphicsPipelines(device_, dummy_pipeline_cache, 1,
-                                  &pipeline_info, nullptr, &dummy_pipeline);
+  status = vkCreateGraphicsPipelines(device_, dummy_pipeline_cache, 1,
+                                     &pipeline_info, nullptr, &dummy_pipeline);
 
   std::vector<uint8_t> pipeline_data;
   size_t data_size = 0;
-  err = vkGetPipelineCacheData(device_, dummy_pipeline_cache, &data_size,
-                               nullptr);
-  if (err == VK_SUCCESS) {
+  status = vkGetPipelineCacheData(device_, dummy_pipeline_cache, &data_size,
+                                  nullptr);
+  if (status == VK_SUCCESS) {
     pipeline_data.resize(data_size);
     vkGetPipelineCacheData(device_, dummy_pipeline_cache, &data_size,
                            pipeline_data.data());

src/xenia/gpu/vulkan/pipeline_cache.h

@@ -39,11 +39,13 @@ class PipelineCache {
     kError,
   };
 
-  PipelineCache(RegisterFile* register_file, ui::vulkan::VulkanDevice* device,
-                VkDescriptorSetLayout uniform_descriptor_set_layout,
-                VkDescriptorSetLayout texture_descriptor_set_layout);
+  PipelineCache(RegisterFile* register_file, ui::vulkan::VulkanDevice* device);
   ~PipelineCache();
 
+  VkResult Initialize(VkDescriptorSetLayout uniform_descriptor_set_layout,
+                      VkDescriptorSetLayout texture_descriptor_set_layout);
+  void Shutdown();
+
   // Loads a shader from the cache, possibly translating it.
   VulkanShader* LoadShader(ShaderType shader_type, uint32_t guest_address,
                            const uint32_t* host_address, uint32_t dword_count);

src/xenia/gpu/vulkan/render_cache.cc

@@ -510,7 +510,11 @@ bool CachedRenderPass::IsCompatible(
 RenderCache::RenderCache(RegisterFile* register_file,
                          ui::vulkan::VulkanDevice* device)
-    : register_file_(register_file), device_(device) {
+    : register_file_(register_file), device_(device) {}
+
+RenderCache::~RenderCache() { Shutdown(); }
+
+VkResult RenderCache::Initialize() {
   VkResult status = VK_SUCCESS;
 
   // Create the buffer we'll bind to our memory.
@@ -524,8 +528,11 @@ RenderCache::RenderCache(RegisterFile* register_file,
   buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
   buffer_info.queueFamilyIndexCount = 0;
   buffer_info.pQueueFamilyIndices = nullptr;
-  status = vkCreateBuffer(*device, &buffer_info, nullptr, &edram_buffer_);
+  status = vkCreateBuffer(*device_, &buffer_info, nullptr, &edram_buffer_);
   CheckResult(status, "vkCreateBuffer");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Query requirements for the buffer.
   // It should be 1:1.
@@ -535,19 +542,24 @@ RenderCache::RenderCache(RegisterFile* register_file,
 
   // Allocate EDRAM memory.
   // TODO(benvanik): do we need it host visible?
-  edram_memory_ = device->AllocateMemory(buffer_requirements);
+  edram_memory_ = device_->AllocateMemory(buffer_requirements);
   assert_not_null(edram_memory_);
+  if (!edram_memory_) {
+    return VK_ERROR_INITIALIZATION_FAILED;
+  }
 
   // Bind buffer to map our entire memory.
   status = vkBindBufferMemory(*device_, edram_buffer_, edram_memory_, 0);
   CheckResult(status, "vkBindBufferMemory");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   if (status == VK_SUCCESS) {
     // For debugging, upload a grid into the EDRAM buffer.
     uint32_t* gpu_data = nullptr;
     status = vkMapMemory(*device_, edram_memory_, 0, buffer_requirements.size,
                          0, reinterpret_cast<void**>(&gpu_data));
-    CheckResult(status, "vkMapMemory");
 
     if (status == VK_SUCCESS) {
       for (int i = 0; i < kEdramBufferCapacity / 4; i++) {
@@ -557,9 +569,11 @@ RenderCache::RenderCache(RegisterFile* register_file,
       vkUnmapMemory(*device_, edram_memory_);
     }
   }
+
+  return VK_SUCCESS;
 }
 
-RenderCache::~RenderCache() {
+void RenderCache::Shutdown() {
   // TODO(benvanik): wait for idle.
 
   // Dispose all render passes (and their framebuffers).
@@ -575,8 +589,14 @@ RenderCache::~RenderCache() {
   cached_tile_views_.clear();
 
   // Release underlying EDRAM memory.
-  vkDestroyBuffer(*device_, edram_buffer_, nullptr);
-  vkFreeMemory(*device_, edram_memory_, nullptr);
+  if (edram_buffer_) {
+    vkDestroyBuffer(*device_, edram_buffer_, nullptr);
+    edram_buffer_ = nullptr;
+  }
+  if (edram_memory_) {
+    vkFreeMemory(*device_, edram_memory_, nullptr);
+    edram_memory_ = nullptr;
+  }
 }
 
 bool RenderCache::dirty() const {

src/xenia/gpu/vulkan/render_cache.h

@@ -276,6 +276,9 @@ class RenderCache {
   RenderCache(RegisterFile* register_file, ui::vulkan::VulkanDevice* device);
   ~RenderCache();
 
+  VkResult Initialize();
+  void Shutdown();
+
   // Call this to determine if you should start a new render pass or continue
   // with an already open pass.
   bool dirty() const;

src/xenia/gpu/vulkan/texture_cache.cc

@@ -128,8 +128,12 @@ TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
       staging_buffer_(device, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                       kStagingBufferSize),
       wb_staging_buffer_(device, VK_BUFFER_USAGE_TRANSFER_DST_BIT,
-                         kStagingBufferSize) {
-  VkResult err = VK_SUCCESS;
+                         kStagingBufferSize) {}
+
+TextureCache::~TextureCache() { Shutdown(); }
+
+VkResult TextureCache::Initialize() {
+  VkResult status = VK_SUCCESS;
 
   // Descriptor pool used for all of our cached descriptors.
   VkDescriptorPoolSize pool_sizes[1];
@@ -176,33 +180,43 @@ TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
   descriptor_set_layout_info.bindingCount =
       static_cast<uint32_t>(xe::countof(bindings));
   descriptor_set_layout_info.pBindings = bindings;
-  err = vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
-                                    nullptr, &texture_descriptor_set_layout_);
-  CheckResult(err, "vkCreateDescriptorSetLayout");
-
-  if (!staging_buffer_.Initialize()) {
-    assert_always();
+  status =
+      vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
+                                  nullptr, &texture_descriptor_set_layout_);
+  if (status != VK_SUCCESS) {
+    return status;
   }
-  if (!wb_staging_buffer_.Initialize()) {
-    assert_always();
+
+  status = staging_buffer_.Initialize();
+  if (status != VK_SUCCESS) {
+    return status;
+  }
+
+  status = wb_staging_buffer_.Initialize();
+  if (status != VK_SUCCESS) {
+    return status;
   }
 
   // Create a memory allocator for textures.
   VmaAllocatorCreateInfo alloc_info = {
       0, *device_, *device_, 0, 0, nullptr, nullptr,
   };
-  err = vmaCreateAllocator(&alloc_info, &mem_allocator_);
-  CheckResult(err, "vmaCreateAllocator");
+  status = vmaCreateAllocator(&alloc_info, &mem_allocator_);
+  if (status != VK_SUCCESS) {
+    vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
+                                 nullptr);
+    return status;
+  }
 
   invalidated_textures_sets_[0].reserve(64);
   invalidated_textures_sets_[1].reserve(64);
   invalidated_textures_ = &invalidated_textures_sets_[0];
 
   device_queue_ = device_->AcquireQueue();
+
+  return VK_SUCCESS;
 }
 
-TextureCache::~TextureCache() {
+void TextureCache::Shutdown() {
   if (device_queue_) {
     device_->ReleaseQueue(device_queue_);
   }
@@ -211,7 +225,10 @@ TextureCache::~TextureCache() {
   ClearCache();
   Scavenge();
 
-  vmaDestroyAllocator(mem_allocator_);
+  if (mem_allocator_ != nullptr) {
+    vmaDestroyAllocator(mem_allocator_);
+    mem_allocator_ = nullptr;
+  }
   vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
                                nullptr);
 }

src/xenia/gpu/vulkan/texture_cache.h

@@ -76,6 +76,9 @@ class TextureCache {
                TraceWriter* trace_writer, ui::vulkan::VulkanDevice* device);
   ~TextureCache();
 
+  VkResult Initialize();
+  void Shutdown();
+
   // Descriptor set layout containing all possible texture bindings.
   // The set contains one descriptor for each texture sampler [0-31].
   VkDescriptorSetLayout texture_descriptor_set_layout() const {

src/xenia/gpu/vulkan/vulkan_command_processor.cc

@@ -69,10 +69,14 @@ bool VulkanCommandProcessor::SetupContext() {
     queue_mutex_ = &device_->primary_queue_mutex();
   }
 
+  VkResult status = VK_SUCCESS;
+
   // Setup a blitter.
   blitter_ = std::make_unique<ui::vulkan::Blitter>();
-  if (!blitter_->Initialize(device_)) {
+  status = blitter_->Initialize(device_);
+  if (status != VK_SUCCESS) {
     XELOGE("Unable to initialize blitter");
+    blitter_->Shutdown();
     return false;
   }
 
@@ -83,21 +87,47 @@ bool VulkanCommandProcessor::SetupContext() {
   // Initialize the state machine caches.
   buffer_cache_ = std::make_unique<BufferCache>(
       register_file_, memory_, device_, kDefaultBufferCacheCapacity);
+  status = buffer_cache_->Initialize();
+  if (status != VK_SUCCESS) {
+    XELOGE("Unable to initialize buffer cache");
+    buffer_cache_->Shutdown();
+    return false;
+  }
+
   texture_cache_ = std::make_unique<TextureCache>(memory_, register_file_,
                                                   &trace_writer_, device_);
-  pipeline_cache_ = std::make_unique<PipelineCache>(
-      register_file_, device_, buffer_cache_->constant_descriptor_set_layout(),
-      texture_cache_->texture_descriptor_set_layout());
+  status = texture_cache_->Initialize();
+  if (status != VK_SUCCESS) {
+    XELOGE("Unable to initialize texture cache");
+    texture_cache_->Shutdown();
+    return false;
+  }
+
+  pipeline_cache_ = std::make_unique<PipelineCache>(register_file_, device_);
+  status = pipeline_cache_->Initialize(
+      buffer_cache_->constant_descriptor_set_layout(),
+      texture_cache_->texture_descriptor_set_layout());
+  if (status != VK_SUCCESS) {
+    XELOGE("Unable to initialize pipeline cache");
+    pipeline_cache_->Shutdown();
+    return false;
+  }
+
   render_cache_ = std::make_unique<RenderCache>(register_file_, device_);
+  status = render_cache_->Initialize();
+  if (status != VK_SUCCESS) {
+    XELOGE("Unable to initialize render cache");
+    render_cache_->Shutdown();
+    return false;
+  }
 
   VkEventCreateInfo info = {
       VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0,
   };
-  VkResult result =
-      vkCreateEvent(*device_, &info, nullptr,
-                    reinterpret_cast<VkEvent*>(&swap_state_.backend_data));
-  if (result != VK_SUCCESS) {
+  status = vkCreateEvent(*device_, &info, nullptr,
+                         reinterpret_cast<VkEvent*>(&swap_state_.backend_data));
+  if (status != VK_SUCCESS) {
     return false;
   }

src/xenia/ui/vulkan/blitter.cc

@@ -23,30 +23,41 @@ namespace vulkan {
 Blitter::Blitter() {}
 Blitter::~Blitter() { Shutdown(); }
 
-bool Blitter::Initialize(VulkanDevice* device) {
+VkResult Blitter::Initialize(VulkanDevice* device) {
   device_ = device;
 
+  VkResult status = VK_SUCCESS;
+
   // Shaders
   VkShaderModuleCreateInfo shader_create_info;
   std::memset(&shader_create_info, 0, sizeof(shader_create_info));
   shader_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
   shader_create_info.codeSize = sizeof(blit_vert);
   shader_create_info.pCode = reinterpret_cast<const uint32_t*>(blit_vert);
-  auto result = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
-                                     &blit_vertex_);
-  CheckResult(result, "vkCreateShaderModule");
+  status = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
+                                &blit_vertex_);
+  CheckResult(status, "vkCreateShaderModule");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   shader_create_info.codeSize = sizeof(blit_color_frag);
   shader_create_info.pCode = reinterpret_cast<const uint32_t*>(blit_color_frag);
-  result = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
-                                &blit_color_);
-  CheckResult(result, "vkCreateShaderModule");
+  status = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
+                                &blit_color_);
+  CheckResult(status, "vkCreateShaderModule");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   shader_create_info.codeSize = sizeof(blit_depth_frag);
   shader_create_info.pCode = reinterpret_cast<const uint32_t*>(blit_depth_frag);
-  result = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
-                                &blit_depth_);
-  CheckResult(result, "vkCreateShaderModule");
+  status = vkCreateShaderModule(*device_, &shader_create_info, nullptr,
+                                &blit_depth_);
+  CheckResult(status, "vkCreateShaderModule");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Create the descriptor set layout used for our texture sampler.
   // As it changes almost every draw we cache it per texture.
@@ -63,9 +74,12 @@ bool Blitter::Initialize(VulkanDevice* device) {
   texture_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
   texture_binding.pImmutableSamplers = nullptr;
   texture_set_layout_info.pBindings = &texture_binding;
-  result = vkCreateDescriptorSetLayout(*device_, &texture_set_layout_info,
-                                       nullptr, &descriptor_set_layout_);
-  CheckResult(result, "vkCreateDescriptorSetLayout");
+  status = vkCreateDescriptorSetLayout(*device_, &texture_set_layout_info,
+                                       nullptr, &descriptor_set_layout_);
+  CheckResult(status, "vkCreateDescriptorSetLayout");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Create a descriptor pool
   VkDescriptorPoolSize pool_sizes[1];
@@ -99,9 +113,12 @@ bool Blitter::Initialize(VulkanDevice* device) {
   pipeline_layout_info.pushConstantRangeCount =
       static_cast<uint32_t>(xe::countof(push_constant_ranges));
   pipeline_layout_info.pPushConstantRanges = push_constant_ranges;
-  result = vkCreatePipelineLayout(*device_, &pipeline_layout_info, nullptr,
-                                  &pipeline_layout_);
-  CheckResult(result, "vkCreatePipelineLayout");
+  status = vkCreatePipelineLayout(*device_, &pipeline_layout_info, nullptr,
+                                  &pipeline_layout_);
+  CheckResult(status, "vkCreatePipelineLayout");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   // Create two samplers.
   VkSamplerCreateInfo sampler_create_info = {
@@ -124,31 +141,63 @@ bool Blitter::Initialize(VulkanDevice* device) {
       VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,
       VK_FALSE,
   };
-  result =
-      vkCreateSampler(*device_, &sampler_create_info, nullptr, &samp_nearest_);
-  CheckResult(result, "vkCreateSampler");
+  status =
+      vkCreateSampler(*device_, &sampler_create_info, nullptr, &samp_nearest_);
+  CheckResult(status, "vkCreateSampler");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
   sampler_create_info.minFilter = VK_FILTER_LINEAR;
   sampler_create_info.magFilter = VK_FILTER_LINEAR;
   sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
-  result =
-      vkCreateSampler(*device_, &sampler_create_info, nullptr, &samp_linear_);
-  CheckResult(result, "vkCreateSampler");
+  status =
+      vkCreateSampler(*device_, &sampler_create_info, nullptr, &samp_linear_);
+  CheckResult(status, "vkCreateSampler");
+  if (status != VK_SUCCESS) {
+    return status;
+  }
 
-  return true;
+  return VK_SUCCESS;
 }
 
 void Blitter::Shutdown() {
-  vkDestroySampler(*device_, samp_nearest_, nullptr);
-  vkDestroySampler(*device_, samp_linear_, nullptr);
-  vkDestroyShaderModule(*device_, blit_vertex_, nullptr);
-  vkDestroyShaderModule(*device_, blit_color_, nullptr);
-  vkDestroyShaderModule(*device_, blit_depth_, nullptr);
-  vkDestroyPipeline(*device_, pipeline_color_, nullptr);
-  vkDestroyPipeline(*device_, pipeline_depth_, nullptr);
-  vkDestroyPipelineLayout(*device_, pipeline_layout_, nullptr);
-  vkDestroyDescriptorSetLayout(*device_, descriptor_set_layout_, nullptr);
+  if (samp_nearest_) {
+    vkDestroySampler(*device_, samp_nearest_, nullptr);
+    samp_nearest_ = nullptr;
+  }
+  if (samp_linear_) {
+    vkDestroySampler(*device_, samp_linear_, nullptr);
+    samp_linear_ = nullptr;
+  }
+  if (blit_vertex_) {
+    vkDestroyShaderModule(*device_, blit_vertex_, nullptr);
+    blit_vertex_ = nullptr;
+  }
+  if (blit_color_) {
+    vkDestroyShaderModule(*device_, blit_color_, nullptr);
+    blit_color_ = nullptr;
+  }
+  if (blit_depth_) {
+    vkDestroyShaderModule(*device_, blit_depth_, nullptr);
+    blit_depth_ = nullptr;
+  }
+  if (pipeline_color_) {
+    vkDestroyPipeline(*device_, pipeline_color_, nullptr);
+    pipeline_color_ = nullptr;
+  }
+  if (pipeline_depth_) {
+    vkDestroyPipeline(*device_, pipeline_depth_, nullptr);
+    pipeline_depth_ = nullptr;
+  }
+  if (pipeline_layout_) {
+    vkDestroyPipelineLayout(*device_, pipeline_layout_, nullptr);
+    pipeline_layout_ = nullptr;
+  }
+  if (descriptor_set_layout_) {
+    vkDestroyDescriptorSetLayout(*device_, descriptor_set_layout_, nullptr);
+    descriptor_set_layout_ = nullptr;
+  }
   for (auto& pipeline : pipelines_) {
     vkDestroyPipeline(*device_, pipeline.second, nullptr);
   }

src/xenia/ui/vulkan/blitter.h

@@ -27,7 +27,7 @@ class Blitter {
   Blitter();
   ~Blitter();
 
-  bool Initialize(VulkanDevice* device);
+  VkResult Initialize(VulkanDevice* device);
   void Scavenge();
   void Shutdown();

src/xenia/ui/vulkan/circular_buffer.cc

@@ -46,7 +46,8 @@ CircularBuffer::CircularBuffer(VulkanDevice* device, VkBufferUsageFlags usage,
 }
 CircularBuffer::~CircularBuffer() { Shutdown(); }
 
-bool CircularBuffer::Initialize(VkDeviceMemory memory, VkDeviceSize offset) {
+VkResult CircularBuffer::Initialize(VkDeviceMemory memory,
+                                    VkDeviceSize offset) {
   assert_true(offset % alignment_ == 0);
   gpu_memory_ = memory;
   gpu_base_ = offset;
@@ -59,7 +60,7 @@ bool CircularBuffer::Initialize(VkDeviceMemory memory, VkDeviceSize offset) {
   if (status != VK_SUCCESS) {
     XELOGE("CircularBuffer::Initialize - Failed to bind memory!");
     Shutdown();
-    return false;
+    return status;
   }
 
   // Map the memory so we can access it.
@@ -69,13 +70,13 @@ bool CircularBuffer::Initialize(VkDeviceMemory memory, VkDeviceSize offset) {
   if (status != VK_SUCCESS) {
     XELOGE("CircularBuffer::Initialize - Failed to map memory!");
     Shutdown();
-    return false;
+    return status;
   }
 
-  return true;
+  return VK_SUCCESS;
 }
 
-bool CircularBuffer::Initialize() {
+VkResult CircularBuffer::Initialize() {
   VkResult status = VK_SUCCESS;
 
   VkMemoryRequirements reqs;
@@ -87,7 +88,7 @@ bool CircularBuffer::Initialize() {
   if (!gpu_memory_) {
     XELOGE("CircularBuffer::Initialize - Failed to allocate memory!");
     Shutdown();
-    return false;
+    return VK_ERROR_INITIALIZATION_FAILED;
   }
 
   capacity_ = reqs.size;
@@ -99,7 +100,7 @@ bool CircularBuffer::Initialize() {
   if (status != VK_SUCCESS) {
     XELOGE("CircularBuffer::Initialize - Failed to bind memory!");
     Shutdown();
-    return false;
+    return status;
   }
 
   // Map the memory so we can access it.
@@ -109,10 +110,10 @@ bool CircularBuffer::Initialize() {
   if (status != VK_SUCCESS) {
     XELOGE("CircularBuffer::Initialize - Failed to map memory!");
     Shutdown();
-    return false;
+    return status;
   }
 
-  return true;
+  return VK_SUCCESS;
 }
 
 void CircularBuffer::Shutdown() {

src/xenia/ui/vulkan/circular_buffer.h

@@ -43,8 +43,8 @@ class CircularBuffer {
     VkFence fence;
   };
 
-  bool Initialize(VkDeviceMemory memory, VkDeviceSize offset);
-  bool Initialize();
+  VkResult Initialize(VkDeviceMemory memory, VkDeviceSize offset);
+  VkResult Initialize();
   void Shutdown();
 
   void GetBufferMemoryRequirements(VkMemoryRequirements* reqs);
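For reference, the caller-side contract that SetupContext() above establishes for each of these subsystems, condensed into one sketch (abridged from the diff; control flow only):

// Construct (infallible), Initialize(), and on any failure log the error,
// run the idempotent Shutdown(), and abort context setup.
buffer_cache_ = std::make_unique<BufferCache>(
    register_file_, memory_, device_, kDefaultBufferCacheCapacity);
VkResult status = buffer_cache_->Initialize();
if (status != VK_SUCCESS) {
  XELOGE("Unable to initialize buffer cache");
  buffer_cache_->Shutdown();  // Safe on a partially initialized cache.
  return false;
}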