Vulkan TextureCache: Use a hashed and fenced pool for descriptors
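Texture descriptor sets are now cached in texture_bindings_, keyed by an XXH64 hash of the bound texture fetch constants, so identical binding setups within a frame reuse a single VkDescriptorSet. The hand-rolled VkDescriptorPool and the in_flight_sets_ fence list are replaced by ui::vulkan::DescriptorPool, which hands out sets in batches tied to a frame completion fence and recycles them during Scavenge().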

Dr. Chat 2017-01-28 14:04:27 -06:00
parent a16dc261da
commit 366eeeaa60
2 changed files with 61 additions and 39 deletions

src/xenia/gpu/vulkan/texture_cache.cc

@@ -113,21 +113,15 @@ TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
       device_(device),
       staging_buffer_(device, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                       kStagingBufferSize) {
+  VkResult err = VK_SUCCESS;
+
   // Descriptor pool used for all of our cached descriptors.
-  VkDescriptorPoolCreateInfo descriptor_pool_info;
-  descriptor_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
-  descriptor_pool_info.pNext = nullptr;
-  descriptor_pool_info.flags =
-      VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
-  descriptor_pool_info.maxSets = 32768;
   VkDescriptorPoolSize pool_sizes[1];
   pool_sizes[0].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
   pool_sizes[0].descriptorCount = 32768;
-  descriptor_pool_info.poolSizeCount = 1;
-  descriptor_pool_info.pPoolSizes = pool_sizes;
-  auto err = vkCreateDescriptorPool(*device_, &descriptor_pool_info, nullptr,
-                                    &descriptor_pool_);
-  CheckResult(err, "vkCreateDescriptorPool");
+  descriptor_pool_ = std::make_unique<ui::vulkan::DescriptorPool>(
+      *device_, 32768,
+      std::vector<VkDescriptorPoolSize>(pool_sizes, std::end(pool_sizes)));

   // Create the descriptor set layout used for rendering.
   // We always have the same number of samplers but only some are used.
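Note that VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT drops out along with the create info: the old Scavenge() freed sets one at a time with vkFreeDescriptorSets, whereas the DescriptorPool wrapper tracks lifetime per fenced batch, so none of that pool plumbing needs to live in the texture cache anymore.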
@@ -177,7 +171,6 @@ TextureCache::~TextureCache() {
   vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
                                nullptr);
-  vkDestroyDescriptorPool(*device_, descriptor_pool_, nullptr);
 }

 TextureCache::Texture* TextureCache::AllocateTexture(
@@ -1191,10 +1184,44 @@ bool TextureCache::UploadTextureCube(VkCommandBuffer command_buffer,
   return true;
 }

+void TextureCache::HashTextureBindings(
+    XXH64_state_t* hash_state, uint32_t& fetch_mask,
+    const std::vector<Shader::TextureBinding>& bindings) {
+  for (auto& binding : bindings) {
+    uint32_t fetch_bit = 1 << binding.fetch_constant;
+    if (fetch_mask & fetch_bit) {
+      // We've covered this binding.
+      continue;
+    }
+    fetch_mask |= fetch_bit;
+
+    auto& regs = *register_file_;
+    int r = XE_GPU_REG_SHADER_CONSTANT_FETCH_00_0 + binding.fetch_constant * 6;
+    auto group =
+        reinterpret_cast<const xenos::xe_gpu_fetch_group_t*>(&regs.values[r]);
+    auto& fetch = group->texture_fetch;
+
+    XXH64_update(hash_state, &fetch, sizeof(fetch));
+  }
+}
+
 VkDescriptorSet TextureCache::PrepareTextureSet(
     VkCommandBuffer command_buffer, VkFence completion_fence,
     const std::vector<Shader::TextureBinding>& vertex_bindings,
     const std::vector<Shader::TextureBinding>& pixel_bindings) {
+  XXH64_state_t hash_state;
+  XXH64_reset(&hash_state, 0);
+
+  // (quickly) Generate a hash.
+  uint32_t fetch_mask = 0;
+  HashTextureBindings(&hash_state, fetch_mask, vertex_bindings);
+  HashTextureBindings(&hash_state, fetch_mask, pixel_bindings);
+  uint64_t hash = XXH64_digest(&hash_state);
+  for (auto it = texture_bindings_.find(hash); it != texture_bindings_.end();
+       ++it) {
+    // TODO(DrChat): We need to compare the bindings and ensure they're equal.
+    return it->second;
+  }
+
   // Clear state.
   auto update_set_info = &update_set_info_;
   update_set_info->has_setup_fetch_mask = 0;
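The lookup above is content-addressed memoization: hash the raw fetch constants and reuse the descriptor set on a hit. A standalone sketch of the same pattern, assuming a hypothetical AllocateAndWriteSet() helper in place of the pool allocation and vkUpdateDescriptorSets calls:

#include <cstddef>
#include <cstdint>
#include <unordered_map>

#include "third_party/xxhash/xxhash.h"
#include "xenia/ui/vulkan/vulkan.h"

// Hypothetical helper standing in for pool allocation + vkUpdateDescriptorSets.
VkDescriptorSet AllocateAndWriteSet(const void* fetch_constants, size_t length);

// One entry per unique blob of fetch constants; cleared each frame, like
// texture_bindings_ above.
std::unordered_map<uint64_t, VkDescriptorSet> set_cache;

VkDescriptorSet GetOrCreateSet(const void* fetch_constants, size_t length) {
  XXH64_state_t hash_state;
  XXH64_reset(&hash_state, 0);
  XXH64_update(&hash_state, fetch_constants, length);
  uint64_t key = XXH64_digest(&hash_state);

  auto it = set_cache.find(key);
  if (it != set_cache.end()) {
    return it->second;  // Same constants already seen this frame; reuse.
  }

  VkDescriptorSet set = AllocateAndWriteSet(fetch_constants, length);
  set_cache[key] = set;
  return set;
}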
@@ -1217,19 +1244,15 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
     // TODO(benvanik): actually bail out here?
   }

-  // TODO(benvanik): reuse.
-  VkDescriptorSet descriptor_set = nullptr;
-  VkDescriptorSetAllocateInfo set_alloc_info;
-  set_alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
-  set_alloc_info.pNext = nullptr;
-  set_alloc_info.descriptorPool = descriptor_pool_;
-  set_alloc_info.descriptorSetCount = 1;
-  set_alloc_info.pSetLayouts = &texture_descriptor_set_layout_;
-  auto err =
-      vkAllocateDescriptorSets(*device_, &set_alloc_info, &descriptor_set);
-  CheckResult(err, "vkAllocateDescriptorSets");
-  if (err != VK_SUCCESS) {
+  // Open a new batch of descriptor sets (for this frame)
+  if (!descriptor_pool_->has_open_batch()) {
+    descriptor_pool_->BeginBatch(completion_fence);
+  }
+
+  // TODO(benvanik): reuse.
+  auto descriptor_set =
+      descriptor_pool_->AcquireEntry(texture_descriptor_set_layout_);
+  if (!descriptor_set) {
     return nullptr;
   }
@@ -1243,7 +1266,7 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
                            update_set_info->image_writes, 0, nullptr);
   }

-  in_flight_sets_.push_back({descriptor_set, completion_fence});
+  texture_bindings_[hash] = descriptor_set;
   return descriptor_set;
 }
@@ -1351,20 +1374,16 @@ void TextureCache::ClearCache() {
 }

 void TextureCache::Scavenge() {
-  // Free unused descriptor sets
-  for (auto it = in_flight_sets_.begin(); it != in_flight_sets_.end();) {
-    if (vkGetFenceStatus(*device_, it->second) == VK_SUCCESS) {
-      // We can free this one.
-      vkFreeDescriptorSets(*device_, descriptor_pool_, 1, &it->first);
-      it = in_flight_sets_.erase(it);
-      continue;
-    }
-
-    // We've encountered an item that hasn't been used yet, so any items
-    // afterwards are guaranteed to be unused.
-    break;
+  // Close any open descriptor pool batches
+  if (descriptor_pool_->has_open_batch()) {
+    descriptor_pool_->EndBatch();
   }

+  // Free unused descriptor sets
+  // TODO(DrChat): These sets could persist across frames, we just need a smart
+  // way to detect if they're unused and free them.
+  texture_bindings_.clear();
+  descriptor_pool_->Scavenge();
+
   staging_buffer_.Scavenge();

   // Kill all pending delete textures.
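Pieced together from the calls this diff makes, the per-frame lifecycle of the fenced pool looks roughly like the following sketch; the helper names and the exact interleaving with the command processor are illustrative assumptions, not xenia code:

// During recording (PrepareTextureSet): lazily open a batch tied to this
// frame's completion fence, then acquire descriptor sets out of it.
VkDescriptorSet AcquireSetForFrame(ui::vulkan::DescriptorPool* pool,
                                   VkFence completion_fence,
                                   VkDescriptorSetLayout layout) {
  if (!pool->has_open_batch()) {
    pool->BeginBatch(completion_fence);
  }
  return pool->AcquireEntry(layout);  // nullptr if the pool is exhausted.
}

// Once per frame (Scavenge): seal the open batch, then recycle any earlier
// batches whose fences have signaled so their sets become reusable.
void EndFrame(ui::vulkan::DescriptorPool* pool) {
  if (pool->has_open_batch()) {
    pool->EndBatch();
  }
  pool->Scavenge();
}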

src/xenia/gpu/vulkan/texture_cache.h

@@ -20,6 +20,7 @@
 #include "xenia/gpu/vulkan/vulkan_command_processor.h"
 #include "xenia/gpu/xenos.h"
 #include "xenia/ui/vulkan/circular_buffer.h"
+#include "xenia/ui/vulkan/fenced_pools.h"
 #include "xenia/ui/vulkan/vulkan.h"
 #include "xenia/ui/vulkan/vulkan_device.h"
@@ -165,6 +166,8 @@ class TextureCache {
                              VkFence completion_fence, Texture* dest,
                              const TextureInfo& src);

+  void HashTextureBindings(XXH64_state_t* hash_state, uint32_t& fetch_mask,
+                           const std::vector<Shader::TextureBinding>& bindings);
+
   bool SetupTextureBindings(
       VkCommandBuffer command_buffer, VkFence completion_fence,
       UpdateSetInfo* update_set_info,
@@ -181,9 +184,9 @@ class TextureCache {
   ui::vulkan::VulkanDevice* device_ = nullptr;
   VkQueue device_queue_ = nullptr;

-  VkDescriptorPool descriptor_pool_ = nullptr;
+  std::unique_ptr<xe::ui::vulkan::DescriptorPool> descriptor_pool_ = nullptr;
+  std::unordered_map<uint64_t, VkDescriptorSet> texture_bindings_;
   VkDescriptorSetLayout texture_descriptor_set_layout_ = nullptr;
-  std::list<std::pair<VkDescriptorSet, VkFence>> in_flight_sets_;

   ui::vulkan::CircularBuffer staging_buffer_;
   std::unordered_map<uint64_t, Texture*> textures_;