Track batch fences with the batches.
This commit is contained in:
parent 391b6e9bb7
commit 48f6ba747c
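The change reworks the fenced pools so that each batch owns its VkFence instead of borrowing a caller-supplied std::shared_ptr<ui::vulkan::Fence>: BeginBatch() now hands the fence back, the caller signals it at submit time, and Scavenge() reclaims the batch once the fence is signaled. A minimal usage sketch under that assumption (the queue, submit_info, and pool member names are illustrative, not taken from this commit):

VkFence batch_fence = command_buffer_pool_->BeginBatch();
VkCommandBuffer command_buffer = command_buffer_pool_->AcquireEntry();
// ... record commands into command_buffer ...
VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
vkQueueSubmit(queue, 1, &submit_info, batch_fence);  // signals the batch's fence
command_buffer_pool_->EndBatch();   // no fence argument anymore
command_buffer_pool_->Scavenge();   // reclaims batches whose fence has signaled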
@@ -149,7 +149,7 @@ BufferCache::~BufferCache() {
 std::pair<VkDeviceSize, VkDeviceSize> BufferCache::UploadConstantRegisters(
     const Shader::ConstantRegisterMap& vertex_constant_register_map,
     const Shader::ConstantRegisterMap& pixel_constant_register_map,
-    std::shared_ptr<ui::vulkan::Fence> fence) {
+    VkFence fence) {
   // Fat struct, including all registers:
   // struct {
   //   vec4 float[512];
@@ -230,7 +230,7 @@ std::pair<VkDeviceSize, VkDeviceSize> BufferCache::UploadConstantRegisters(
 
 std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadIndexBuffer(
     const void* source_ptr, size_t source_length, IndexFormat format,
-    std::shared_ptr<ui::vulkan::Fence> fence) {
+    VkFence fence) {
   // Allocate space in the buffer for our data.
   auto offset = AllocateTransientData(source_length, fence);
   if (offset == VK_WHOLE_SIZE) {
@@ -256,7 +256,7 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadIndexBuffer(
 
 std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadVertexBuffer(
     const void* source_ptr, size_t source_length, Endian endian,
-    std::shared_ptr<ui::vulkan::Fence> fence) {
+    VkFence fence) {
   // Allocate space in the buffer for our data.
   auto offset = AllocateTransientData(source_length, fence);
   if (offset == VK_WHOLE_SIZE) {
@@ -276,8 +276,8 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadVertexBuffer(
   return {transient_buffer_->gpu_buffer(), offset};
 }
 
-VkDeviceSize BufferCache::AllocateTransientData(
-    VkDeviceSize length, std::shared_ptr<ui::vulkan::Fence> fence) {
+VkDeviceSize BufferCache::AllocateTransientData(VkDeviceSize length,
+                                                VkFence fence) {
   // Try fast path (if we have space).
   VkDeviceSize offset = TryAllocateTransientData(length, fence);
   if (offset != VK_WHOLE_SIZE) {
@@ -293,8 +293,8 @@ VkDeviceSize BufferCache::AllocateTransientData(
   return offset;
 }
 
-VkDeviceSize BufferCache::TryAllocateTransientData(
-    VkDeviceSize length, std::shared_ptr<ui::vulkan::Fence> fence) {
+VkDeviceSize BufferCache::TryAllocateTransientData(VkDeviceSize length,
+                                                   VkFence fence) {
   auto alloc = transient_buffer_->Acquire(length, fence);
   if (alloc) {
     return alloc->offset;
@@ -54,23 +54,25 @@ class BufferCache {
   std::pair<VkDeviceSize, VkDeviceSize> UploadConstantRegisters(
       const Shader::ConstantRegisterMap& vertex_constant_register_map,
       const Shader::ConstantRegisterMap& pixel_constant_register_map,
-      std::shared_ptr<ui::vulkan::Fence> fence);
+      VkFence fence);
 
   // Uploads index buffer data from guest memory, possibly eliding with
   // recently uploaded data or cached copies.
   // Returns a buffer and offset that can be used with vkCmdBindIndexBuffer.
   // Size will be VK_WHOLE_SIZE if the data could not be uploaded (OOM).
-  std::pair<VkBuffer, VkDeviceSize> UploadIndexBuffer(
-      const void* source_ptr, size_t source_length, IndexFormat format,
-      std::shared_ptr<ui::vulkan::Fence> fence);
+  std::pair<VkBuffer, VkDeviceSize> UploadIndexBuffer(const void* source_ptr,
+                                                      size_t source_length,
+                                                      IndexFormat format,
+                                                      VkFence fence);
 
   // Uploads vertex buffer data from guest memory, possibly eliding with
   // recently uploaded data or cached copies.
   // Returns a buffer and offset that can be used with vkCmdBindVertexBuffers.
   // Size will be VK_WHOLE_SIZE if the data could not be uploaded (OOM).
-  std::pair<VkBuffer, VkDeviceSize> UploadVertexBuffer(
-      const void* source_ptr, size_t source_length, Endian endian,
-      std::shared_ptr<ui::vulkan::Fence> fence);
+  std::pair<VkBuffer, VkDeviceSize> UploadVertexBuffer(const void* source_ptr,
+                                                       size_t source_length,
+                                                       Endian endian,
+                                                       VkFence fence);
 
   // Flushes all pending data to the GPU.
   // Until this is called the GPU is not guaranteed to see any data.
@@ -93,12 +95,10 @@ class BufferCache {
   // Allocates a block of memory in the transient buffer.
   // When memory is not available fences are checked and space is reclaimed.
   // Returns VK_WHOLE_SIZE if requested amount of memory is not available.
-  VkDeviceSize AllocateTransientData(VkDeviceSize length,
-                                     std::shared_ptr<ui::vulkan::Fence> fence);
+  VkDeviceSize AllocateTransientData(VkDeviceSize length, VkFence fence);
   // Tries to allocate a block of memory in the transient buffer.
   // Returns VK_WHOLE_SIZE if requested amount of memory is not available.
-  VkDeviceSize TryAllocateTransientData(
-      VkDeviceSize length, std::shared_ptr<ui::vulkan::Fence> fence);
+  VkDeviceSize TryAllocateTransientData(VkDeviceSize length, VkFence fence);
 
   RegisterFile* register_file_ = nullptr;
   VkDevice device_ = nullptr;
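A hedged example of a call site for the new buffer-cache signatures, assuming the caller already holds the VkFence of the current submission batch (the identifiers below are illustrative, not taken from this commit):

auto buffer_and_offset = buffer_cache_->UploadIndexBuffer(
    guest_index_ptr, index_count * sizeof(uint16_t), IndexFormat::kInt16,
    current_batch_fence);
if (buffer_and_offset.second == VK_WHOLE_SIZE) {
  return false;  // Transient space exhausted; flush and retry later.
}
vkCmdBindIndexBuffer(command_buffer, buffer_and_offset.first,
                     buffer_and_offset.second, VK_INDEX_TYPE_UINT16);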
@@ -279,7 +279,7 @@ TextureCache::Texture* TextureCache::AllocateTexture(
 
 bool TextureCache::FreeTexture(Texture* texture) {
   if (texture->in_flight_fence &&
-      texture->in_flight_fence->status() != VK_SUCCESS) {
+      vkGetFenceStatus(*device_, texture->in_flight_fence) != VK_SUCCESS) {
     // Texture still in flight.
     return false;
   }
@@ -315,6 +315,14 @@ TextureCache::Texture* TextureCache::DemandResolveTexture(
   texture = AllocateTexture(texture_info);
   texture->is_full_texture = false;
 
+  // Setup a debug name for the texture.
+  device_->DbgSetObjectName(
+      reinterpret_cast<uint64_t>(texture->image),
+      VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+      xe::format_string(
+          "0x%.8X - 0x%.8X", texture_info.guest_address,
+          texture_info.guest_address + texture_info.output_length));
+
   // Setup an access watch. If this texture is touched, it is destroyed.
   texture->access_watch_handle = memory_->AddPhysicalAccessWatch(
       texture_info.guest_address, texture_info.input_length,
@@ -337,9 +345,9 @@ TextureCache::Texture* TextureCache::DemandResolveTexture(
   return texture;
 }
 
-TextureCache::Texture* TextureCache::Demand(
-    const TextureInfo& texture_info, VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence) {
+TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
+                                            VkCommandBuffer command_buffer,
+                                            VkFence completion_fence) {
   // Run a tight loop to scan for an exact match existing texture.
   auto texture_hash = texture_info.hash();
   for (auto it = textures_.find(texture_hash); it != textures_.end(); ++it) {
@@ -432,6 +440,14 @@ TextureCache::Texture* TextureCache::Demand(
     return nullptr;
   }
 
+  // Setup a debug name for the texture.
+  device_->DbgSetObjectName(
+      reinterpret_cast<uint64_t>(texture->image),
+      VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+      xe::format_string(
+          "0x%.8X - 0x%.8X", texture_info.guest_address,
+          texture_info.guest_address + texture_info.output_length));
+
   // Copy in overlapping resolve textures.
   // FIXME: RDR appears to take textures from small chunks of a resolve texture?
   if (texture_info.dimension == Dimension::k2D) {
@@ -770,9 +786,8 @@ void TextureSwap(Endian endianness, void* dest, const void* src,
   }
 }
 
-void TextureCache::FlushPendingCommands(
-    VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence) {
+void TextureCache::FlushPendingCommands(VkCommandBuffer command_buffer,
+                                        VkFence completion_fence) {
   auto status = vkEndCommandBuffer(command_buffer);
   CheckResult(status, "vkEndCommandBuffer");
 
@@ -784,20 +799,19 @@ void TextureCache::FlushPendingCommands(
 
   if (device_queue_) {
     auto status =
-        vkQueueSubmit(device_queue_, 1, &submit_info, *completion_fence);
+        vkQueueSubmit(device_queue_, 1, &submit_info, completion_fence);
     CheckResult(status, "vkQueueSubmit");
   } else {
     std::lock_guard<std::mutex>(device_->primary_queue_mutex());
 
     auto status = vkQueueSubmit(device_->primary_queue(), 1, &submit_info,
-                                *completion_fence);
+                                completion_fence);
     CheckResult(status, "vkQueueSubmit");
   }
 
-  VkFence fences[] = {*completion_fence};
-  vkWaitForFences(*device_, 1, fences, VK_TRUE, -1);
+  vkWaitForFences(*device_, 1, &completion_fence, VK_TRUE, -1);
   staging_buffer_.Scavenge();
-  vkResetFences(*device_, 1, fences);
+  vkResetFences(*device_, 1, &completion_fence);
 
   // Reset the command buffer and put it back into the recording state.
   vkResetCommandBuffer(command_buffer, 0);
@@ -922,10 +936,9 @@ void TextureCache::ConvertTextureCube(uint8_t* dest, const TextureInfo& src) {
   }
 }
 
-bool TextureCache::UploadTexture2D(
-    VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence, Texture* dest,
-    const TextureInfo& src) {
+bool TextureCache::UploadTexture2D(VkCommandBuffer command_buffer,
+                                   VkFence completion_fence, Texture* dest,
+                                   const TextureInfo& src) {
 #if FINE_GRAINED_DRAW_SCOPES
   SCOPE_profile_cpu_f("gpu");
 #endif  // FINE_GRAINED_DRAW_SCOPES
@@ -1004,10 +1017,9 @@ bool TextureCache::UploadTexture2D(
   return true;
 }
 
-bool TextureCache::UploadTextureCube(
-    VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence, Texture* dest,
-    const TextureInfo& src) {
+bool TextureCache::UploadTextureCube(VkCommandBuffer command_buffer,
+                                     VkFence completion_fence, Texture* dest,
+                                     const TextureInfo& src) {
   assert_true(src.dimension == Dimension::kCube);
 
   size_t unpack_length = src.output_length;
@@ -1083,8 +1095,7 @@ bool TextureCache::UploadTextureCube(
 }
 
 VkDescriptorSet TextureCache::PrepareTextureSet(
-    VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence,
+    VkCommandBuffer command_buffer, VkFence completion_fence,
     const std::vector<Shader::TextureBinding>& vertex_bindings,
     const std::vector<Shader::TextureBinding>& pixel_bindings) {
   // Clear state.
@@ -1140,8 +1151,7 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
 }
 
 bool TextureCache::SetupTextureBindings(
-    VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence,
+    VkCommandBuffer command_buffer, VkFence completion_fence,
     UpdateSetInfo* update_set_info,
     const std::vector<Shader::TextureBinding>& bindings) {
   bool any_failed = false;
@@ -1158,10 +1168,10 @@ bool TextureCache::SetupTextureBindings(
   return !any_failed;
 }
 
-bool TextureCache::SetupTextureBinding(
-    VkCommandBuffer command_buffer,
-    std::shared_ptr<ui::vulkan::Fence> completion_fence,
-    UpdateSetInfo* update_set_info, const Shader::TextureBinding& binding) {
+bool TextureCache::SetupTextureBinding(VkCommandBuffer command_buffer,
+                                       VkFence completion_fence,
+                                       UpdateSetInfo* update_set_info,
+                                       const Shader::TextureBinding& binding) {
 #if FINE_GRAINED_DRAW_SCOPES
   SCOPE_profile_cpu_f("gpu");
 #endif // FINE_GRAINED_DRAW_SCOPES
@@ -1246,7 +1256,7 @@ void TextureCache::ClearCache() {
 void TextureCache::Scavenge() {
   // Free unused descriptor sets
   for (auto it = in_flight_sets_.begin(); it != in_flight_sets_.end();) {
-    if (vkGetFenceStatus(*device_, *it->second) == VK_SUCCESS) {
+    if (vkGetFenceStatus(*device_, it->second) == VK_SUCCESS) {
      // We can free this one.
       vkFreeDescriptorSets(*device_, descriptor_pool_, 1, &it->first);
       it = in_flight_sets_.erase(it);
@@ -52,7 +52,7 @@ class TextureCache {
     bool pending_invalidation;
 
     // Pointer to the latest usage fence.
-    std::shared_ptr<ui::vulkan::Fence> in_flight_fence;
+    VkFence in_flight_fence;
   };
 
   struct TextureView {
@@ -88,8 +88,7 @@ class TextureCache {
   // Requires a fence to be provided that will be signaled when finished
   // using the returned descriptor set.
   VkDescriptorSet PrepareTextureSet(
-      VkCommandBuffer setup_command_buffer,
-      std::shared_ptr<ui::vulkan::Fence> completion_fence,
+      VkCommandBuffer setup_command_buffer, VkFence completion_fence,
       const std::vector<Shader::TextureBinding>& vertex_bindings,
       const std::vector<Shader::TextureBinding>& pixel_bindings);
 
@@ -140,15 +139,14 @@ class TextureCache {
 
   // Demands a texture. If command_buffer is null and the texture hasn't been
   // uploaded to graphics memory already, we will return null and bail.
-  Texture* Demand(
-      const TextureInfo& texture_info, VkCommandBuffer command_buffer = nullptr,
-      std::shared_ptr<ui::vulkan::Fence> completion_fence = nullptr);
+  Texture* Demand(const TextureInfo& texture_info,
+                  VkCommandBuffer command_buffer = nullptr,
+                  VkFence completion_fence = nullptr);
   TextureView* DemandView(Texture* texture, uint16_t swizzle);
   Sampler* Demand(const SamplerInfo& sampler_info);
 
-  void FlushPendingCommands(
-      VkCommandBuffer command_buffer,
-      std::shared_ptr<ui::vulkan::Fence> completion_fence);
+  void FlushPendingCommands(VkCommandBuffer command_buffer,
+                            VkFence completion_fence);
 
   void ConvertTexture2D(uint8_t* dest, const TextureInfo& src);
   void ConvertTextureCube(uint8_t* dest, const TextureInfo& src);
@@ -156,21 +154,19 @@ class TextureCache {
   // Queues commands to upload a texture from system memory, applying any
   // conversions necessary. This may flush the command buffer to the GPU if we
   // run out of staging memory.
-  bool UploadTexture2D(VkCommandBuffer command_buffer,
-                       std::shared_ptr<ui::vulkan::Fence> completion_fence,
+  bool UploadTexture2D(VkCommandBuffer command_buffer, VkFence completion_fence,
                        Texture* dest, const TextureInfo& src);
 
   bool UploadTextureCube(VkCommandBuffer command_buffer,
-                         std::shared_ptr<ui::vulkan::Fence> completion_fence,
-                         Texture* dest, const TextureInfo& src);
+                         VkFence completion_fence, Texture* dest,
+                         const TextureInfo& src);
 
   bool SetupTextureBindings(
-      VkCommandBuffer command_buffer,
-      std::shared_ptr<ui::vulkan::Fence> completion_fence,
+      VkCommandBuffer command_buffer, VkFence completion_fence,
       UpdateSetInfo* update_set_info,
       const std::vector<Shader::TextureBinding>& bindings);
   bool SetupTextureBinding(VkCommandBuffer command_buffer,
-                           std::shared_ptr<ui::vulkan::Fence> completion_fence,
+                           VkFence completion_fence,
                            UpdateSetInfo* update_set_info,
                            const Shader::TextureBinding& binding);
 
@@ -183,8 +179,7 @@ class TextureCache {
 
   VkDescriptorPool descriptor_pool_ = nullptr;
   VkDescriptorSetLayout texture_descriptor_set_layout_ = nullptr;
-  std::list<std::pair<VkDescriptorSet, std::shared_ptr<ui::vulkan::Fence>>>
-      in_flight_sets_;
+  std::list<std::pair<VkDescriptorSet, VkFence>> in_flight_sets_;
 
   ui::vulkan::CircularBuffer staging_buffer_;
   std::unordered_map<uint64_t, Texture*> textures_;
@@ -164,8 +164,8 @@ bool CircularBuffer::CanAcquire(VkDeviceSize length) {
   return false;
 }
 
-CircularBuffer::Allocation* CircularBuffer::Acquire(
-    VkDeviceSize length, std::shared_ptr<Fence> fence) {
+CircularBuffer::Allocation* CircularBuffer::Acquire(VkDeviceSize length,
+                                                    VkFence fence) {
   VkDeviceSize aligned_length = xe::round_up(length, alignment_);
   if (!CanAcquire(aligned_length)) {
     return nullptr;
@@ -243,7 +243,7 @@ void CircularBuffer::Clear() {
 
 void CircularBuffer::Scavenge() {
   for (auto it = allocations_.begin(); it != allocations_.end();) {
-    if ((*it)->fence->status() != VK_SUCCESS) {
+    if (vkGetFenceStatus(*device_, (*it)->fence) != VK_SUCCESS) {
       // Don't bother freeing following allocations to ensure proper ordering.
       break;
     }
@@ -40,7 +40,7 @@ class CircularBuffer {
 
     // Allocation usage fence. This allocation will be deleted when the fence
     // becomes signaled.
-    std::shared_ptr<Fence> fence;
+    VkFence fence;
   };
 
   bool Initialize(VkDeviceMemory memory, VkDeviceSize offset);
@@ -59,7 +59,7 @@ class CircularBuffer {
 
   // Acquires space to hold memory. This allocation is only freed when the fence
   // reaches the signaled state.
-  Allocation* Acquire(VkDeviceSize length, std::shared_ptr<Fence> fence);
+  Allocation* Acquire(VkDeviceSize length, VkFence fence);
   void Flush(Allocation* allocation);
 
   // Clears all allocations, regardless of whether they've been consumed or not.
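For reference, a sketch of how the reworked CircularBuffer is meant to be driven; the host_base() accessor and the length/pointer names here are assumptions for illustration, not part of this commit:

// Acquire transient space tagged with the in-flight submission's raw fence.
auto* allocation = transient_buffer_->Acquire(upload_length, batch_fence);
if (allocation) {
  std::memcpy(transient_buffer_->host_base() + allocation->offset, source_ptr,
              upload_length);
  transient_buffer_->Flush(allocation);
}
// Later, Scavenge() releases allocations whose fence has signaled.
transient_buffer_->Scavenge();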
@@ -48,7 +48,7 @@ CommandBufferPool::CommandBufferPool(VkDevice device,
       vkAllocateCommandBuffers(device_, &command_buffer_info, command_buffers);
   CheckResult(err, "vkCreateCommandBuffer");
   for (size_t i = 0; i < xe::countof(command_buffers); ++i) {
-    PushEntry(command_buffers[i]);
+    PushEntry(command_buffers[i], nullptr);
   }
 }
 
@@ -58,7 +58,7 @@ CommandBufferPool::~CommandBufferPool() {
   command_pool_ = nullptr;
 }
 
-VkCommandBuffer CommandBufferPool::AllocateEntry() {
+VkCommandBuffer CommandBufferPool::AllocateEntry(void* data) {
   // TODO(benvanik): allocate a bunch at once?
   VkCommandBufferAllocateInfo command_buffer_info;
   command_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
@@ -77,6 +77,42 @@ void CommandBufferPool::FreeEntry(VkCommandBuffer handle) {
   vkFreeCommandBuffers(device_, command_pool_, 1, &handle);
 }
 
+DescriptorPool::DescriptorPool(VkDevice device, uint32_t max_count,
+                               std::vector<VkDescriptorPoolSize> pool_sizes)
+    : BaseFencedPool(device) {
+  VkDescriptorPoolCreateInfo descriptor_pool_info;
+  descriptor_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+  descriptor_pool_info.pNext = nullptr;
+  descriptor_pool_info.flags =
+      VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+  descriptor_pool_info.maxSets = max_count;
+  descriptor_pool_info.poolSizeCount = uint32_t(pool_sizes.size());
+  descriptor_pool_info.pPoolSizes = pool_sizes.data();
+  auto err = vkCreateDescriptorPool(device, &descriptor_pool_info, nullptr,
+                                    &descriptor_pool_);
+  CheckResult(err, "vkCreateDescriptorPool");
+}
+DescriptorPool::~DescriptorPool() {}
+
+VkDescriptorSet DescriptorPool::AllocateEntry(void* data) {
+  VkDescriptorSetLayout layout = reinterpret_cast<VkDescriptorSetLayout>(data);
+
+  VkDescriptorSet descriptor_set = nullptr;
+  VkDescriptorSetAllocateInfo set_alloc_info;
+  set_alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+  set_alloc_info.pNext = nullptr;
+  set_alloc_info.descriptorPool = descriptor_pool_;
+  set_alloc_info.descriptorSetCount = 1;
+  set_alloc_info.pSetLayouts = &layout;
+  auto err =
+      vkAllocateDescriptorSets(device_, &set_alloc_info, &descriptor_set);
+  CheckResult(err, "vkAllocateDescriptorSets");
+
+  return descriptor_set;
+}
+
+void DescriptorPool::FreeEntry(VkDescriptorSet handle) {}
+
 }  // namespace vulkan
 }  // namespace ui
 }  // namespace xe
@@ -49,7 +49,7 @@ class BaseFencedPool {
   void Scavenge() {
     while (pending_batch_list_head_) {
       auto batch = pending_batch_list_head_;
-      if (vkGetFenceStatus(device_, *batch->fence) == VK_SUCCESS) {
+      if (vkGetFenceStatus(device_, batch->fence) == VK_SUCCESS) {
         // Batch has completed. Reclaim.
         pending_batch_list_head_ = batch->next;
         if (batch == pending_batch_list_tail_) {
@@ -72,7 +72,7 @@ class BaseFencedPool {
   // Begins a new batch.
   // All entries acquired within this batch will be marked as in-use until
   // the fence specified in EndBatch is signalled.
-  void BeginBatch() {
+  VkFence BeginBatch() {
     assert_null(open_batch_);
     Batch* batch = nullptr;
    if (free_batch_list_head_) {
@@ -80,15 +80,26 @@ class BaseFencedPool {
       batch = free_batch_list_head_;
       free_batch_list_head_ = batch->next;
       batch->next = nullptr;
+      vkResetFences(device_, 1, &batch->fence);
     } else {
       // Allocate new batch.
       batch = new Batch();
       batch->next = nullptr;
+
+      VkFenceCreateInfo info;
+      info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+      info.pNext = nullptr;
+      info.flags = 0;
+      VkResult res = vkCreateFence(device_, &info, nullptr, &batch->fence);
+      if (res != VK_SUCCESS) {
+        assert_always();
+      }
     }
     batch->entry_list_head = nullptr;
     batch->entry_list_tail = nullptr;
-    batch->fence = nullptr;
     open_batch_ = batch;
+
+    return batch->fence;
   }
 
   // Cancels an open batch, and releases all entries acquired within.
@@ -109,33 +120,8 @@ class BaseFencedPool {
     batch->entry_list_tail = nullptr;
   }
 
-  // Attempts to acquire an entry from the pool in the current batch.
-  // If none are available a new one will be allocated.
-  HANDLE AcquireEntry() {
-    Entry* entry = nullptr;
-    if (free_entry_list_head_) {
-      // Slice off an entry from the free list.
-      entry = free_entry_list_head_;
-      free_entry_list_head_ = entry->next;
-    } else {
-      // No entry available; allocate new.
-      entry = new Entry();
-      entry->handle = static_cast<T*>(this)->AllocateEntry();
-    }
-    entry->next = nullptr;
-    if (!open_batch_->entry_list_head) {
-      open_batch_->entry_list_head = entry;
-    }
-    if (open_batch_->entry_list_tail) {
-      open_batch_->entry_list_tail->next = entry;
-    }
-    open_batch_->entry_list_tail = entry;
-    return entry->handle;
-  }
-
-  // Ends the current batch using the given fence to indicate when the batch
-  // has completed execution on the GPU.
-  void EndBatch(std::shared_ptr<Fence> fence) {
+  // Ends the current batch.
+  void EndBatch() {
     assert_not_null(open_batch_);
 
     // Close and see if we have anything.
@@ -148,9 +134,6 @@ class BaseFencedPool {
       return;
     }
 
-    // Track the fence.
-    batch->fence = fence;
-
     // Append to the end of the batch list.
     batch->next = nullptr;
     if (!pending_batch_list_head_) {
@@ -165,9 +148,52 @@ class BaseFencedPool {
   }
 
  protected:
-  void PushEntry(HANDLE handle) {
+  // Attempts to acquire an entry from the pool in the current batch.
+  // If none are available a new one will be allocated.
+  HANDLE AcquireEntry(void* data) {
+    Entry* entry = nullptr;
+    if (free_entry_list_head_) {
+      // Slice off an entry from the free list.
+      Entry* prev = nullptr;
+      Entry* cur = free_entry_list_head_;
+      while (cur != nullptr) {
+        if (cur->data == data) {
+          if (prev) {
+            prev->next = cur->next;
+          } else {
+            free_entry_list_head_ = cur->next;
+          }
+
+          entry = cur;
+          break;
+        }
+
+        prev = cur;
+        cur = cur->next;
+      }
+    }
+
+    if (!entry) {
+      // No entry available; allocate new.
+      entry = new Entry();
+      entry->data = data;
+      entry->handle = static_cast<T*>(this)->AllocateEntry(data);
+    }
+    entry->next = nullptr;
+    if (!open_batch_->entry_list_head) {
+      open_batch_->entry_list_head = entry;
+    }
+    if (open_batch_->entry_list_tail) {
+      open_batch_->entry_list_tail->next = entry;
+    }
+    open_batch_->entry_list_tail = entry;
+    return entry->handle;
+  }
+
+  void PushEntry(HANDLE handle, void* data) {
     auto entry = new Entry();
     entry->next = free_entry_list_head_;
+    entry->data = data;
     entry->handle = handle;
     free_entry_list_head_ = entry;
   }
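The data key on free-list entries is what lets a single BaseFencedPool serve handles that are not interchangeable: an entry is only reused when its key matches the request. A hedged illustration of the intent (the DescriptorPool-side wrapper declared further down in this header still returns nullptr in this commit):

VkCommandBuffer cmd = command_buffer_pool->AcquireEntry();        // keyed on nullptr
VkDescriptorSet set = descriptor_pool->AcquireEntry(set_layout);  // keyed on the layout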
@@ -192,13 +218,14 @@ class BaseFencedPool {
  private:
   struct Entry {
     Entry* next;
+    void* data;
     HANDLE handle;
   };
   struct Batch {
     Batch* next;
     Entry* entry_list_head;
     Entry* entry_list_tail;
-    std::shared_ptr<Fence> fence;
+    VkFence fence;
   };
 
   Batch* free_batch_list_head_ = nullptr;
@@ -211,19 +238,39 @@ class BaseFencedPool {
 class CommandBufferPool
     : public BaseFencedPool<CommandBufferPool, VkCommandBuffer> {
  public:
+  typedef BaseFencedPool<CommandBufferPool, VkCommandBuffer> Base;
+
   CommandBufferPool(VkDevice device, uint32_t queue_family_index,
                     VkCommandBufferLevel level);
   ~CommandBufferPool() override;
 
+  VkCommandBuffer AcquireEntry() { return Base::AcquireEntry(nullptr); }
+
  protected:
   friend class BaseFencedPool<CommandBufferPool, VkCommandBuffer>;
-  VkCommandBuffer AllocateEntry();
+  VkCommandBuffer AllocateEntry(void* data);
   void FreeEntry(VkCommandBuffer handle);
 
   VkCommandPool command_pool_ = nullptr;
   VkCommandBufferLevel level_ = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
 };
 
+class DescriptorPool : public BaseFencedPool<DescriptorPool, VkDescriptorSet> {
+ public:
+  DescriptorPool(VkDevice device, uint32_t max_count,
+                 std::vector<VkDescriptorPoolSize> pool_sizes);
+  ~DescriptorPool() override;
+
+  VkDescriptorSet AcquireEntry(VkDescriptorSetLayout layout) { return nullptr; }
+
+ protected:
+  friend class BaseFencedPool<DescriptorPool, VkCommandBuffer>;
+  VkDescriptorSet AllocateEntry(void* data);
+  void FreeEntry(VkDescriptorSet handle);
+
+  VkDescriptorPool descriptor_pool_ = nullptr;
+};
+
 }  // namespace vulkan
 }  // namespace ui
 }  // namespace xe
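The new DescriptorPool is constructed from a maximum set count and a list of pool sizes; a hedged construction sketch (the counts and descriptor type below are placeholders, not values from this commit):

VkDevice device = /* ... */;
std::vector<VkDescriptorPoolSize> pool_sizes = {
    {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 4096},
};
xe::ui::vulkan::DescriptorPool descriptor_pool(device, 4096,
                                               std::move(pool_sizes));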
@@ -55,6 +55,9 @@ VulkanDevice::VulkanDevice(VulkanInstance* instance) : instance_(instance) {
     DeclareRequiredLayer("VK_LAYER_LUNARG_image", Version::Make(0, 0, 0), true);
     */
   }
+
+  DeclareRequiredExtension(VK_EXT_DEBUG_MARKER_EXTENSION_NAME,
+                           Version::Make(0, 0, 0), true);
 }
 
 VulkanDevice::~VulkanDevice() {
@@ -221,6 +224,51 @@ void VulkanDevice::ReleaseQueue(VkQueue queue) {
   free_queues_.push_back(queue);
 }
 
+void VulkanDevice::DbgSetObjectName(VkDevice device, uint64_t object,
+                                    VkDebugReportObjectTypeEXT object_type,
+                                    std::string name) {
+  PFN_vkDebugMarkerSetObjectNameEXT pfn_vkDebugMarkerSetObjectNameEXT = nullptr;
+  if (!pfn_vkDebugMarkerSetObjectNameEXT) {
+    pfn_vkDebugMarkerSetObjectNameEXT =
+        (PFN_vkDebugMarkerSetObjectNameEXT)vkGetDeviceProcAddr(
+            device, "vkDebugMarkerSetObjectNameEXT");
+
+    if (!pfn_vkDebugMarkerSetObjectNameEXT) {
+      return;
+    }
+  }
+
+  VkDebugMarkerObjectNameInfoEXT info;
+  info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
+  info.pNext = nullptr;
+  info.objectType = object_type;
+  info.object = object;
+  info.pObjectName = name.c_str();
+  pfn_vkDebugMarkerSetObjectNameEXT(device, &info);
+}
+
+void VulkanDevice::DbgSetObjectName(uint64_t object,
+                                    VkDebugReportObjectTypeEXT object_type,
+                                    std::string name) {
+  if (!pfn_vkDebugMarkerSetObjectNameEXT_) {
+    pfn_vkDebugMarkerSetObjectNameEXT_ =
+        (PFN_vkDebugMarkerSetObjectNameEXT)vkGetDeviceProcAddr(
+            handle, "vkDebugMarkerSetObjectNameEXT");
+
+    if (!pfn_vkDebugMarkerSetObjectNameEXT_) {
+      return;
+    }
+  }
+
+  VkDebugMarkerObjectNameInfoEXT info;
+  info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
+  info.pNext = nullptr;
+  info.objectType = object_type;
+  info.object = object;
+  info.pObjectName = name.c_str();
+  pfn_vkDebugMarkerSetObjectNameEXT_(handle, &info);
+}
+
 bool VulkanDevice::is_renderdoc_attached() const {
   return instance_->is_renderdoc_attached();
 }
@@ -75,6 +75,12 @@ class VulkanDevice {
   // This method is thread safe.
   void ReleaseQueue(VkQueue queue);
 
+  static void DbgSetObjectName(VkDevice device, uint64_t object,
+                               VkDebugReportObjectTypeEXT object_type,
+                               std::string name);
+  void DbgSetObjectName(uint64_t object, VkDebugReportObjectTypeEXT object_type,
+                        std::string name);
+
   // True if RenderDoc is attached and available for use.
   bool is_renderdoc_attached() const;
   // Begins capturing the current frame in RenderDoc, if it is attached.
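DbgSetObjectName is a thin wrapper over VK_EXT_debug_marker and silently returns if the extension's entry point cannot be resolved, so it is safe to call unconditionally. A usage sketch in the spirit of the texture_cache call sites above (the name string here is illustrative):

device_->DbgSetObjectName(reinterpret_cast<uint64_t>(texture->image),
                          VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                          "0xA0000000 - 0xA0010000");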
@@ -95,6 +101,8 @@ class VulkanDevice {
   std::vector<Requirement> required_layers_;
   std::vector<Requirement> required_extensions_;
 
+  PFN_vkDebugMarkerSetObjectNameEXT pfn_vkDebugMarkerSetObjectNameEXT_;
+
   DeviceInfo device_info_;
   uint32_t queue_family_index_ = 0;
   std::mutex queue_mutex_;
@@ -58,6 +58,9 @@ VulkanInstance::VulkanInstance() {
     DeclareRequiredExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
                              Version::Make(0, 0, 0), true);
   }
+
+  DeclareRequiredExtension(VK_EXT_DEBUG_MARKER_EXTENSION_NAME,
+                           Version::Make(0, 0, 0), true);
 }
 
 VulkanInstance::~VulkanInstance() { DestroyInstance(); }