diff --git a/src/xenia/ui/vulkan/vulkan_context.cc b/src/xenia/ui/vulkan/vulkan_context.cc
index 9dd9c7d58..a2c5998f4 100644
--- a/src/xenia/ui/vulkan/vulkan_context.cc
+++ b/src/xenia/ui/vulkan/vulkan_context.cc
@@ -35,28 +35,18 @@ VulkanContext::VulkanContext(VulkanProvider* provider, Window* target_window)
 
 VulkanContext::~VulkanContext() {
   auto provider = static_cast<VulkanProvider*>(provider_);
   auto device = provider->device();
-  vkQueueWaitIdle(device->primary_queue());
+  {
+    std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
+    vkQueueWaitIdle(device->primary_queue());
+  }
   immediate_drawer_.reset();
   swap_chain_.reset();
-  if (cmd_pool_) {
-    vkDestroyCommandPool(*device, cmd_pool_, nullptr);
-  }
 }
 
 bool VulkanContext::Initialize() {
   auto provider = static_cast<VulkanProvider*>(provider_);
   auto device = provider->device();
-  // All context-specific commands will be allocated from this.
-  // We may want to have additional pools for different rendering subsystems.
-  VkCommandPoolCreateInfo cmd_pool_info;
-  cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
-  cmd_pool_info.pNext = nullptr;
-  cmd_pool_info.queueFamilyIndex = device->queue_family_index();
-  cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
-  auto err = vkCreateCommandPool(*device, &cmd_pool_info, nullptr, &cmd_pool_);
-  CheckResult(err, "vkCreateCommandPool");
-
   if (target_window_) {
     // Create swap chain used to present to the window.
     VkSurfaceKHR surface = nullptr;
@@ -68,8 +58,8 @@ bool VulkanContext::Initialize() {
     create_info.hinstance =
         static_cast<HINSTANCE>(target_window_->native_platform_handle());
     create_info.hwnd = static_cast<HWND>(target_window_->native_handle());
-    err = vkCreateWin32SurfaceKHR(*provider->instance(), &create_info, nullptr,
-                                  &surface);
+    auto err = vkCreateWin32SurfaceKHR(*provider->instance(), &create_info,
+                                       nullptr, &surface);
     CheckResult(err, "vkCreateWin32SurfaceKHR");
 #else
 #error Platform not yet implemented.
@@ -130,6 +120,7 @@ void VulkanContext::BeginSwap() {
   swap_chain_->Begin();
 
   // TODO(benvanik): use a fence instead? May not be possible with target image.
+  std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
   auto err = vkQueueWaitIdle(device->primary_queue());
   CheckResult(err, "vkQueueWaitIdle");
 }
@@ -145,6 +136,7 @@ void VulkanContext::EndSwap() {
 
   // Wait until the queue is idle.
   // TODO(benvanik): is this required?
+  std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
   auto err = vkQueueWaitIdle(device->primary_queue());
   CheckResult(err, "vkQueueWaitIdle");
 }
diff --git a/src/xenia/ui/vulkan/vulkan_context.h b/src/xenia/ui/vulkan/vulkan_context.h
index 1893ca287..f8ec41f05 100644
--- a/src/xenia/ui/vulkan/vulkan_context.h
+++ b/src/xenia/ui/vulkan/vulkan_context.h
@@ -53,7 +53,6 @@ class VulkanContext : public GraphicsContext {
 
   std::unique_ptr<VulkanSwapChain> swap_chain_;
   std::unique_ptr<VulkanImmediateDrawer> immediate_drawer_;
-  VkCommandPool cmd_pool_ = nullptr;
 };
 
 }  // namespace vulkan
diff --git a/src/xenia/ui/vulkan/vulkan_device.cc b/src/xenia/ui/vulkan/vulkan_device.cc
index 8f862f444..c7ca1d974 100644
--- a/src/xenia/ui/vulkan/vulkan_device.cc
+++ b/src/xenia/ui/vulkan/vulkan_device.cc
@@ -118,8 +118,8 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
     if (queue_flags & VK_QUEUE_GRAPHICS_BIT) {
       // Can do graphics and present - good!
       ideal_queue_family_index = static_cast<uint32_t>(i);
-      // TODO(benvanik): pick a higher queue count?
-      queue_count = 1;
+      // Grab all the queues we can.
+      queue_count = device_info.queue_family_properties[i].queueCount;
       break;
     }
   }
@@ -136,6 +136,8 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
   queue_info.queueFamilyIndex = ideal_queue_family_index;
   queue_info.queueCount = queue_count;
   std::vector<float> queue_priorities(queue_count);
+  // Prioritize the primary queue.
+  queue_priorities[0] = 1.0f;
   queue_info.pQueuePriorities = queue_priorities.data();
 
   VkDeviceCreateInfo create_info;
@@ -179,10 +181,32 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
   // Get the primary queue used for most submissions/etc.
   vkGetDeviceQueue(handle, queue_family_index_, 0, &primary_queue_);
 
+  // Get all additional queues, if we got any; queue 0 is the primary queue,
+  // so start the pool at index 1.
+  for (uint32_t i = 0; i < queue_count - 1; ++i) {
+    VkQueue queue;
+    vkGetDeviceQueue(handle, queue_family_index_, i + 1, &queue);
+    free_queues_.push_back(queue);
+  }
+
   XELOGVK("Device initialized successfully!");
   return true;
 }
 
+VkQueue VulkanDevice::AcquireQueue() {
+  std::lock_guard<std::mutex> lock(queue_mutex_);
+  if (free_queues_.empty()) {
+    return nullptr;
+  }
+  auto queue = free_queues_.back();
+  free_queues_.pop_back();
+  return queue;
+}
+
+void VulkanDevice::ReleaseQueue(VkQueue queue) {
+  std::lock_guard<std::mutex> lock(queue_mutex_);
+  free_queues_.push_back(queue);
+}
+
 VkDeviceMemory VulkanDevice::AllocateMemory(
     const VkMemoryRequirements& requirements, VkFlags required_properties) {
   // Search memory types to find one matching our requirements and our
diff --git a/src/xenia/ui/vulkan/vulkan_device.h b/src/xenia/ui/vulkan/vulkan_device.h
index f1194d662..e9b12e3fc 100644
--- a/src/xenia/ui/vulkan/vulkan_device.h
+++ b/src/xenia/ui/vulkan/vulkan_device.h
@@ -11,6 +11,7 @@
 #define XENIA_UI_VULKAN_VULKAN_DEVICE_H_
 
 #include <memory>
+#include <mutex>
 #include <string>
 #include <vector>
 
@@ -57,9 +58,23 @@ class VulkanDevice {
   bool Initialize(DeviceInfo device_info);
 
   uint32_t queue_family_index() const { return queue_family_index_; }
+  std::mutex& primary_queue_mutex() { return queue_mutex_; }
+  // Access to the primary queue must be synchronized with primary_queue_mutex.
   VkQueue primary_queue() const { return primary_queue_; }
   const DeviceInfo& device_info() const { return device_info_; }
 
+  // Acquires a queue for exclusive use by the caller.
+  // The queue will not be touched by any other code until it's returned with
+  // ReleaseQueue.
+  // Not all devices support multiple queues, or may only support a limited
+  // number. If this returns null the primary_queue should be used with the
+  // primary_queue_mutex.
+  // This method is thread safe.
+  VkQueue AcquireQueue();
+  // Releases a queue back to the device pool.
+  // This method is thread safe.
+  void ReleaseQueue(VkQueue queue);
+
   // Allocates memory of the given size matching the required properties.
   VkDeviceMemory AllocateMemory(
       const VkMemoryRequirements& requirements,
@@ -73,7 +88,9 @@
 
   DeviceInfo device_info_;
   uint32_t queue_family_index_ = 0;
+  std::mutex queue_mutex_;
   VkQueue primary_queue_ = nullptr;
+  std::vector<VkQueue> free_queues_;
 };
 
 }  // namespace vulkan
diff --git a/src/xenia/ui/vulkan/vulkan_swap_chain.cc b/src/xenia/ui/vulkan/vulkan_swap_chain.cc
index cb088bb75..47d246d18 100644
--- a/src/xenia/ui/vulkan/vulkan_swap_chain.cc
+++ b/src/xenia/ui/vulkan/vulkan_swap_chain.cc
@@ -373,12 +373,15 @@ bool VulkanSwapChain::Begin() {
   wait_submit_info.pCommandBuffers = nullptr;
   wait_submit_info.signalSemaphoreCount = 0;
   wait_submit_info.pSignalSemaphores = nullptr;
-  err = vkQueueSubmit(device_->primary_queue(), 1, &wait_submit_info, nullptr);
+  {
+    std::lock_guard<std::mutex> queue_lock(device_->primary_queue_mutex());
+    err =
+        vkQueueSubmit(device_->primary_queue(), 1, &wait_submit_info, nullptr);
+  }
   CheckResult(err, "vkQueueSubmit");
 
   // Reset all command buffers.
-  vkResetCommandBuffer(render_cmd_buffer_,
-                       VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
+  vkResetCommandBuffer(render_cmd_buffer_, 0);
   auto& current_buffer = buffers_[current_buffer_index_];
 
   // Build the command buffer that will execute all queued rendering buffers.
@@ -484,8 +487,11 @@ bool VulkanSwapChain::End() {
   render_submit_info.pCommandBuffers = &render_cmd_buffer_;
   render_submit_info.signalSemaphoreCount = 0;
   render_submit_info.pSignalSemaphores = nullptr;
-  err =
-      vkQueueSubmit(device_->primary_queue(), 1, &render_submit_info, nullptr);
+  {
+    std::lock_guard<std::mutex> queue_lock(device_->primary_queue_mutex());
+    err = vkQueueSubmit(device_->primary_queue(), 1, &render_submit_info,
+                        nullptr);
+  }
   CheckResult(err, "vkQueueSubmit");
 
   // Queue the present of our current image.
@@ -500,7 +506,10 @@ bool VulkanSwapChain::End() {
   present_info.pSwapchains = swap_chains;
   present_info.pImageIndices = swap_chain_image_indices;
   present_info.pResults = nullptr;
-  err = vkQueuePresentKHR(device_->primary_queue(), &present_info);
+  {
+    std::lock_guard<std::mutex> queue_lock(device_->primary_queue_mutex());
+    err = vkQueuePresentKHR(device_->primary_queue(), &present_info);
+  }
   switch (err) {
     case VK_SUCCESS:
       break;
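
Usage sketch (not part of the diff above): the snippet below illustrates how a rendering subsystem might consume the new VulkanDevice::AcquireQueue/ReleaseQueue pool, falling back to the mutex-guarded primary queue when the device has no spare queues. The function name SubmitFromWorkerThread and its pre-recorded command buffer parameter are hypothetical and exist only for illustration.

// Hypothetical example, not part of the change itself.
#include <mutex>

#include "xenia/ui/vulkan/vulkan_device.h"

namespace xe {
namespace ui {
namespace vulkan {

// Submits a pre-recorded command buffer from any thread, preferring a
// dedicated queue and falling back to the shared primary queue.
VkResult SubmitFromWorkerThread(VulkanDevice* device,
                                VkCommandBuffer cmd_buffer) {
  VkSubmitInfo submit_info = {};
  submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
  submit_info.commandBufferCount = 1;
  submit_info.pCommandBuffers = &cmd_buffer;

  VkQueue queue = device->AcquireQueue();
  if (queue) {
    // We own this queue exclusively until ReleaseQueue, so no lock is needed.
    VkResult err = vkQueueSubmit(queue, 1, &submit_info, nullptr);
    device->ReleaseQueue(queue);
    return err;
  }

  // No additional queues on this device: share the primary queue under its
  // mutex, matching the locking pattern used in VulkanContext/VulkanSwapChain.
  std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
  return vkQueueSubmit(device->primary_queue(), 1, &submit_info, nullptr);
}

}  // namespace vulkan
}  // namespace ui
}  // namespace xe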