Adding multiple queue/shared queue support.

Ben Vanik 2016-02-18 16:40:45 -08:00
parent ea959b52fd
commit 00594da417
5 changed files with 66 additions and 25 deletions
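The pattern this change introduces, pieced together from the diffs below: a caller that wants its own queue asks the device for one with AcquireQueue() and hands it back with ReleaseQueue(); on devices that expose only a single queue the pool is empty, so the caller falls back to the shared primary queue and serializes access with primary_queue_mutex(). A minimal caller-side sketch of that pattern (SubmitWork is a hypothetical helper, not part of this commit; the include path is inferred from the header guard below; namespace qualifiers omitted):

#include <mutex>

#include "xenia/ui/vulkan/vulkan_device.h"  // assumed path, per XENIA_UI_VULKAN_VULKAN_DEVICE_H_

// Hypothetical helper showing the acquire-or-fall-back submission pattern.
void SubmitWork(VulkanDevice* device, const VkSubmitInfo& submit_info) {
  // Prefer a dedicated queue from the pool so we never contend with other
  // threads.
  VkQueue queue = device->AcquireQueue();
  if (queue) {
    vkQueueSubmit(queue, 1, &submit_info, nullptr);
    device->ReleaseQueue(queue);
  } else {
    // Pool is empty (single-queue device): share the primary queue, guarded
    // by the device-wide mutex.
    std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
    vkQueueSubmit(device->primary_queue(), 1, &submit_info, nullptr);
  }
}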

View File

@@ -35,28 +35,18 @@ VulkanContext::VulkanContext(VulkanProvider* provider, Window* target_window)
VulkanContext::~VulkanContext() {
auto provider = static_cast<VulkanProvider*>(provider_);
auto device = provider->device();
vkQueueWaitIdle(device->primary_queue());
{
std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
vkQueueWaitIdle(device->primary_queue());
}
immediate_drawer_.reset();
swap_chain_.reset();
if (cmd_pool_) {
vkDestroyCommandPool(*device, cmd_pool_, nullptr);
}
}
bool VulkanContext::Initialize() {
auto provider = static_cast<VulkanProvider*>(provider_);
auto device = provider->device();
// All context-specific commands will be allocated from this.
// We may want to have additional pools for different rendering subsystems.
VkCommandPoolCreateInfo cmd_pool_info;
cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cmd_pool_info.pNext = nullptr;
cmd_pool_info.queueFamilyIndex = device->queue_family_index();
cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
auto err = vkCreateCommandPool(*device, &cmd_pool_info, nullptr, &cmd_pool_);
CheckResult(err, "vkCreateCommandPool");
if (target_window_) {
// Create swap chain used to present to the window.
VkSurfaceKHR surface = nullptr;
@@ -68,8 +58,8 @@ bool VulkanContext::Initialize() {
create_info.hinstance =
static_cast<HINSTANCE>(target_window_->native_platform_handle());
create_info.hwnd = static_cast<HWND>(target_window_->native_handle());
err = vkCreateWin32SurfaceKHR(*provider->instance(), &create_info, nullptr,
&surface);
auto err = vkCreateWin32SurfaceKHR(*provider->instance(), &create_info,
nullptr, &surface);
CheckResult(err, "vkCreateWin32SurfaceKHR");
#else
#error Platform not yet implemented.
@@ -130,6 +120,7 @@ void VulkanContext::BeginSwap() {
swap_chain_->Begin();
// TODO(benvanik): use a fence instead? May not be possible with target image.
std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
auto err = vkQueueWaitIdle(device->primary_queue());
CheckResult(err, "vkQueueWaitIdle");
}
@@ -145,6 +136,7 @@ void VulkanContext::EndSwap() {
// Wait until the queue is idle.
// TODO(benvanik): is this required?
std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
auto err = vkQueueWaitIdle(device->primary_queue());
CheckResult(err, "vkQueueWaitIdle");
}
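Both BeginSwap and EndSwap now follow the same rule: any call that touches the shared primary queue (submit, present, or wait-idle) runs while holding primary_queue_mutex. A hedged sketch of that convention as a standalone helper (WaitOnPrimaryQueue is hypothetical; CheckResult is the same helper already used throughout this file):

void WaitOnPrimaryQueue(VulkanDevice* device) {
  // Serialize with every other thread that shares the primary queue.
  std::lock_guard<std::mutex> queue_lock(device->primary_queue_mutex());
  auto err = vkQueueWaitIdle(device->primary_queue());
  CheckResult(err, "vkQueueWaitIdle");
}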

View File

@@ -53,7 +53,6 @@ class VulkanContext : public GraphicsContext {
std::unique_ptr<VulkanSwapChain> swap_chain_;
std::unique_ptr<VulkanImmediateDrawer> immediate_drawer_;
VkCommandPool cmd_pool_ = nullptr;
};
} // namespace vulkan

View File

@@ -118,8 +118,8 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
if (queue_flags & VK_QUEUE_GRAPHICS_BIT) {
// Can do graphics and present - good!
ideal_queue_family_index = static_cast<uint32_t>(i);
// TODO(benvanik): pick a higher queue count?
queue_count = 1;
// Grab all the queues we can.
queue_count = device_info.queue_family_properties[i].queueCount;
break;
}
}
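For context, queue_family_properties is presumably populated via the standard Vulkan enumeration; the loop above picks the first graphics-capable family and, with this change, requests every queue that family advertises instead of just one. A sketch of that enumeration (variable names are illustrative, not from this codebase):

uint32_t family_count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &family_count, nullptr);
std::vector<VkQueueFamilyProperties> families(family_count);
vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &family_count,
                                         families.data());
// families[i].queueFlags says what the family supports (graphics, compute, ...);
// families[i].queueCount is the upper bound used for queue_count above.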
@@ -136,6 +136,8 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
queue_info.queueFamilyIndex = ideal_queue_family_index;
queue_info.queueCount = queue_count;
std::vector<float> queue_priorities(queue_count);
// Prioritize the primary queue.
queue_priorities[0] = 1.0f;
queue_info.pQueuePriorities = queue_priorities.data();
VkDeviceCreateInfo create_info;
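The priorities array needs one float in [0, 1] per requested queue; std::vector value-initializes the remaining entries to 0.0f, so only the primary queue's slot has to be set explicitly. For reference, a self-contained sketch of the whole queue-create setup (sType/pNext/flags are not shown in the diff context above, and the surrounding code's exact initialization may differ):

VkDeviceQueueCreateInfo queue_info = {};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = nullptr;
queue_info.flags = 0;
queue_info.queueFamilyIndex = ideal_queue_family_index;
queue_info.queueCount = queue_count;
std::vector<float> queue_priorities(queue_count, 0.0f);
queue_priorities[0] = 1.0f;  // the primary queue gets the highest priority
queue_info.pQueuePriorities = queue_priorities.data();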
@@ -179,10 +181,32 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
// Get the primary queue used for most submissions/etc.
vkGetDeviceQueue(handle, queue_family_index_, 0, &primary_queue_);
// Get all additional queues, if we got any.
for (uint32_t i = 0; i < queue_count - 1; ++i) {
VkQueue queue;
vkGetDeviceQueue(handle, queue_family_index_, i + 1, &queue);
free_queues_.push_back(queue);
}
XELOGVK("Device initialized successfully!");
return true;
}
VkQueue VulkanDevice::AcquireQueue() {
std::lock_guard<std::mutex> lock(queue_mutex_);
if (free_queues_.empty()) {
return nullptr;
}
auto queue = free_queues_.back();
free_queues_.pop_back();
return queue;
}
void VulkanDevice::ReleaseQueue(VkQueue queue) {
std::lock_guard<std::mutex> lock(queue_mutex_);
free_queues_.push_back(queue);
}
VkDeviceMemory VulkanDevice::AllocateMemory(
const VkMemoryRequirements& requirements, VkFlags required_properties) {
// Search memory types to find one matching our requirements and our
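Because AcquireQueue hands out a queue for exclusive use, every successful acquire must eventually be paired with ReleaseQueue. A hypothetical RAII wrapper (not part of this commit) that makes the pairing automatic:

class ScopedQueue {
 public:
  explicit ScopedQueue(VulkanDevice* device)
      : device_(device), queue_(device->AcquireQueue()) {}
  ~ScopedQueue() {
    if (queue_) {
      device_->ReleaseQueue(queue_);
    }
  }
  // Null when the pool is empty; the caller must then fall back to
  // primary_queue() under primary_queue_mutex().
  VkQueue get() const { return queue_; }

 private:
  VulkanDevice* device_;
  VkQueue queue_;
};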

View File

@@ -11,6 +11,7 @@
#define XENIA_UI_VULKAN_VULKAN_DEVICE_H_
#include <memory>
#include <mutex>
#include <string>
#include <vector>
@@ -57,9 +58,23 @@ class VulkanDevice {
bool Initialize(DeviceInfo device_info);
uint32_t queue_family_index() const { return queue_family_index_; }
std::mutex& primary_queue_mutex() { return queue_mutex_; }
// Access to the primary queue must be synchronized with primary_queue_mutex.
VkQueue primary_queue() const { return primary_queue_; }
const DeviceInfo& device_info() const { return device_info_; }
// Acquires a queue for exclusive use by the caller.
// The queue will not be touched by any other code until it's returned with
// ReleaseQueue.
// Not all devices expose extra queues, and some only expose a few. If this
// returns null the primary_queue should be used instead, guarded by the
// primary_queue_mutex.
// This method is thread safe.
VkQueue AcquireQueue();
// Releases a queue back to the device pool.
// This method is thread safe.
void ReleaseQueue(VkQueue queue);
// Allocates memory of the given size matching the required properties.
VkDeviceMemory AllocateMemory(
const VkMemoryRequirements& requirements,
@@ -73,7 +88,9 @@
DeviceInfo device_info_;
uint32_t queue_family_index_ = 0;
std::mutex queue_mutex_;
VkQueue primary_queue_ = nullptr;
std::vector<VkQueue> free_queues_;
};
} // namespace vulkan

View File

@@ -373,12 +373,15 @@ bool VulkanSwapChain::Begin() {
wait_submit_info.pCommandBuffers = nullptr;
wait_submit_info.signalSemaphoreCount = 0;
wait_submit_info.pSignalSemaphores = nullptr;
err = vkQueueSubmit(device_->primary_queue(), 1, &wait_submit_info, nullptr);
{
std::lock_guard<std::mutex> queue_lock(device_->primary_queue_mutex());
err =
vkQueueSubmit(device_->primary_queue(), 1, &wait_submit_info, nullptr);
}
CheckResult(err, "vkQueueSubmit");
// Reset all command buffers.
vkResetCommandBuffer(render_cmd_buffer_,
VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
vkResetCommandBuffer(render_cmd_buffer_, 0);
auto& current_buffer = buffers_[current_buffer_index_];
// Build the command buffer that will execute all queued rendering buffers.
@@ -484,8 +487,11 @@ bool VulkanSwapChain::End() {
render_submit_info.pCommandBuffers = &render_cmd_buffer_;
render_submit_info.signalSemaphoreCount = 0;
render_submit_info.pSignalSemaphores = nullptr;
err =
vkQueueSubmit(device_->primary_queue(), 1, &render_submit_info, nullptr);
{
std::lock_guard<std::mutex> queue_lock(device_->primary_queue_mutex());
err = vkQueueSubmit(device_->primary_queue(), 1, &render_submit_info,
nullptr);
}
CheckResult(err, "vkQueueSubmit");
// Queue the present of our current image.
@@ -500,7 +506,10 @@ bool VulkanSwapChain::End() {
present_info.pSwapchains = swap_chains;
present_info.pImageIndices = swap_chain_image_indices;
present_info.pResults = nullptr;
err = vkQueuePresentKHR(device_->primary_queue(), &present_info);
{
std::lock_guard<std::mutex> queue_lock(device_->primary_queue_mutex());
err = vkQueuePresentKHR(device_->primary_queue(), &present_info);
}
switch (err) {
case VK_SUCCESS:
break;