[Vulkan] Context, remove Volk

Triang3l 2020-09-13 17:51:00 +03:00
parent dfa181a529
commit 203bf64d88
16 changed files with 1241 additions and 295 deletions

View File

@ -226,7 +226,6 @@ solution("xenia")
include("third_party/SDL2.lua")
include("third_party/snappy.lua")
include("third_party/spirv-tools.lua")
include("third_party/volk.lua")
include("third_party/xxhash.lua")
include("src/xenia")

View File

@ -20,7 +20,6 @@ project("xenia-app")
"mspack",
"snappy",
"spirv-tools",
"volk",
"xenia-app-discord",
"xenia-apu",
"xenia-apu-nop",

View File

@ -22,7 +22,6 @@ project("xenia-hid-demo")
links({
"fmt",
"imgui",
"volk",
"xenia-base",
"xenia-helper-sdl",
"xenia-hid",

View File

@ -9,9 +9,6 @@
#include "xenia/ui/d3d12/d3d12_context.h"
#include <cstdlib>
#include "xenia/base/cvar.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/ui/d3d12/d3d12_immediate_drawer.h"
@ -19,9 +16,6 @@
#include "xenia/ui/d3d12/d3d12_util.h"
#include "xenia/ui/window.h"
DEFINE_bool(d3d12_random_clear_color, false,
"Randomize presentation back buffer clear color.", "D3D12");
namespace xe {
namespace ui {
namespace d3d12 {
@ -32,110 +26,112 @@ D3D12Context::D3D12Context(D3D12Provider* provider, Window* target_window)
D3D12Context::~D3D12Context() { Shutdown(); }
bool D3D12Context::Initialize() {
context_lost_ = false;
if (!target_window_) {
return true;
}
auto& provider = GetD3D12Provider();
auto dxgi_factory = provider.GetDXGIFactory();
auto device = provider.GetDevice();
auto direct_queue = provider.GetDirectQueue();
context_lost_ = false;
swap_fence_current_value_ = 1;
swap_fence_completed_value_ = 0;
swap_fence_completion_event_ = CreateEvent(nullptr, false, false, nullptr);
if (swap_fence_completion_event_ == nullptr) {
XELOGE("Failed to create the composition fence completion event");
Shutdown();
return false;
}
// Create a fence for transient resources of compositing.
if (FAILED(device->CreateFence(0, D3D12_FENCE_FLAG_NONE,
IID_PPV_ARGS(&swap_fence_)))) {
XELOGE("Failed to create the composition fence");
Shutdown();
return false;
}
if (target_window_) {
swap_fence_current_value_ = 1;
swap_fence_completed_value_ = 0;
swap_fence_completion_event_ = CreateEvent(nullptr, false, false, nullptr);
if (swap_fence_completion_event_ == nullptr) {
XELOGE("Failed to create the composition fence completion event");
Shutdown();
return false;
}
// Create a fence for transient resources of compositing.
if (FAILED(device->CreateFence(0, D3D12_FENCE_FLAG_NONE,
IID_PPV_ARGS(&swap_fence_)))) {
XELOGE("Failed to create the composition fence");
Shutdown();
return false;
}
// Create the swap chain.
swap_chain_width_ = target_window_->scaled_width();
swap_chain_height_ = target_window_->scaled_height();
DXGI_SWAP_CHAIN_DESC1 swap_chain_desc;
swap_chain_desc.Width = swap_chain_width_;
swap_chain_desc.Height = swap_chain_height_;
swap_chain_desc.Format = kSwapChainFormat;
swap_chain_desc.Stereo = FALSE;
swap_chain_desc.SampleDesc.Count = 1;
swap_chain_desc.SampleDesc.Quality = 0;
swap_chain_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swap_chain_desc.BufferCount = kSwapChainBufferCount;
swap_chain_desc.Scaling = DXGI_SCALING_STRETCH;
swap_chain_desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
swap_chain_desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
swap_chain_desc.Flags = 0;
IDXGISwapChain1* swap_chain_1;
if (FAILED(dxgi_factory->CreateSwapChainForHwnd(
provider.GetDirectQueue(),
static_cast<HWND>(target_window_->native_handle()),
&swap_chain_desc, nullptr, nullptr, &swap_chain_1))) {
XELOGE("Failed to create a DXGI swap chain");
Shutdown();
return false;
}
if (FAILED(swap_chain_1->QueryInterface(IID_PPV_ARGS(&swap_chain_)))) {
XELOGE("Failed to get version 3 of the DXGI swap chain interface");
swap_chain_1->Release();
Shutdown();
return false;
}
// Create the swap chain.
swap_chain_width_ = target_window_->scaled_width();
swap_chain_height_ = target_window_->scaled_height();
DXGI_SWAP_CHAIN_DESC1 swap_chain_desc;
swap_chain_desc.Width = swap_chain_width_;
swap_chain_desc.Height = swap_chain_height_;
swap_chain_desc.Format = kSwapChainFormat;
swap_chain_desc.Stereo = FALSE;
swap_chain_desc.SampleDesc.Count = 1;
swap_chain_desc.SampleDesc.Quality = 0;
swap_chain_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swap_chain_desc.BufferCount = kSwapChainBufferCount;
swap_chain_desc.Scaling = DXGI_SCALING_STRETCH;
swap_chain_desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
swap_chain_desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
swap_chain_desc.Flags = 0;
IDXGISwapChain1* swap_chain_1;
if (FAILED(dxgi_factory->CreateSwapChainForHwnd(
provider.GetDirectQueue(),
reinterpret_cast<HWND>(target_window_->native_handle()),
&swap_chain_desc, nullptr, nullptr, &swap_chain_1))) {
XELOGE("Failed to create a DXGI swap chain");
Shutdown();
return false;
}
if (FAILED(swap_chain_1->QueryInterface(IID_PPV_ARGS(&swap_chain_)))) {
XELOGE("Failed to get version 3 of the DXGI swap chain interface");
swap_chain_1->Release();
Shutdown();
return false;
}
swap_chain_1->Release();
// Create a heap for RTV descriptors of swap chain buffers.
D3D12_DESCRIPTOR_HEAP_DESC rtv_heap_desc;
rtv_heap_desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV;
rtv_heap_desc.NumDescriptors = kSwapChainBufferCount;
rtv_heap_desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
rtv_heap_desc.NodeMask = 0;
if (FAILED(device->CreateDescriptorHeap(
&rtv_heap_desc, IID_PPV_ARGS(&swap_chain_rtv_heap_)))) {
XELOGE("Failed to create swap chain RTV descriptor heap");
// Create a heap for RTV descriptors of swap chain buffers.
D3D12_DESCRIPTOR_HEAP_DESC rtv_heap_desc;
rtv_heap_desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV;
rtv_heap_desc.NumDescriptors = kSwapChainBufferCount;
rtv_heap_desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
rtv_heap_desc.NodeMask = 0;
if (FAILED(device->CreateDescriptorHeap(
&rtv_heap_desc, IID_PPV_ARGS(&swap_chain_rtv_heap_)))) {
XELOGE("Failed to create swap chain RTV descriptor heap");
Shutdown();
return false;
}
swap_chain_rtv_heap_start_ =
swap_chain_rtv_heap_->GetCPUDescriptorHandleForHeapStart();
// Get the buffers and create their RTV descriptors.
if (!InitializeSwapChainBuffers()) {
Shutdown();
return false;
}
// Create the command list for compositing.
for (uint32_t i = 0; i < kSwapCommandAllocatorCount; ++i) {
if (FAILED(device->CreateCommandAllocator(
D3D12_COMMAND_LIST_TYPE_DIRECT,
IID_PPV_ARGS(&swap_command_allocators_[i])))) {
XELOGE("Failed to create a composition command allocator");
Shutdown();
return false;
}
swap_chain_rtv_heap_start_ =
swap_chain_rtv_heap_->GetCPUDescriptorHandleForHeapStart();
}
if (FAILED(device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT,
swap_command_allocators_[0], nullptr,
IID_PPV_ARGS(&swap_command_list_)))) {
XELOGE("Failed to create the composition graphics command list");
Shutdown();
return false;
}
// Initially in open state, wait until BeginSwap.
swap_command_list_->Close();
// Get the buffers and create their RTV descriptors.
if (!InitializeSwapChainBuffers()) {
Shutdown();
return false;
}
// Create the command list for compositing.
for (uint32_t i = 0; i < kSwapCommandAllocatorCount; ++i) {
if (FAILED(device->CreateCommandAllocator(
D3D12_COMMAND_LIST_TYPE_DIRECT,
IID_PPV_ARGS(&swap_command_allocators_[i])))) {
XELOGE("Failed to create a composition command allocator");
Shutdown();
return false;
}
}
if (FAILED(device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT,
swap_command_allocators_[0], nullptr,
IID_PPV_ARGS(&swap_command_list_)))) {
XELOGE("Failed to create the composition graphics command list");
Shutdown();
return false;
}
// Initially in open state, wait until BeginSwap.
swap_command_list_->Close();
// Initialize the immediate mode drawer if not offscreen.
immediate_drawer_ = std::make_unique<D3D12ImmediateDrawer>(*this);
if (!immediate_drawer_->Initialize()) {
Shutdown();
return false;
}
// Initialize the immediate mode drawer if not offscreen.
immediate_drawer_ = std::make_unique<D3D12ImmediateDrawer>(*this);
if (!immediate_drawer_->Initialize()) {
Shutdown();
return false;
}
return true;
@ -223,9 +219,11 @@ ImmediateDrawer* D3D12Context::immediate_drawer() {
return immediate_drawer_.get();
}
void D3D12Context::BeginSwap() {
bool D3D12Context::WasLost() { return context_lost_; }
bool D3D12Context::BeginSwap() {
if (!target_window_ || context_lost_) {
return;
return false;
}
// Resize the swap chain if the window is resized.
@ -252,13 +250,13 @@ void D3D12Context::BeginSwap() {
kSwapChainBufferCount, target_window_width, target_window_height,
kSwapChainFormat, 0))) {
context_lost_ = true;
return;
return false;
}
swap_chain_width_ = target_window_width;
swap_chain_height_ = target_window_height;
if (!InitializeSwapChainBuffers()) {
context_lost_ = true;
return;
return false;
}
}
@ -295,18 +293,11 @@ void D3D12Context::BeginSwap() {
D3D12_CPU_DESCRIPTOR_HANDLE back_buffer_rtv = GetSwapChainBackBufferRTV();
swap_command_list_->OMSetRenderTargets(1, &back_buffer_rtv, TRUE, nullptr);
float clear_color[4];
if (cvars::d3d12_random_clear_color) {
clear_color[0] = rand() / float(RAND_MAX); // NOLINT(runtime/threadsafe_fn)
clear_color[1] = 1.0f;
clear_color[2] = 0.0f;
} else {
clear_color[0] = 0.0f;
clear_color[1] = 0.0f;
clear_color[2] = 0.0f;
}
clear_color[3] = 1.0f;
GetClearColor(clear_color);
swap_command_list_->ClearRenderTargetView(back_buffer_rtv, clear_color, 0,
nullptr);
return true;
}
void D3D12Context::EndSwap() {

View File

@ -28,9 +28,9 @@ class D3D12Context : public GraphicsContext {
ImmediateDrawer* immediate_drawer() override;
bool WasLost() override { return context_lost_; }
bool WasLost() override;
void BeginSwap() override;
bool BeginSwap() override;
void EndSwap() override;
std::unique_ptr<RawImage> Capture() override;

View File

@ -9,8 +9,13 @@
#include "xenia/ui/graphics_context.h"
#include <cstdlib>
#include "xenia/base/cvar.h"
#include "xenia/ui/graphics_provider.h"
DEFINE_bool(random_clear_color, false, "Randomize window clear color.", "UI");
namespace xe {
namespace ui {
@ -26,5 +31,18 @@ bool GraphicsContext::MakeCurrent() { return true; }
void GraphicsContext::ClearCurrent() {}
void GraphicsContext::GetClearColor(float* rgba) {
if (cvars::random_clear_color) {
rgba[0] = rand() / float(RAND_MAX); // NOLINT(runtime/threadsafe_fn)
rgba[1] = 1.0f;
rgba[2] = 0.0f;
} else {
rgba[0] = 0.0f;
rgba[1] = 0.0f;
rgba[2] = 0.0f;
}
rgba[3] = 1.0f;
}
} // namespace ui
} // namespace xe
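
For context: this shared helper replaces the per-backend randomized clear color that d3d12_context.cc implemented with its own d3d12_random_clear_color cvar (removed in the hunk above), so all contexts now honor a single random_clear_color cvar. A minimal sketch of how a backend consumes it, mirroring the D3D12 and Vulkan call sites in this commit:

// Illustrative subclass usage; both backends in this commit do essentially
// this when beginning a swap.
float clear_color[4];
GetClearColor(clear_color);  // Opaque black, or a randomized color when the
                             // random_clear_color cvar is enabled.
// D3D12: passed to ClearRenderTargetView for the swap chain back buffer.
// Vulkan: copied into VkClearValue::color.float32 for the render pass clear.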

View File

@ -51,7 +51,8 @@ class GraphicsContext {
// This context must be made current in order for this call to work properly.
virtual bool WasLost() = 0;
virtual void BeginSwap() = 0;
// Returns true if able to draw now (the target surface is available).
virtual bool BeginSwap() = 0;
virtual void EndSwap() = 0;
virtual std::unique_ptr<RawImage> Capture() = 0;
@ -59,6 +60,8 @@ class GraphicsContext {
protected:
explicit GraphicsContext(GraphicsProvider* provider, Window* target_window);
static void GetClearColor(float* rgba);
GraphicsProvider* provider_ = nullptr;
Window* target_window_ = nullptr;
};
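
The BeginSwap return type change (void to bool) lets callers skip a frame when no target surface is available instead of blindly recording draws. A minimal sketch of the presentation loop this interface implies, assuming a paint handler that owns the context (the handler itself is illustrative, not part of this commit):

void OnPaint(GraphicsContext* context) {
  if (context->WasLost()) {
    return;  // Device/context loss (TDR etc.) - the context is junk now.
  }
  if (!context->BeginSwap()) {
    // No drawable surface right now (offscreen, zero-sized window, failed
    // swap chain recreation, ...) - skip this frame and retry on the next.
    return;
  }
  // ... record immediate drawer / presentation commands here ...
  context->EndSwap();
}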

View File

@ -9,8 +9,24 @@
#include "xenia/ui/vulkan/vulkan_context.h"
#include <algorithm>
#include <cstdint>
#include <vector>
#include "xenia/base/assert.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/platform.h"
#include "xenia/ui/vulkan/vulkan_immediate_drawer.h"
#include "xenia/ui/vulkan/vulkan_provider.h"
#include "xenia/ui/vulkan/vulkan_util.h"
#include "xenia/ui/window.h"
#if XE_PLATFORM_ANDROID
#include <android/native_window.h>
#elif XE_PLATFORM_WIN32
#include "xenia/base/platform_win.h"
#endif
namespace xe {
namespace ui {
@ -19,21 +35,842 @@ namespace vulkan {
VulkanContext::VulkanContext(VulkanProvider* provider, Window* target_window)
: GraphicsContext(provider, target_window) {}
bool VulkanContext::Initialize() { return false; }
bool VulkanContext::Initialize() {
context_lost_ = false;
if (!target_window_) {
return true;
}
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
VkFenceCreateInfo fence_create_info;
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_create_info.pNext = nullptr;
fence_create_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
VkCommandPoolCreateInfo command_pool_create_info;
command_pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
command_pool_create_info.pNext = nullptr;
command_pool_create_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
command_pool_create_info.queueFamilyIndex =
provider.queue_family_graphics_compute();
VkCommandBufferAllocateInfo command_buffer_allocate_info;
command_buffer_allocate_info.sType =
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.pNext = nullptr;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
command_buffer_allocate_info.commandBufferCount = 1;
for (uint32_t i = 0; i < kSwapchainMaxImageCount; ++i) {
SwapSubmission& submission = swap_submissions_[i];
if (dfn.vkCreateFence(device, &fence_create_info, nullptr,
&submission.fence) != VK_SUCCESS) {
XELOGE("Failed to create the Vulkan composition fences");
Shutdown();
return false;
}
if (dfn.vkCreateCommandPool(device, &command_pool_create_info, nullptr,
&submission.command_pool) != VK_SUCCESS) {
XELOGE("Failed to create the Vulkan composition command pools");
Shutdown();
return false;
}
command_buffer_allocate_info.commandPool = submission.command_pool;
if (dfn.vkAllocateCommandBuffers(device, &command_buffer_allocate_info,
&submission.command_buffer) !=
VK_SUCCESS) {
XELOGE("Failed to allocate the Vulkan composition command buffers");
Shutdown();
return false;
}
}
VkSemaphoreCreateInfo semaphore_create_info;
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = nullptr;
semaphore_create_info.flags = 0;
if (dfn.vkCreateSemaphore(device, &semaphore_create_info, nullptr,
&swap_image_acquisition_semaphore_) != VK_SUCCESS) {
XELOGE(
"Failed to create the Vulkan swap chain image acquisition semaphore");
Shutdown();
return false;
}
if (dfn.vkCreateSemaphore(device, &semaphore_create_info, nullptr,
&swap_render_completion_semaphore_) != VK_SUCCESS) {
XELOGE(
"Failed to create the Vulkan swap chain rendering completion "
"semaphore");
Shutdown();
return false;
}
immediate_drawer_ = std::make_unique<VulkanImmediateDrawer>(*this);
// TODO(Triang3l): Initialize the immediate drawer.
swap_swapchain_or_surface_recreation_needed_ = true;
return true;
}
void VulkanContext::Shutdown() {
if (!target_window_) {
return;
}
AwaitAllSwapSubmissionsCompletion();
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::InstanceFunctions& ifn = provider.ifn();
VkInstance instance = provider.instance();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
swap_swapchain_image_current_ = UINT32_MAX;
DestroySwapchainFramebuffers();
util::DestroyAndNullHandle(dfn.vkDestroySwapchainKHR, device,
swap_swapchain_);
util::DestroyAndNullHandle(dfn.vkDestroyRenderPass, device,
swap_render_pass_);
util::DestroyAndNullHandle(ifn.vkDestroySurfaceKHR, instance, swap_surface_);
swap_swapchain_or_surface_recreation_needed_ = false;
util::DestroyAndNullHandle(dfn.vkDestroySemaphore, device,
swap_render_completion_semaphore_);
util::DestroyAndNullHandle(dfn.vkDestroySemaphore, device,
swap_image_acquisition_semaphore_);
for (uint32_t i = 0; i < kSwapchainMaxImageCount; ++i) {
SwapSubmission& submission = swap_submissions_[i];
util::DestroyAndNullHandle(dfn.vkDestroyCommandPool, device,
submission.command_pool);
util::DestroyAndNullHandle(dfn.vkDestroyFence, device, submission.fence);
}
swap_submission_current_ = 1;
swap_submission_completed_ = 0;
}
ImmediateDrawer* VulkanContext::immediate_drawer() {
return immediate_drawer_.get();
}
void VulkanContext::BeginSwap() {}
bool VulkanContext::WasLost() { return context_lost_; }
void VulkanContext::EndSwap() {}
bool VulkanContext::BeginSwap() {
if (!target_window_ || context_lost_) {
return false;
}
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::InstanceFunctions& ifn = provider.ifn();
VkPhysicalDevice physical_device = provider.physical_device();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
uint32_t window_width = uint32_t(target_window_->scaled_width());
uint32_t window_height = uint32_t(target_window_->scaled_height());
if (swap_swapchain_ != VK_NULL_HANDLE) {
// Check if need to resize.
assert_true(swap_surface_ != VK_NULL_HANDLE);
// Win32 has minImageExtent == maxImageExtent == currentExtent, so the
// capabilities need to be requested every time they are needed.
VkSurfaceCapabilitiesKHR surface_capabilities;
if (ifn.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
physical_device, swap_surface_, &surface_capabilities) ==
VK_SUCCESS) {
if (swap_swapchain_extent_.width !=
xe::clamp(window_width, surface_capabilities.minImageExtent.width,
surface_capabilities.maxImageExtent.width) ||
swap_swapchain_extent_.height !=
xe::clamp(window_height,
surface_capabilities.minImageExtent.height,
surface_capabilities.maxImageExtent.height)) {
swap_swapchain_or_surface_recreation_needed_ = true;
}
}
}
// If the swap chain turns out to be out of date, try to recreate it on the
// second attempt (to avoid skipping the frame entirely in this case).
for (uint32_t attempt = 0; attempt < 2; ++attempt) {
if (swap_swapchain_or_surface_recreation_needed_) {
// If recreation fails, don't retry until some change happens.
swap_swapchain_or_surface_recreation_needed_ = false;
AwaitAllSwapSubmissionsCompletion();
uint32_t queue_family_graphics_compute =
provider.queue_family_graphics_compute();
if (swap_surface_ == VK_NULL_HANDLE) {
assert_true(swap_swapchain_ == VK_NULL_HANDLE);
assert_true(swap_swapchain_image_views_.empty());
assert_true(swap_swapchain_framebuffers_.empty());
VkInstance instance = provider.instance();
VkResult surface_create_result;
#if XE_PLATFORM_ANDROID
VkAndroidSurfaceCreateInfoKHR surface_create_info;
surface_create_info.window =
reinterpret_cast<ANativeWindow*>(target_window_->native_handle());
if (!surface_create_info.window) {
// The activity is in background - try again when the window is
// created.
return false;
}
surface_create_info.sType =
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
surface_create_info.pNext = nullptr;
surface_create_info.flags = 0;
surface_create_result = ifn.vkCreateAndroidSurfaceKHR(
instance, &surface_create_info, nullptr, &swap_surface_);
#elif XE_PLATFORM_WIN32
VkWin32SurfaceCreateInfoKHR surface_create_info;
surface_create_info.sType =
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
surface_create_info.pNext = nullptr;
surface_create_info.flags = 0;
surface_create_info.hinstance = reinterpret_cast<HINSTANCE>(
target_window_->native_platform_handle());
surface_create_info.hwnd =
reinterpret_cast<HWND>(target_window_->native_handle());
surface_create_result = ifn.vkCreateWin32SurfaceKHR(
instance, &surface_create_info, nullptr, &swap_surface_);
#else
#error No Vulkan surface creation for the target platform.
#endif
if (surface_create_result != VK_SUCCESS) {
XELOGE("Failed to create a Vulkan surface");
return false;
}
// FIXME(Triang3l): Allow a separate queue for present - see
// vulkan_provider.cc for details.
VkBool32 surface_supported;
if (ifn.vkGetPhysicalDeviceSurfaceSupportKHR(
physical_device, queue_family_graphics_compute, swap_surface_,
&surface_supported) != VK_SUCCESS ||
!surface_supported) {
XELOGE(
"The Vulkan graphics and compute queue doesn't support "
"presentation");
ifn.vkDestroySurfaceKHR(instance, swap_surface_, nullptr);
swap_surface_ = VK_NULL_HANDLE;
return false;
}
// Choose an SDR format, 8.8.8.8 preferred, or if not available, any
// supported. Windows and GNU/Linux use B8G8R8A8, Android uses R8G8B8A8.
std::vector<VkSurfaceFormatKHR> surface_formats;
VkResult surface_formats_get_result;
for (;;) {
uint32_t surface_format_count = uint32_t(surface_formats.size());
bool surface_formats_was_empty = !surface_format_count;
surface_formats_get_result = ifn.vkGetPhysicalDeviceSurfaceFormatsKHR(
physical_device, swap_surface_, &surface_format_count,
surface_formats_was_empty ? nullptr : surface_formats.data());
// If the original surface format count was 0 (first call), SUCCESS is
// returned, not INCOMPLETE.
if (surface_formats_get_result == VK_SUCCESS ||
surface_formats_get_result == VK_INCOMPLETE) {
surface_formats.resize(surface_format_count);
if (surface_formats_get_result == VK_SUCCESS &&
(!surface_formats_was_empty || !surface_format_count)) {
break;
}
} else {
break;
}
}
if (surface_formats_get_result != VK_SUCCESS ||
surface_formats.empty()) {
XELOGE("Failed to get Vulkan surface formats");
ifn.vkDestroySurfaceKHR(instance, swap_surface_, nullptr);
swap_surface_ = VK_NULL_HANDLE;
return false;
}
VkSurfaceFormatKHR surface_format;
if (surface_formats.size() == 1 &&
surface_formats[0].format == VK_FORMAT_UNDEFINED) {
#if XE_PLATFORM_ANDROID
surface_format.format = VK_FORMAT_R8G8B8A8_UNORM;
#else
surface_format.format = VK_FORMAT_B8G8R8A8_UNORM;
#endif
surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
} else {
surface_format = surface_formats.front();
for (const VkSurfaceFormatKHR& surface_format_current :
surface_formats) {
if (surface_format_current.format == VK_FORMAT_B8G8R8A8_UNORM ||
surface_format_current.format == VK_FORMAT_R8G8B8A8_UNORM ||
surface_format_current.format ==
VK_FORMAT_A8B8G8R8_UNORM_PACK32) {
surface_format = surface_format_current;
break;
}
}
}
if (swap_surface_format_.format != surface_format.format) {
util::DestroyAndNullHandle(dfn.vkDestroyRenderPass, device,
swap_render_pass_);
}
swap_surface_format_ = surface_format;
// Prefer a low-latency present mode because emulation is done on the
// same queue, ordered by the decreasing amount of tearing, fall back to
// FIFO if no other options.
swap_surface_present_mode_ = VK_PRESENT_MODE_FIFO_KHR;
std::vector<VkPresentModeKHR> surface_present_modes;
VkResult surface_present_modes_get_result;
for (;;) {
uint32_t surface_present_mode_count =
uint32_t(surface_present_modes.size());
bool surface_present_modes_was_empty = !surface_present_mode_count;
surface_present_modes_get_result =
ifn.vkGetPhysicalDeviceSurfacePresentModesKHR(
physical_device, swap_surface_, &surface_present_mode_count,
surface_present_modes_was_empty
? nullptr
: surface_present_modes.data());
// If the original surface present mode count was 0 (first call),
// SUCCESS is returned, not INCOMPLETE.
if (surface_present_modes_get_result == VK_SUCCESS ||
surface_present_modes_get_result == VK_INCOMPLETE) {
surface_present_modes.resize(surface_present_mode_count);
if (surface_present_modes_get_result == VK_SUCCESS &&
(!surface_present_modes_was_empty ||
!surface_present_mode_count)) {
break;
}
} else {
break;
}
}
if (surface_present_modes_get_result == VK_SUCCESS) {
static const VkPresentModeKHR present_modes_preferred[] = {
VK_PRESENT_MODE_MAILBOX_KHR,
VK_PRESENT_MODE_FIFO_RELAXED_KHR,
VK_PRESENT_MODE_IMMEDIATE_KHR,
};
for (size_t i = 0; i < xe::countof(present_modes_preferred); ++i) {
VkPresentModeKHR present_mode_preferred =
present_modes_preferred[i];
if (std::find(surface_present_modes.cbegin(),
surface_present_modes.cend(),
present_mode_preferred) !=
surface_present_modes.cend()) {
swap_surface_present_mode_ = present_mode_preferred;
break;
}
}
}
}
// Recreate the swap chain unconditionally because a request was made.
// The old swapchain will be retired even if vkCreateSwapchainKHR fails,
// so destroy the framebuffers and the image views unconditionally.
// If anything fails before the vkCreateSwapchainKHR call, also destroy
// the swapchain to fulfill the request.
// It was safe to handle errors while creating the surface without caring
// about destroying the swapchain, because there can't be swapchain when
// there is no surface.
DestroySwapchainFramebuffers();
// Win32 has minImageExtent == maxImageExtent == currentExtent, so the
// capabilities need to be requested every time they are needed.
VkSurfaceCapabilitiesKHR surface_capabilities;
if (ifn.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
physical_device, swap_surface_, &surface_capabilities) !=
VK_SUCCESS) {
XELOGE("Failed to get Vulkan surface capabilities");
util::DestroyAndNullHandle(dfn.vkDestroySwapchainKHR, device,
swap_swapchain_);
return false;
}
// TODO(Triang3l): Support rotated transforms.
if (!(surface_capabilities.supportedTransforms &
VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) {
XELOGE("The Vulkan surface doesn't support identity transform");
util::DestroyAndNullHandle(dfn.vkDestroySwapchainKHR, device,
swap_swapchain_);
return false;
}
VkSwapchainCreateInfoKHR swapchain_create_info;
swapchain_create_info.imageExtent.width =
xe::clamp(window_width, surface_capabilities.minImageExtent.width,
surface_capabilities.maxImageExtent.width);
swapchain_create_info.imageExtent.height =
xe::clamp(window_height, surface_capabilities.minImageExtent.height,
surface_capabilities.maxImageExtent.height);
if (!swapchain_create_info.imageExtent.width ||
!swapchain_create_info.imageExtent.height) {
// Everything else is fine with the surface, but the window is too
// small, try again when the window may be resized (won't try to do some
// vkCreate* every BeginSwap, will reach this part again, so okay to set
// swap_swapchain_or_surface_recreation_needed_ back to true).
swap_swapchain_or_surface_recreation_needed_ = true;
util::DestroyAndNullHandle(dfn.vkDestroySwapchainKHR, device,
swap_swapchain_);
return false;
}
swapchain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swapchain_create_info.pNext = nullptr;
swapchain_create_info.flags = 0;
swapchain_create_info.surface = swap_surface_;
swapchain_create_info.minImageCount = kSwapchainMaxImageCount;
if (surface_capabilities.maxImageCount) {
swapchain_create_info.minImageCount =
std::min(swapchain_create_info.minImageCount,
surface_capabilities.maxImageCount);
}
swapchain_create_info.imageFormat = swap_surface_format_.format;
swapchain_create_info.imageColorSpace = swap_surface_format_.colorSpace;
swapchain_create_info.imageArrayLayers = 1;
swapchain_create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// FIXME(Triang3l): Allow a separate queue for present - see
// vulkan_provider.cc for details.
swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
swapchain_create_info.queueFamilyIndexCount = 1;
swapchain_create_info.pQueueFamilyIndices =
&queue_family_graphics_compute;
// TODO(Triang3l): Support rotated transforms.
swapchain_create_info.preTransform =
VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
swapchain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
if (!(surface_capabilities.supportedCompositeAlpha &
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR)) {
if (surface_capabilities.supportedCompositeAlpha &
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) {
swapchain_create_info.compositeAlpha =
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
} else {
// Whatever. supportedCompositeAlpha must have at least one bit set,
// but if it somehow doesn't (impossible situation according to the
// specification, but who knows), just assume opaque.
uint32_t composite_alpha_bit_index;
if (xe::bit_scan_forward(
uint32_t(surface_capabilities.supportedCompositeAlpha),
&composite_alpha_bit_index)) {
swapchain_create_info.compositeAlpha = VkCompositeAlphaFlagBitsKHR(
uint32_t(1) << composite_alpha_bit_index);
}
}
}
swapchain_create_info.presentMode = swap_surface_present_mode_;
swapchain_create_info.clipped = VK_TRUE;
swapchain_create_info.oldSwapchain = swap_swapchain_;
VkResult swapchain_create_result = dfn.vkCreateSwapchainKHR(
device, &swapchain_create_info, nullptr, &swap_swapchain_);
// The old swapchain is retired even if vkCreateSwapchainKHR has failed.
if (swapchain_create_info.oldSwapchain != VK_NULL_HANDLE) {
dfn.vkDestroySwapchainKHR(device, swapchain_create_info.oldSwapchain,
nullptr);
}
if (swapchain_create_result != VK_SUCCESS) {
XELOGE("Failed to create a Vulkan swapchain");
swap_swapchain_ = VK_NULL_HANDLE;
return false;
}
swap_swapchain_extent_ = swapchain_create_info.imageExtent;
// The render pass is needed to create framebuffers for swapchain images.
// It depends on the surface format, and thus can be reused with different
// surfaces by different swapchains, so it has separate lifetime tracking.
// It's safe to fail now (though destroying the new swapchain), because
// the request to destroy the old VkSwapchain somehow (after retiring, or
// directly) has been fulfilled.
if (swap_render_pass_ == VK_NULL_HANDLE) {
VkAttachmentDescription render_pass_color_attachment;
render_pass_color_attachment.flags = 0;
render_pass_color_attachment.format = swap_surface_format_.format;
render_pass_color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
render_pass_color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
render_pass_color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
render_pass_color_attachment.stencilLoadOp =
VK_ATTACHMENT_LOAD_OP_DONT_CARE;
render_pass_color_attachment.stencilStoreOp =
VK_ATTACHMENT_STORE_OP_DONT_CARE;
render_pass_color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
render_pass_color_attachment.finalLayout =
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference render_pass_color_attachment_reference;
render_pass_color_attachment_reference.attachment = 0;
render_pass_color_attachment_reference.layout =
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription render_pass_subpass;
render_pass_subpass.flags = 0;
render_pass_subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
render_pass_subpass.inputAttachmentCount = 0;
render_pass_subpass.pInputAttachments = nullptr;
render_pass_subpass.colorAttachmentCount = 1;
render_pass_subpass.pColorAttachments =
&render_pass_color_attachment_reference;
render_pass_subpass.pResolveAttachments = nullptr;
render_pass_subpass.pDepthStencilAttachment = nullptr;
render_pass_subpass.preserveAttachmentCount = 0;
render_pass_subpass.pPreserveAttachments = nullptr;
VkRenderPassCreateInfo render_pass_create_info;
render_pass_create_info.sType =
VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
render_pass_create_info.pNext = nullptr;
render_pass_create_info.flags = 0;
render_pass_create_info.attachmentCount = 1;
render_pass_create_info.pAttachments = &render_pass_color_attachment;
render_pass_create_info.subpassCount = 1;
render_pass_create_info.pSubpasses = &render_pass_subpass;
render_pass_create_info.dependencyCount = 0;
render_pass_create_info.pDependencies = nullptr;
if (dfn.vkCreateRenderPass(device, &render_pass_create_info, nullptr,
&swap_render_pass_) != VK_SUCCESS) {
XELOGE("Failed to create the Vulkan presentation render pass.");
dfn.vkDestroySwapchainKHR(device, swap_swapchain_, nullptr);
swap_swapchain_ = VK_NULL_HANDLE;
return false;
}
}
std::vector<VkImage> swapchain_images;
uint32_t swapchain_image_count;
VkResult swapchain_images_get_result;
for (;;) {
swapchain_image_count = uint32_t(swapchain_images.size());
bool swapchain_images_was_empty = !swapchain_image_count;
swapchain_images_get_result = dfn.vkGetSwapchainImagesKHR(
device, swap_swapchain_, &swapchain_image_count,
swapchain_images_was_empty ? nullptr : swapchain_images.data());
// If the original swapchain image count was 0 (first call), SUCCESS is
// returned, not INCOMPLETE.
if (swapchain_images_get_result == VK_SUCCESS ||
swapchain_images_get_result == VK_INCOMPLETE) {
swapchain_images.resize(swapchain_image_count);
if (swapchain_images_get_result == VK_SUCCESS &&
(!swapchain_images_was_empty || !swapchain_image_count)) {
break;
}
} else {
break;
}
}
if (swapchain_images_get_result != VK_SUCCESS ||
swapchain_images.empty()) {
XELOGE("Failed to get Vulkan swapchain images");
dfn.vkDestroySwapchainKHR(device, swap_swapchain_, nullptr);
swap_swapchain_ = VK_NULL_HANDLE;
return false;
}
assert_true(swap_swapchain_image_views_.empty());
swap_swapchain_image_views_.reserve(swapchain_image_count);
assert_true(swap_swapchain_framebuffers_.empty());
swap_swapchain_framebuffers_.reserve(swapchain_image_count);
VkImageViewCreateInfo swapchain_image_view_create_info;
swapchain_image_view_create_info.sType =
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
swapchain_image_view_create_info.pNext = nullptr;
swapchain_image_view_create_info.flags = 0;
swapchain_image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
swapchain_image_view_create_info.format = swap_surface_format_.format;
swapchain_image_view_create_info.components.r =
VK_COMPONENT_SWIZZLE_IDENTITY;
swapchain_image_view_create_info.components.g =
VK_COMPONENT_SWIZZLE_IDENTITY;
swapchain_image_view_create_info.components.b =
VK_COMPONENT_SWIZZLE_IDENTITY;
swapchain_image_view_create_info.components.a =
VK_COMPONENT_SWIZZLE_IDENTITY;
swapchain_image_view_create_info.subresourceRange.aspectMask =
VK_IMAGE_ASPECT_COLOR_BIT;
swapchain_image_view_create_info.subresourceRange.baseMipLevel = 0;
swapchain_image_view_create_info.subresourceRange.levelCount = 1;
swapchain_image_view_create_info.subresourceRange.baseArrayLayer = 0;
swapchain_image_view_create_info.subresourceRange.layerCount = 1;
VkFramebufferCreateInfo swapchain_framebuffer_create_info;
swapchain_framebuffer_create_info.sType =
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
swapchain_framebuffer_create_info.pNext = nullptr;
swapchain_framebuffer_create_info.flags = 0;
swapchain_framebuffer_create_info.renderPass = swap_render_pass_;
swapchain_framebuffer_create_info.attachmentCount = 1;
swapchain_framebuffer_create_info.width = swap_swapchain_extent_.width;
swapchain_framebuffer_create_info.height = swap_swapchain_extent_.height;
swapchain_framebuffer_create_info.layers = 1;
for (uint32_t i = 0; i < swapchain_image_count; ++i) {
VkImage swapchain_image = swapchain_images[i];
swapchain_image_view_create_info.image = swapchain_image;
VkImageView swapchain_image_view;
if (dfn.vkCreateImageView(device, &swapchain_image_view_create_info,
nullptr,
&swapchain_image_view) != VK_SUCCESS) {
XELOGE("Failed to create Vulkan swapchain image views");
DestroySwapchainFramebuffers();
dfn.vkDestroySwapchainKHR(device, swap_swapchain_, nullptr);
swap_swapchain_ = VK_NULL_HANDLE;
return false;
}
swap_swapchain_image_views_.push_back(swapchain_image_view);
swapchain_framebuffer_create_info.pAttachments = &swapchain_image_view;
VkFramebuffer swapchain_framebuffer;
if (dfn.vkCreateFramebuffer(device, &swapchain_framebuffer_create_info,
nullptr,
&swapchain_framebuffer) != VK_SUCCESS) {
XELOGE("Failed to create Vulkan swapchain framebuffers");
DestroySwapchainFramebuffers();
dfn.vkDestroySwapchainKHR(device, swap_swapchain_, nullptr);
swap_swapchain_ = VK_NULL_HANDLE;
return false;
}
swap_swapchain_framebuffers_.push_back(swapchain_framebuffer);
}
}
if (swap_swapchain_ == VK_NULL_HANDLE) {
return false;
}
assert_true(swap_surface_ != VK_NULL_HANDLE);
assert_true(swap_render_pass_ != VK_NULL_HANDLE);
assert_false(swap_swapchain_image_views_.empty());
assert_false(swap_swapchain_framebuffers_.empty());
// Await the frame data to be available before doing anything else.
if (swap_submission_completed_ + kSwapchainMaxImageCount <
swap_submission_current_) {
uint64_t submission_awaited =
swap_submission_current_ - kSwapchainMaxImageCount;
VkFence submission_fences[kSwapchainMaxImageCount];
uint32_t submission_fence_count = 0;
while (swap_submission_completed_ + 1 + submission_fence_count <=
submission_awaited) {
assert_true(submission_fence_count < kSwapchainMaxImageCount);
uint32_t submission_index =
(swap_submission_completed_ + 1 + submission_fence_count) %
kSwapchainMaxImageCount;
submission_fences[submission_fence_count++] =
swap_submissions_[submission_index].fence;
}
if (submission_fence_count) {
if (dfn.vkWaitForFences(device, submission_fence_count,
submission_fences, VK_TRUE,
UINT64_MAX) != VK_SUCCESS) {
XELOGE("Failed to await the Vulkan presentation submission fences");
return false;
}
swap_submission_completed_ += submission_fence_count;
}
}
const SwapSubmission& submission =
swap_submissions_[swap_submission_current_ % kSwapchainMaxImageCount];
if (dfn.vkResetCommandPool(device, submission.command_pool, 0) !=
VK_SUCCESS) {
XELOGE("Failed to reset the Vulkan presentation command pool");
return false;
}
// After the image is acquired, this function must not fail before the
// semaphore has been signaled, and the image also must be returned to the
// swapchain.
uint32_t acquired_image_index;
switch (dfn.vkAcquireNextImageKHR(device, swap_swapchain_, UINT64_MAX,
swap_image_acquisition_semaphore_,
nullptr, &acquired_image_index)) {
case VK_SUCCESS:
case VK_SUBOPTIMAL_KHR:
// Not recreating in case of suboptimal, just to prevent a recreation
// loop in case the newly created swapchain is suboptimal too.
break;
case VK_ERROR_DEVICE_LOST:
context_lost_ = true;
return false;
case VK_ERROR_OUT_OF_DATE_KHR:
case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT:
swap_swapchain_or_surface_recreation_needed_ = true;
continue;
case VK_ERROR_SURFACE_LOST_KHR:
RequestSurfaceRecreation();
continue;
default:
return false;
}
swap_swapchain_image_current_ = acquired_image_index;
VkCommandBufferBeginInfo command_buffer_begin_info;
command_buffer_begin_info.sType =
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.pNext = nullptr;
command_buffer_begin_info.flags =
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
command_buffer_begin_info.pInheritanceInfo = nullptr;
dfn.vkBeginCommandBuffer(submission.command_buffer,
&command_buffer_begin_info);
VkClearValue clear_value;
GetClearColor(clear_value.color.float32);
VkRenderPassBeginInfo render_pass_begin_info;
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin_info.pNext = nullptr;
render_pass_begin_info.renderPass = swap_render_pass_;
render_pass_begin_info.framebuffer =
swap_swapchain_framebuffers_[acquired_image_index];
render_pass_begin_info.renderArea.offset.x = 0;
render_pass_begin_info.renderArea.offset.y = 0;
render_pass_begin_info.renderArea.extent = swap_swapchain_extent_;
render_pass_begin_info.clearValueCount = 1;
render_pass_begin_info.pClearValues = &clear_value;
dfn.vkCmdBeginRenderPass(submission.command_buffer, &render_pass_begin_info,
VK_SUBPASS_CONTENTS_INLINE);
return true;
}
// vkAcquireNextImageKHR returned VK_ERROR_OUT_OF_DATE_KHR even after
// recreation.
return false;
}
void VulkanContext::EndSwap() {
if (!target_window_ || context_lost_) {
return;
}
assert_true(swap_swapchain_image_current_ != UINT32_MAX);
if (swap_swapchain_image_current_ == UINT32_MAX) {
return;
}
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
VkQueue queue_graphics_compute = provider.queue_graphics_compute();
const SwapSubmission& submission =
swap_submissions_[swap_submission_current_ % kSwapchainMaxImageCount];
dfn.vkCmdEndRenderPass(submission.command_buffer);
dfn.vkEndCommandBuffer(submission.command_buffer);
dfn.vkResetFences(device, 1, &submission.fence);
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = nullptr;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &swap_image_acquisition_semaphore_;
VkPipelineStageFlags image_acquisition_semaphore_wait_stage =
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
submit_info.pWaitDstStageMask = &image_acquisition_semaphore_wait_stage;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &submission.command_buffer;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &swap_render_completion_semaphore_;
VkResult submit_result = dfn.vkQueueSubmit(queue_graphics_compute, 1,
&submit_info, submission.fence);
if (submit_result != VK_SUCCESS) {
// If failed, can't even return the swapchain image - so treat all errors as
// context loss.
context_lost_ = true;
return;
}
++swap_submission_current_;
VkPresentInfoKHR present_info;
present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present_info.pNext = nullptr;
present_info.waitSemaphoreCount = 1;
present_info.pWaitSemaphores = &swap_render_completion_semaphore_;
present_info.swapchainCount = 1;
present_info.pSwapchains = &swap_swapchain_;
present_info.pImageIndices = &swap_swapchain_image_current_;
present_info.pResults = nullptr;
// FIXME(Triang3l): Allow a separate queue for present - see
// vulkan_provider.cc for details.
VkResult present_result =
dfn.vkQueuePresentKHR(queue_graphics_compute, &present_info);
swap_swapchain_image_current_ = UINT32_MAX;
switch (present_result) {
case VK_SUCCESS:
case VK_SUBOPTIMAL_KHR:
// Not recreating in case of suboptimal, just to prevent a recreation
// loop in case the newly created swapchain is suboptimal too.
break;
case VK_ERROR_OUT_OF_DATE_KHR:
case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT:
swap_swapchain_or_surface_recreation_needed_ = true;
return;
case VK_ERROR_SURFACE_LOST_KHR:
// Safe to await submission completion now - swap_submission_current_ has
// already been incremented to the next frame.
RequestSurfaceRecreation();
return;
default:
// Treat any error as device loss since it would leave the semaphore
// forever signaled anyway, and the image won't be returned to the
// swapchain.
context_lost_ = true;
return;
}
}
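// Condensed outline of one presentation frame as implemented by BeginSwap and
// EndSwap above (a summary of the code, nothing new):
//   BeginSwap:
//     recreate the surface/swapchain/framebuffers if requested;
//     wait on the slot's fence (throttles the CPU to kSwapchainMaxImageCount
//       frames in flight), then vkResetCommandPool;
//     vkAcquireNextImageKHR - signals swap_image_acquisition_semaphore_;
//     vkBeginCommandBuffer, vkCmdBeginRenderPass (clears the back buffer).
//   EndSwap:
//     vkCmdEndRenderPass, vkEndCommandBuffer;
//     vkQueueSubmit - waits on the acquisition semaphore at the color
//       attachment output stage, signals swap_render_completion_semaphore_
//       and the slot's fence;
//     vkQueuePresentKHR - waits on the render completion semaphore.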
std::unique_ptr<RawImage> VulkanContext::Capture() {
// TODO(Triang3l): Read back swap chain front buffer.
return nullptr;
}
void VulkanContext::RequestSurfaceRecreation() {
#if XE_PLATFORM_ANDROID
// The surface doesn't exist when the activity is in background.
swap_swapchain_or_surface_recreation_needed_ =
target_window_->native_handle() != nullptr;
#else
swap_swapchain_or_surface_recreation_needed_ = true;
#endif
if (swap_surface_ == VK_NULL_HANDLE) {
return;
}
AwaitAllSwapSubmissionsCompletion();
DestroySwapchainFramebuffers();
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::InstanceFunctions& ifn = provider.ifn();
VkInstance instance = provider.instance();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
util::DestroyAndNullHandle(dfn.vkDestroySwapchainKHR, device,
swap_swapchain_);
ifn.vkDestroySurfaceKHR(instance, swap_surface_, nullptr);
swap_surface_ = VK_NULL_HANDLE;
}
void VulkanContext::AwaitAllSwapSubmissionsCompletion() {
assert_not_null(target_window_);
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
VkFence fences[kSwapchainMaxImageCount];
uint32_t fence_count = 0;
while (swap_submission_completed_ + 1 < swap_submission_current_) {
assert_true(fence_count < kSwapchainMaxImageCount);
uint32_t submission_index =
++swap_submission_completed_ % kSwapchainMaxImageCount;
fences[fence_count++] = swap_submissions_[submission_index].fence;
}
if (fence_count && !context_lost_) {
dfn.vkWaitForFences(device, fence_count, fences, VK_TRUE, UINT64_MAX);
}
}
void VulkanContext::DestroySwapchainFramebuffers() {
assert_not_null(target_window_);
const VulkanProvider& provider = GetVulkanProvider();
const VulkanProvider::DeviceFunctions& dfn = provider.dfn();
VkDevice device = provider.device();
for (VkFramebuffer framebuffer : swap_swapchain_framebuffers_) {
dfn.vkDestroyFramebuffer(device, framebuffer, nullptr);
}
swap_swapchain_framebuffers_.clear();
for (VkImageView image_view : swap_swapchain_image_views_) {
dfn.vkDestroyImageView(device, image_view, nullptr);
}
swap_swapchain_image_views_.clear();
}
} // namespace vulkan
} // namespace ui
} // namespace xe
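
The "query count, resize, query again" loop above appears several times in this commit (surface formats, present modes, swapchain images, and physical devices in vulkan_provider.cc) because the count may change between the two calls. A generic sketch of the pattern, under the assumption that a helper like this existed (the commit deliberately open-codes it at each call site):

// Illustrative helper only. 'enumerate' is a callable wrapping a
// vkGet*/vkEnumerate* function, e.g.:
//   EnumerateVulkanObjects(
//       [&](uint32_t* count, VkSurfaceFormatKHR* data) {
//         return ifn.vkGetPhysicalDeviceSurfaceFormatsKHR(
//             physical_device, swap_surface_, count, data);
//       },
//       surface_formats);
template <typename T, typename F>
VkResult EnumerateVulkanObjects(F enumerate, std::vector<T>& out) {
  for (;;) {
    uint32_t count = uint32_t(out.size());
    bool was_empty = !count;
    VkResult result = enumerate(&count, was_empty ? nullptr : out.data());
    if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
      return result;
    }
    out.resize(count);
    // When only the count is queried (the first, empty call), VK_SUCCESS is
    // returned rather than VK_INCOMPLETE even if elements remain to be
    // written, so only trust VK_SUCCESS from a call that had a buffer (or a
    // genuinely empty result).
    if (result == VK_SUCCESS && (!was_empty || !count)) {
      return VK_SUCCESS;
    }
  }
}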

View File

@ -10,7 +10,9 @@
#ifndef XENIA_UI_VULKAN_VULKAN_CONTEXT_H_
#define XENIA_UI_VULKAN_VULKAN_CONTEXT_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "xenia/ui/graphics_context.h"
#include "xenia/ui/vulkan/vulkan_immediate_drawer.h"
@ -24,19 +26,22 @@ class VulkanContext : public GraphicsContext {
public:
ImmediateDrawer* immediate_drawer() override;
// Returns true if the OS took away our context because we caused a TDR or
// some other outstanding error. When this happens, this context, as well as
// any other shared contexts are junk.
// This context must be made current in order for this call to work properly.
bool WasLost() override { return false; }
bool WasLost() override;
void BeginSwap() override;
bool BeginSwap() override;
void EndSwap() override;
std::unique_ptr<RawImage> Capture() override;
VulkanProvider* GetVulkanProvider() const {
return static_cast<VulkanProvider*>(provider_);
VulkanProvider& GetVulkanProvider() const {
return static_cast<VulkanProvider&>(*provider_);
}
void RequestSurfaceRecreation();
VkCommandBuffer GetSwapCommandBuffer() const {
return swap_submissions_[swap_submission_current_ % kSwapchainMaxImageCount]
.command_buffer;
}
private:
@ -45,6 +50,62 @@ class VulkanContext : public GraphicsContext {
bool Initialize();
private:
void Shutdown();
void AwaitAllSwapSubmissionsCompletion();
// AwaitAllSwapSubmissionsCompletion must be called before. As this can be
// used in swapchain creation or in shutdown,
// swap_swapchain_or_surface_recreation_needed_ won't be set by this.
void DestroySwapchainFramebuffers();
bool context_lost_ = false;
// Actual image count may be less, depending on what the surface can provide.
static constexpr uint32_t kSwapchainMaxImageCount = 3;
// Because of the nature of Vulkan fences (that they belong only to their
// specific submission, not the submission and all prior submissions), ALL
// fences since the last completed submission to the needed submission should
// individually be checked, not just the last one. However, this submission
// number abstraction hides the loosely ordered design of Vulkan submissions
// (it's okay to wait first for completion of A, then of B, no matter if they
// are actually completed in AB or in BA order).
struct SwapSubmission {
// One pool per frame, with resetting the pool itself rather than individual
// command buffers (resetting command buffers themselves is not recommended
// by Arm since it makes the pool unable to use a single big allocation), as
// recommended by Nvidia (Direct3D 12-like way):
// https://developer.nvidia.com/sites/default/files/akamai/gameworks/blog/munich/mschott_vulkan_multi_threading.pdf
VkFence fence = VK_NULL_HANDLE;
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer;
};
SwapSubmission swap_submissions_[kSwapchainMaxImageCount];
uint64_t swap_submission_current_ = 1;
uint64_t swap_submission_completed_ = 0;
VkSemaphore swap_image_acquisition_semaphore_ = VK_NULL_HANDLE;
VkSemaphore swap_render_completion_semaphore_ = VK_NULL_HANDLE;
VkSurfaceKHR swap_surface_ = VK_NULL_HANDLE;
VkSurfaceFormatKHR swap_surface_format_ = {VK_FORMAT_UNDEFINED,
VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
VkPresentModeKHR swap_surface_present_mode_;
VkRenderPass swap_render_pass_ = VK_NULL_HANDLE;
VkSwapchainKHR swap_swapchain_ = VK_NULL_HANDLE;
VkExtent2D swap_swapchain_extent_;
std::vector<VkImageView> swap_swapchain_image_views_;
std::vector<VkFramebuffer> swap_swapchain_framebuffers_;
uint32_t swap_swapchain_image_current_ = UINT32_MAX;
// Attempts to recreate the swapchain will only be made in BeginSwap if this
// is true (set when something relevant is changed), so if creation fails,
// there won't be attempts every frame again and again.
bool swap_swapchain_or_surface_recreation_needed_ = false;
std::unique_ptr<VulkanImmediateDrawer> immediate_drawer_ = nullptr;
};
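
A short worked example of the submission bookkeeping above, with illustrative numbers and kSwapchainMaxImageCount == 3:

// swap_submission_current_   = 5  (the frame about to be recorded)
// swap_submission_completed_ = 1  (the last frame known to have finished)
// The frame will be recorded into slot 5 % 3 = 2, which was last used by
// submission 2, so BeginSwap first waits until submission 5 - 3 = 2 has
// completed: it collects the fences of submissions 2..2 (just
// swap_submissions_[2 % 3].fence here), waits on them, and advances
// swap_submission_completed_ to 2 before reusing the slot.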

View File

@ -9,14 +9,26 @@
#include "xenia/ui/vulkan/vulkan_immediate_drawer.h"
#include "xenia/ui/vulkan/vulkan_context.h"
namespace xe {
namespace ui {
namespace vulkan {
class VulkanImmediateTexture : public ImmediateTexture {
public:
VulkanImmediateTexture(uint32_t width, uint32_t height)
: ImmediateTexture(width, height) {}
};
VulkanImmediateDrawer::VulkanImmediateDrawer(VulkanContext& graphics_context)
: ImmediateDrawer(&graphics_context), context_(graphics_context) {}
std::unique_ptr<ImmediateTexture> VulkanImmediateDrawer::CreateTexture(
uint32_t width, uint32_t height, ImmediateTextureFilter filter, bool repeat,
const uint8_t* data) {
return nullptr;
auto texture = std::make_unique<VulkanImmediateTexture>(width, height);
return std::unique_ptr<ImmediateTexture>(texture.release());
}
void VulkanImmediateDrawer::UpdateTexture(ImmediateTexture* texture,

View File

@ -16,8 +16,12 @@ namespace xe {
namespace ui {
namespace vulkan {
class VulkanContext;
class VulkanImmediateDrawer : public ImmediateDrawer {
public:
VulkanImmediateDrawer(VulkanContext& graphics_context);
std::unique_ptr<ImmediateTexture> CreateTexture(uint32_t width,
uint32_t height,
ImmediateTextureFilter filter,
@ -30,6 +34,9 @@ class VulkanImmediateDrawer : public ImmediateDrawer {
void Draw(const ImmediateDraw& draw) override;
void EndDrawBatch() override;
void End() override;
private:
VulkanContext& context_;
};
} // namespace vulkan

View File

@ -24,6 +24,12 @@
#include "xenia/base/platform_win.h"
#endif
// TODO(Triang3l): Disable Vulkan validation before releasing a stable version.
DEFINE_bool(
vulkan_validation, true,
"Enable Vulkan validation (VK_LAYER_KHRONOS_validation). Messages will be "
"written to the OS debug log.",
"GPU");
DEFINE_int32(
vulkan_device, -1,
"Index of the physical device to use, or -1 for any compatible device.",
@ -55,10 +61,10 @@ VulkanProvider::VulkanProvider(Window* main_window)
VulkanProvider::~VulkanProvider() {
if (device_ != VK_NULL_HANDLE) {
ifn_.destroyDevice(device_, nullptr);
ifn_.vkDestroyDevice(device_, nullptr);
}
if (instance_ != VK_NULL_HANDLE) {
destroyInstance_(instance_, nullptr);
lfn_.vkDestroyInstance(instance_, nullptr);
}
#if XE_PLATFORM_LINUX
@ -74,6 +80,7 @@ VulkanProvider::~VulkanProvider() {
bool VulkanProvider::Initialize() {
// Load the library.
bool library_functions_loaded = true;
#if XE_PLATFORM_LINUX
#if XE_PLATFORM_ANDROID
const char* libvulkan_name = "libvulkan.so";
@ -86,61 +93,46 @@ bool VulkanProvider::Initialize() {
XELOGE("Failed to load {}", libvulkan_name);
return false;
}
getInstanceProcAddr_ =
PFN_vkGetInstanceProcAddr(dlsym(library_, "vkGetInstanceProcAddr"));
destroyInstance_ =
PFN_vkDestroyInstance(dlsym(library_, "vkDestroyInstance"));
if (!getInstanceProcAddr_ || !destroyInstance_) {
XELOGE("Failed to get vkGetInstanceProcAddr and vkDestroyInstance from {}",
libvulkan_name);
return false;
}
#define XE_VULKAN_LOAD_MODULE_LFN(name) \
library_functions_loaded &= \
(lfn_.name = PFN_##name(dlsym(library_, #name))) != nullptr;
#elif XE_PLATFORM_WIN32
library_ = LoadLibraryA("vulkan-1.dll");
if (!library_) {
XELOGE("Failed to load vulkan-1.dll");
return false;
}
getInstanceProcAddr_ = PFN_vkGetInstanceProcAddr(
GetProcAddress(library_, "vkGetInstanceProcAddr"));
destroyInstance_ =
PFN_vkDestroyInstance(GetProcAddress(library_, "vkDestroyInstance"));
if (!getInstanceProcAddr_ || !destroyInstance_) {
XELOGE(
"Failed to get vkGetInstanceProcAddr and vkDestroyInstance from "
"vulkan-1.dll");
return false;
}
#define XE_VULKAN_LOAD_MODULE_LFN(name) \
library_functions_loaded &= \
(lfn_.name = PFN_##name(GetProcAddress(library_, #name))) != nullptr;
#else
#error No Vulkan library loading provided for the target platform.
#endif
assert_not_null(getInstanceProcAddr_);
assert_not_null(destroyInstance_);
bool library_functions_loaded = true;
library_functions_loaded &=
(library_functions_.createInstance = PFN_vkCreateInstance(
getInstanceProcAddr_(VK_NULL_HANDLE, "vkCreateInstance"))) !=
nullptr;
library_functions_loaded &=
(library_functions_.enumerateInstanceExtensionProperties =
PFN_vkEnumerateInstanceExtensionProperties(getInstanceProcAddr_(
VK_NULL_HANDLE, "vkEnumerateInstanceExtensionProperties"))) !=
nullptr;
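// Note: with a null instance, vkGetInstanceProcAddr only provides global-level
// commands (vkCreateInstance, vkEnumerateInstance*), so vkGetInstanceProcAddr
// itself and vkDestroyInstance are taken directly from the loader module's
// exports here.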
XE_VULKAN_LOAD_MODULE_LFN(vkGetInstanceProcAddr);
XE_VULKAN_LOAD_MODULE_LFN(vkDestroyInstance);
#undef XE_VULKAN_LOAD_MODULE_LFN
if (!library_functions_loaded) {
XELOGE("Failed to get Vulkan library function pointers");
return false;
}
library_functions_.enumerateInstanceVersion_1_1 =
PFN_vkEnumerateInstanceVersion(
getInstanceProcAddr_(VK_NULL_HANDLE, "vkEnumerateInstanceVersion"));
library_functions_loaded &=
(lfn_.vkCreateInstance = PFN_vkCreateInstance(lfn_.vkGetInstanceProcAddr(
VK_NULL_HANDLE, "vkCreateInstance"))) != nullptr;
if (!library_functions_loaded) {
XELOGE(
"Failed to get Vulkan library function pointers via "
"vkGetInstanceProcAddr");
return false;
}
lfn_.v_1_1.vkEnumerateInstanceVersion = PFN_vkEnumerateInstanceVersion(
lfn_.vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceVersion"));
// Get the API version.
const uint32_t api_version_target = VK_MAKE_VERSION(1, 2, 148);
static_assert(VK_HEADER_VERSION_COMPLETE >= api_version_target,
"Vulkan header files must be up to date");
if (!library_functions_.enumerateInstanceVersion_1_1 ||
library_functions_.enumerateInstanceVersion_1_1(&api_version_) !=
VK_SUCCESS) {
if (!lfn_.v_1_1.vkEnumerateInstanceVersion ||
lfn_.v_1_1.vkEnumerateInstanceVersion(&api_version_) != VK_SUCCESS) {
api_version_ = VK_API_VERSION_1_0;
}
XELOGVK("Vulkan instance version {}.{}.{}", VK_VERSION_MAJOR(api_version_),
@ -173,66 +165,59 @@ bool VulkanProvider::Initialize() {
instance_create_info.pNext = nullptr;
instance_create_info.flags = 0;
instance_create_info.pApplicationInfo = &application_info;
// TODO(Triang3l): Enable the validation layer.
instance_create_info.enabledLayerCount = 0;
instance_create_info.ppEnabledLayerNames = nullptr;
static const char* validation_layer = "VK_LAYER_KHRONOS_validation";
if (cvars::vulkan_validation) {
instance_create_info.enabledLayerCount = 1;
instance_create_info.ppEnabledLayerNames = &validation_layer;
} else {
instance_create_info.enabledLayerCount = 0;
instance_create_info.ppEnabledLayerNames = nullptr;
}
instance_create_info.enabledExtensionCount =
uint32_t(instance_extensions_enabled.size());
instance_create_info.ppEnabledExtensionNames =
instance_extensions_enabled.data();
if (library_functions_.createInstance(&instance_create_info, nullptr,
&instance_) != VK_SUCCESS) {
XELOGE("Failed to create a Vulkan instance with surface support");
return false;
VkResult instance_create_result =
lfn_.vkCreateInstance(&instance_create_info, nullptr, &instance_);
if (instance_create_result != VK_SUCCESS) {
if (instance_create_result == VK_ERROR_LAYER_NOT_PRESENT) {
XELOGE("Failed to enable the Vulkan validation layer");
instance_create_info.enabledLayerCount = 0;
instance_create_info.ppEnabledLayerNames = nullptr;
instance_create_result =
lfn_.vkCreateInstance(&instance_create_info, nullptr, &instance_);
}
if (instance_create_result != VK_SUCCESS) {
XELOGE("Failed to create a Vulkan instance with surface support");
return false;
}
}
// Get instance functions.
bool instance_functions_loaded = true;
instance_functions_loaded &=
(ifn_.createDevice = PFN_vkCreateDevice(
getInstanceProcAddr_(instance_, "vkCreateDevice"))) != nullptr;
instance_functions_loaded &=
(ifn_.destroyDevice = PFN_vkDestroyDevice(
getInstanceProcAddr_(instance_, "vkDestroyDevice"))) != nullptr;
instance_functions_loaded &=
(ifn_.enumerateDeviceExtensionProperties =
PFN_vkEnumerateDeviceExtensionProperties(getInstanceProcAddr_(
instance_, "vkEnumerateDeviceExtensionProperties"))) != nullptr;
instance_functions_loaded &=
(ifn_.enumeratePhysicalDevices = PFN_vkEnumeratePhysicalDevices(
getInstanceProcAddr_(instance_, "vkEnumeratePhysicalDevices"))) !=
nullptr;
instance_functions_loaded &=
(ifn_.getDeviceProcAddr = PFN_vkGetDeviceProcAddr(
getInstanceProcAddr_(instance_, "vkGetDeviceProcAddr"))) != nullptr;
instance_functions_loaded &=
(ifn_.getPhysicalDeviceFeatures = PFN_vkGetPhysicalDeviceFeatures(
getInstanceProcAddr_(instance_, "vkGetPhysicalDeviceFeatures"))) !=
nullptr;
instance_functions_loaded &=
(ifn_.getPhysicalDeviceProperties = PFN_vkGetPhysicalDeviceProperties(
getInstanceProcAddr_(instance_, "vkGetPhysicalDeviceProperties"))) !=
nullptr;
instance_functions_loaded &=
(ifn_.getPhysicalDeviceQueueFamilyProperties =
PFN_vkGetPhysicalDeviceQueueFamilyProperties(getInstanceProcAddr_(
instance_, "vkGetPhysicalDeviceQueueFamilyProperties"))) !=
nullptr;
instance_functions_loaded &=
(ifn_.getPhysicalDeviceSurfaceSupportKHR =
PFN_vkGetPhysicalDeviceSurfaceSupportKHR(getInstanceProcAddr_(
instance_, "vkGetPhysicalDeviceSurfaceSupportKHR"))) != nullptr;
#define XE_VULKAN_LOAD_IFN(name) \
instance_functions_loaded &= \
(ifn_.name = PFN_##name( \
lfn_.vkGetInstanceProcAddr(instance_, #name))) != nullptr;
XE_VULKAN_LOAD_IFN(vkCreateDevice);
XE_VULKAN_LOAD_IFN(vkDestroyDevice);
XE_VULKAN_LOAD_IFN(vkDestroySurfaceKHR);
XE_VULKAN_LOAD_IFN(vkEnumerateDeviceExtensionProperties);
XE_VULKAN_LOAD_IFN(vkEnumeratePhysicalDevices);
XE_VULKAN_LOAD_IFN(vkGetDeviceProcAddr);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceFeatures);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceProperties);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceQueueFamilyProperties);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceSurfaceCapabilitiesKHR);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceSurfaceFormatsKHR);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceSurfacePresentModesKHR);
XE_VULKAN_LOAD_IFN(vkGetPhysicalDeviceSurfaceSupportKHR);
#if XE_PLATFORM_ANDROID
instance_functions_loaded &=
(ifn_.createAndroidSurfaceKHR = PFN_vkCreateAndroidSurfaceKHR(
getInstanceProcAddr_(instance_, "vkCreateAndroidSurfaceKHR"))) !=
nullptr;
XE_VULKAN_LOAD_IFN(vkCreateAndroidSurfaceKHR);
#elif XE_PLATFORM_WIN32
instance_functions_loaded &=
(ifn_.createWin32SurfaceKHR = PFN_vkCreateWin32SurfaceKHR(
getInstanceProcAddr_(instance_, "vkCreateWin32SurfaceKHR"))) !=
nullptr;
XE_VULKAN_LOAD_IFN(vkCreateWin32SurfaceKHR);
#endif
#undef XE_VULKAN_LOAD_IFN
if (!instance_functions_loaded) {
XELOGE("Failed to get Vulkan instance function pointers");
return false;
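For readers following the switch away from Volk, here is a minimal, illustrative sketch (not part of the commit) of the same bootstrap: vkCreateInstance is resolved through vkGetInstanceProcAddr with a null instance handle, and instance creation is retried without the validation layer when the loader reports VK_ERROR_LAYER_NOT_PRESENT. The helper name and parameters are hypothetical.

#include <vulkan/vulkan.h>

// Sketch only: assumes vkGetInstanceProcAddr was already fetched from the
// Vulkan loader module (GetProcAddress/dlsym), as the provider does.
VkInstance CreateInstanceWithOptionalValidation(
    PFN_vkGetInstanceProcAddr get_instance_proc_addr, bool try_validation) {
  // vkCreateInstance may be queried with a null instance handle.
  PFN_vkCreateInstance create_instance = PFN_vkCreateInstance(
      get_instance_proc_addr(nullptr, "vkCreateInstance"));
  if (!create_instance) {
    return VK_NULL_HANDLE;
  }
  VkApplicationInfo application_info = {VK_STRUCTURE_TYPE_APPLICATION_INFO};
  application_info.apiVersion = VK_API_VERSION_1_0;
  VkInstanceCreateInfo create_info = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
  create_info.pApplicationInfo = &application_info;
  static const char* validation_layer = "VK_LAYER_KHRONOS_validation";
  if (try_validation) {
    create_info.enabledLayerCount = 1;
    create_info.ppEnabledLayerNames = &validation_layer;
  }
  VkInstance instance = VK_NULL_HANDLE;
  VkResult result = create_instance(&create_info, nullptr, &instance);
  if (result == VK_ERROR_LAYER_NOT_PRESENT && try_validation) {
    // Retry without layers instead of failing instance creation outright.
    create_info.enabledLayerCount = 0;
    create_info.ppEnabledLayerNames = nullptr;
    result = create_instance(&create_info, nullptr, &instance);
  }
  return result == VK_SUCCESS ? instance : VK_NULL_HANDLE;
}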
@ -242,8 +227,8 @@ bool VulkanProvider::Initialize() {
std::vector<VkPhysicalDevice> physical_devices;
for (;;) {
uint32_t physical_device_count = uint32_t(physical_devices.size());
bool physical_devices_was_empty = physical_devices.empty();
VkResult physical_device_enumerate_result = ifn_.enumeratePhysicalDevices(
bool physical_devices_was_empty = !physical_device_count;
VkResult physical_device_enumerate_result = ifn_.vkEnumeratePhysicalDevices(
instance_, &physical_device_count,
physical_devices_was_empty ? nullptr : physical_devices.data());
// If the original device count was 0 (first call), SUCCESS is returned, not
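The comment above refers to the standard query-count-then-fill idiom. A minimal sketch of that idiom (illustration only, not from the commit), without the retry-on-VK_INCOMPLETE loop the provider adds for the rare case where devices appear between the two calls:

#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

std::vector<VkPhysicalDevice> GetPhysicalDevicesOnce(
    PFN_vkEnumeratePhysicalDevices enumerate_physical_devices,
    VkInstance instance) {
  // First call with a null array: the implementation writes the device count
  // and returns VK_SUCCESS even when the count is zero.
  uint32_t count = 0;
  if (enumerate_physical_devices(instance, &count, nullptr) != VK_SUCCESS ||
      !count) {
    return {};
  }
  // Second call fills the array; VK_INCOMPLETE means it was truncated because
  // more devices became available in between.
  std::vector<VkPhysicalDevice> devices(count);
  VkResult result =
      enumerate_physical_devices(instance, &count, devices.data());
  if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
    return {};
  }
  devices.resize(count);  // count is written back with the number returned.
  return devices;
}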
@ -288,7 +273,8 @@ bool VulkanProvider::Initialize() {
VkPhysicalDevice physical_device_current = physical_devices[i];
// Get physical device features and check if the needed ones are supported.
ifn_.getPhysicalDeviceFeatures(physical_device_current, &device_features_);
ifn_.vkGetPhysicalDeviceFeatures(physical_device_current,
&device_features_);
// TODO(Triang3l): Make geometry shaders optional by providing compute
// shader fallback (though that would require vertex shader stores).
if (!device_features_.geometryShader) {
@ -299,10 +285,10 @@ bool VulkanProvider::Initialize() {
// (preferably the same for the least latency between the two, as Xenia
// submits sparse binding commands right before graphics commands anyway).
uint32_t queue_family_count = 0;
ifn_.getPhysicalDeviceQueueFamilyProperties(physical_device_current,
&queue_family_count, nullptr);
ifn_.vkGetPhysicalDeviceQueueFamilyProperties(physical_device_current,
&queue_family_count, nullptr);
queue_families.resize(queue_family_count);
ifn_.getPhysicalDeviceQueueFamilyProperties(
ifn_.vkGetPhysicalDeviceQueueFamilyProperties(
physical_device_current, &queue_family_count, queue_families.data());
assert_true(queue_family_count == queue_families.size());
queue_family_graphics_compute_ = UINT32_MAX;
@ -364,9 +350,9 @@ bool VulkanProvider::Initialize() {
for (;;) {
uint32_t device_extension_count =
uint32_t(device_extension_properties.size());
bool device_extensions_was_empty = device_extension_properties.empty();
bool device_extensions_was_empty = !device_extension_count;
device_extensions_enumerate_result =
ifn_.enumerateDeviceExtensionProperties(
ifn_.vkEnumerateDeviceExtensionProperties(
physical_device_current, nullptr, &device_extension_count,
device_extensions_was_empty ? nullptr
: device_extension_properties.data());
@ -411,7 +397,7 @@ bool VulkanProvider::Initialize() {
"support");
return false;
}
ifn_.getPhysicalDeviceProperties(physical_device_, &device_properties_);
ifn_.vkGetPhysicalDeviceProperties(physical_device_, &device_properties_);
XELOGVK(
"Vulkan device: {} (vendor {:04X}, device {:04X}, driver {:08X}, API "
"{}.{}.{})",
@ -454,7 +440,7 @@ bool VulkanProvider::Initialize() {
device_create_info.queueCreateInfoCount =
separate_sparse_binding_queue ? 2 : 1;
device_create_info.pQueueCreateInfos = queue_create_infos;
// TODO(Triang3l): Enable the validation layer.
// Device layers are deprecated - using validation layer on the instance.
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = nullptr;
device_create_info.enabledExtensionCount =
@ -462,29 +448,58 @@ bool VulkanProvider::Initialize() {
device_create_info.ppEnabledExtensionNames = device_extensions_enabled.data();
// TODO(Triang3l): Enable only needed features.
device_create_info.pEnabledFeatures = &device_features_;
if (ifn_.createDevice(physical_device_, &device_create_info, nullptr,
&device_) != VK_SUCCESS) {
if (ifn_.vkCreateDevice(physical_device_, &device_create_info, nullptr,
&device_) != VK_SUCCESS) {
XELOGE("Failed to create a Vulkan device");
return false;
}
// Get device functions.
bool device_functions_loaded = true;
device_functions_loaded &=
(dfn_.getDeviceQueue = PFN_vkGetDeviceQueue(
ifn_.getDeviceProcAddr(device_, "vkGetDeviceQueue"))) != nullptr;
#define XE_VULKAN_LOAD_DFN(name) \
device_functions_loaded &= \
(dfn_.name = PFN_##name(ifn_.vkGetDeviceProcAddr(device_, #name))) != \
nullptr;
XE_VULKAN_LOAD_DFN(vkAcquireNextImageKHR);
XE_VULKAN_LOAD_DFN(vkAllocateCommandBuffers);
XE_VULKAN_LOAD_DFN(vkBeginCommandBuffer);
XE_VULKAN_LOAD_DFN(vkCmdBeginRenderPass);
XE_VULKAN_LOAD_DFN(vkCmdEndRenderPass);
XE_VULKAN_LOAD_DFN(vkCreateCommandPool);
XE_VULKAN_LOAD_DFN(vkCreateFence);
XE_VULKAN_LOAD_DFN(vkCreateFramebuffer);
XE_VULKAN_LOAD_DFN(vkCreateImageView);
XE_VULKAN_LOAD_DFN(vkCreateRenderPass);
XE_VULKAN_LOAD_DFN(vkCreateSemaphore);
XE_VULKAN_LOAD_DFN(vkCreateSwapchainKHR);
XE_VULKAN_LOAD_DFN(vkDestroyCommandPool);
XE_VULKAN_LOAD_DFN(vkDestroyFence);
XE_VULKAN_LOAD_DFN(vkDestroyFramebuffer);
XE_VULKAN_LOAD_DFN(vkDestroyImageView);
XE_VULKAN_LOAD_DFN(vkDestroyRenderPass);
XE_VULKAN_LOAD_DFN(vkDestroySemaphore);
XE_VULKAN_LOAD_DFN(vkDestroySwapchainKHR);
XE_VULKAN_LOAD_DFN(vkEndCommandBuffer);
XE_VULKAN_LOAD_DFN(vkGetDeviceQueue);
XE_VULKAN_LOAD_DFN(vkGetSwapchainImagesKHR);
XE_VULKAN_LOAD_DFN(vkResetCommandPool);
XE_VULKAN_LOAD_DFN(vkResetFences);
XE_VULKAN_LOAD_DFN(vkQueuePresentKHR);
XE_VULKAN_LOAD_DFN(vkQueueSubmit);
XE_VULKAN_LOAD_DFN(vkWaitForFences);
#undef XE_VULKAN_LOAD_DFN
if (!device_functions_loaded) {
XELOGE("Failed to get Vulkan device function pointers");
return false;
}
// Get the queues.
dfn_.getDeviceQueue(device_, queue_family_graphics_compute_, 0,
&queue_graphics_compute_);
dfn_.vkGetDeviceQueue(device_, queue_family_graphics_compute_, 0,
&queue_graphics_compute_);
if (queue_family_sparse_binding != UINT32_MAX) {
if (separate_sparse_binding_queue) {
dfn_.getDeviceQueue(device_, queue_family_sparse_binding, 0,
&queue_sparse_binding_);
dfn_.vkGetDeviceQueue(device_, queue_family_sparse_binding, 0,
&queue_sparse_binding_);
} else {
queue_sparse_binding_ = queue_graphics_compute_;
}
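As a side note (illustration, not commit content), the XE_VULKAN_LOAD_DFN pattern above boils down to querying each name through vkGetDeviceProcAddr; device-level pointers skip the loader's per-instance dispatch, so they are only valid for the device they were queried from. A reduced sketch with a hypothetical two-entry table:

#include <vulkan/vulkan.h>

struct DeviceQueueFunctions {
  PFN_vkGetDeviceQueue vkGetDeviceQueue = nullptr;
  PFN_vkQueueSubmit vkQueueSubmit = nullptr;
};

bool LoadDeviceQueueFunctions(PFN_vkGetDeviceProcAddr get_device_proc_addr,
                              VkDevice device, DeviceQueueFunctions& dfn) {
  dfn.vkGetDeviceQueue =
      PFN_vkGetDeviceQueue(get_device_proc_addr(device, "vkGetDeviceQueue"));
  dfn.vkQueueSubmit =
      PFN_vkQueueSubmit(get_device_proc_addr(device, "vkQueueSubmit"));
  return dfn.vkGetDeviceQueue && dfn.vkQueueSubmit;
}

// Usage, mirroring the queue retrieval above:
//   VkQueue queue;
//   dfn.vkGetDeviceQueue(device, queue_family_graphics_compute, 0, &queue);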

View File

@ -24,7 +24,7 @@
#ifndef VK_USE_PLATFORM_WIN32_KHR
#define VK_USE_PLATFORM_WIN32_KHR 1
#endif
#endif // XE_PLATFORM_WIN32
#endif
#ifndef VK_NO_PROTOTYPES
#define VK_NO_PROTOTYPES 1
@ -47,37 +47,45 @@ class VulkanProvider : public GraphicsProvider {
Window* target_window) override;
std::unique_ptr<GraphicsContext> CreateOffscreenContext() override;
// Functions with a version suffix (like _1_1) are null when api_version() is
// below this version.
struct LibraryFunctions {
PFN_vkCreateInstance createInstance;
PFN_vkEnumerateInstanceExtensionProperties
enumerateInstanceExtensionProperties;
PFN_vkEnumerateInstanceVersion enumerateInstanceVersion_1_1;
// From the module.
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
PFN_vkDestroyInstance vkDestroyInstance;
// From vkGetInstanceProcAddr.
PFN_vkCreateInstance vkCreateInstance;
struct {
PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;
} v_1_1;
};
const LibraryFunctions& library_functions() const {
return library_functions_;
}
const LibraryFunctions& lfn() const { return lfn_; }
uint32_t api_version() const { return api_version_; }
VkInstance instance() const { return instance_; }
struct InstanceFunctions {
PFN_vkCreateDevice createDevice;
PFN_vkDestroyDevice destroyDevice;
PFN_vkEnumerateDeviceExtensionProperties enumerateDeviceExtensionProperties;
PFN_vkEnumeratePhysicalDevices enumeratePhysicalDevices;
PFN_vkGetDeviceProcAddr getDeviceProcAddr;
PFN_vkGetPhysicalDeviceFeatures getPhysicalDeviceFeatures;
PFN_vkGetPhysicalDeviceProperties getPhysicalDeviceProperties;
PFN_vkCreateDevice vkCreateDevice;
PFN_vkDestroyDevice vkDestroyDevice;
PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;
PFN_vkEnumerateDeviceExtensionProperties
vkEnumerateDeviceExtensionProperties;
PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;
PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
PFN_vkGetPhysicalDeviceQueueFamilyProperties
getPhysicalDeviceQueueFamilyProperties;
PFN_vkGetPhysicalDeviceSurfaceSupportKHR getPhysicalDeviceSurfaceSupportKHR;
vkGetPhysicalDeviceQueueFamilyProperties;
PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
PFN_vkGetPhysicalDeviceSurfaceFormatsKHR
vkGetPhysicalDeviceSurfaceFormatsKHR;
PFN_vkGetPhysicalDeviceSurfacePresentModesKHR
vkGetPhysicalDeviceSurfacePresentModesKHR;
PFN_vkGetPhysicalDeviceSurfaceSupportKHR
vkGetPhysicalDeviceSurfaceSupportKHR;
#if XE_PLATFORM_ANDROID
PFN_vkCreateAndroidSurfaceKHR createAndroidSurfaceKHR;
PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;
#elif XE_PLATFORM_WIN32
PFN_vkCreateWin32SurfaceKHR createWin32SurfaceKHR;
PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;
#endif
};
const InstanceFunctions& ifn() const { return ifn_; }
@ -103,7 +111,33 @@ class VulkanProvider : public GraphicsProvider {
VkDevice device() const { return device_; }
struct DeviceFunctions {
PFN_vkGetDeviceQueue getDeviceQueue;
PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;
PFN_vkBeginCommandBuffer vkBeginCommandBuffer;
PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;
PFN_vkCmdEndRenderPass vkCmdEndRenderPass;
PFN_vkCreateCommandPool vkCreateCommandPool;
PFN_vkCreateFence vkCreateFence;
PFN_vkCreateFramebuffer vkCreateFramebuffer;
PFN_vkCreateImageView vkCreateImageView;
PFN_vkCreateRenderPass vkCreateRenderPass;
PFN_vkCreateSemaphore vkCreateSemaphore;
PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
PFN_vkDestroyCommandPool vkDestroyCommandPool;
PFN_vkDestroyFence vkDestroyFence;
PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
PFN_vkDestroyImageView vkDestroyImageView;
PFN_vkDestroyRenderPass vkDestroyRenderPass;
PFN_vkDestroySemaphore vkDestroySemaphore;
PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
PFN_vkEndCommandBuffer vkEndCommandBuffer;
PFN_vkGetDeviceQueue vkGetDeviceQueue;
PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
PFN_vkResetCommandPool vkResetCommandPool;
PFN_vkResetFences vkResetFences;
PFN_vkQueuePresentKHR vkQueuePresentKHR;
PFN_vkQueueSubmit vkQueueSubmit;
PFN_vkWaitForFences vkWaitForFences;
};
const DeviceFunctions& dfn() const { return dfn_; }
@ -122,9 +156,7 @@ class VulkanProvider : public GraphicsProvider {
HMODULE library_ = nullptr;
#endif
PFN_vkGetInstanceProcAddr getInstanceProcAddr_ = nullptr;
PFN_vkDestroyInstance destroyInstance_ = nullptr;
LibraryFunctions library_functions_ = {};
LibraryFunctions lfn_ = {};
uint32_t api_version_ = VK_API_VERSION_1_0;
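Since the header defines VK_NO_PROTOTYPES, consumers are expected to dispatch through these tables rather than through global symbols. A hypothetical caller (sketch only; the accessors dfn() and device() are the ones declared above, while the function itself, the namespace, and the include path are assumptions):

// Assumes "xenia/ui/vulkan/vulkan_provider.h" (this header) is included.
#include <cstdint>

void WaitAndResetFence(const xe::ui::vulkan::VulkanProvider& provider,
                       VkFence fence) {
  const auto& dfn = provider.dfn();
  dfn.vkWaitForFences(provider.device(), 1, &fence, VK_TRUE, UINT64_MAX);
  dfn.vkResetFences(provider.device(), 1, &fence);
}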

View File

@ -200,11 +200,15 @@ void Window::OnPaint(UIEvent* e) {
io.DisplaySize = ImVec2(static_cast<float>(scaled_width()),
static_cast<float>(scaled_height()));
context_->BeginSwap();
bool can_swap = context_->BeginSwap();
if (context_->WasLost()) {
on_context_lost(e);
return;
}
if (!can_swap) {
// Surface not available.
return;
}
ImGui::NewFrame();
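The new caller contract, as a sketch (not commit content): BeginSwap() now reports whether drawing can proceed, and the caller distinguishes a lost context from a temporarily unavailable surface. EndSwap() and the include path are assumed here as the existing counterparts.

#include "xenia/ui/graphics_context.h"

bool DrawFrame(xe::ui::GraphicsContext* context) {
  if (!context->BeginSwap()) {
    // WasLost(): the context is gone for good - let the owner tear it down.
    // Otherwise the surface is just unavailable (e.g. a minimized window) -
    // skip this frame and try again later.
    return !context->WasLost();
  }
  // ... record and submit drawing for this frame ...
  context->EndSwap();
  return true;
}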

third_party/volk vendored

@ -1 +0,0 @@
Subproject commit 30a851b67e129a3d91f191b2e9dcdad65ba98438

third_party/volk.lua vendored
View File

@ -1,30 +0,0 @@
group("third_party")
project("volk")
uuid("C9781C93-2DF5-47A2-94EE-2C5EBED61239")
kind("StaticLib")
language("C")
defines({
"_LIB",
"API_NAME=\"vulkan\"",
})
removedefines({
"_UNICODE",
"UNICODE",
})
includedirs({
"volk",
})
files({
"volk/volk.c",
"volk/volk.h",
})
filter("platforms:Windows")
defines({
"VK_USE_PLATFORM_WIN32_KHR",
})
filter("platforms:Linux")
defines({
"VK_USE_PLATFORM_XCB_KHR",
})