Formatting fixes only

- Mostly remove camelCase usage in old code.
- Properly annotate Vulkan API imports with a _vk prefix to make it clear they are not regular variables (see the sketch below).
kd-11 2021-02-22 22:54:25 +03:00 committed by kd-11
parent 3063369322
commit d459da1378
7 changed files with 52 additions and 52 deletions
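
For context, a minimal sketch of the naming convention this commit applies, assuming the standard Vulkan headers; the surrounding struct is hypothetical, but the member name and loader call mirror the diff below. Dynamically imported extension entry points keep their full Vulkan name behind a leading underscore, so call sites read as loaded function pointers rather than ordinary members.

```cpp
#include <vulkan/vulkan.h>

// Hypothetical stand-in for vk::render_device from the diff below.
struct render_device_sketch
{
    // The "_vk" prefix marks an imported API entry point, not a regular member.
    PFN_vkCmdBeginConditionalRenderingEXT _vkCmdBeginConditionalRenderingEXT = nullptr;

    void import_optional_endpoints(VkDevice dev)
    {
        // vkGetDeviceProcAddr returns nullptr when the extension is not
        // enabled, so callers must test the pointer before dispatching.
        _vkCmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(
            vkGetDeviceProcAddr(dev, "vkCmdBeginConditionalRenderingEXT"));
    }
};
```

The rename is purely mechanical: existing members and their call sites adopt this pattern, with no behavioral change.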


@@ -835,7 +835,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
info.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
info.buffer = m_cond_render_buffer->value;
-m_device->cmdBeginConditionalRenderingEXT(*m_current_command_buffer, &info);
+m_device->_vkCmdBeginConditionalRenderingEXT(*m_current_command_buffer, &info);
m_current_command_buffer->flags |= vk::command_buffer::cb_has_conditional_render;
}
}
@@ -1029,7 +1029,7 @@ void VKGSRender::end()
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
{
-m_device->cmdEndConditionalRenderingEXT(*m_current_command_buffer);
+m_device->_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
m_current_command_buffer->flags &= ~(vk::command_buffer::cb_has_conditional_render);
}


@@ -1971,7 +1971,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
{
ensure(m_render_pass_open);
-m_device->cmdEndConditionalRenderingEXT(*m_current_command_buffer);
+m_device->_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
}
#endif


@@ -87,11 +87,11 @@ namespace vk
u32 memory_type_index = memory_map.host_visible_coherent;
VkFlags access_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-ensure(memory_map.getMemoryHostPointerPropertiesEXT);
+ensure(memory_map._vkGetMemoryHostPointerPropertiesEXT);
VkMemoryHostPointerPropertiesEXT memory_properties{};
memory_properties.sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT;
-CHECK_RESULT(memory_map.getMemoryHostPointerPropertiesEXT(dev, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, host_pointer, &memory_properties));
+CHECK_RESULT(memory_map._vkGetMemoryHostPointerPropertiesEXT(dev, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, host_pointer, &memory_properties));
VkMemoryRequirements memory_reqs;
vkGetBufferMemoryRequirements(m_device, value, &memory_reqs);


@@ -44,9 +44,9 @@ namespace vk
features2.pNext = &driver_properties;
}
-auto getPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR"));
-ensure(getPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddress failed to find entry point!"
-getPhysicalDeviceFeatures2KHR(dev, &features2);
+auto _vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR"));
+ensure(_vkGetPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddress failed to find entry point!"
+_vkGetPhysicalDeviceFeatures2KHR(dev, &features2);
shader_types_support.allow_float64 = !!features2.features.shaderFloat64;
shader_types_support.allow_float16 = !!shader_support_info.shaderFloat16;
@@ -417,8 +417,8 @@ namespace vk
// Import optional function endpoints
if (pgpu->conditional_render_support)
{
-cmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdBeginConditionalRenderingEXT"));
-cmdEndConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdEndConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdEndConditionalRenderingEXT"));
+_vkCmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdBeginConditionalRenderingEXT"));
+_vkCmdEndConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdEndConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdEndConditionalRenderingEXT"));
}
memory_map = vk::get_memory_mapping(pdev);
@@ -427,7 +427,7 @@ namespace vk
if (pgpu->external_memory_host_support)
{
-memory_map.getMemoryHostPointerPropertiesEXT = reinterpret_cast<PFN_vkGetMemoryHostPointerPropertiesEXT>(vkGetDeviceProcAddr(dev, "vkGetMemoryHostPointerPropertiesEXT"));
+memory_map._vkGetMemoryHostPointerPropertiesEXT = reinterpret_cast<PFN_vkGetMemoryHostPointerPropertiesEXT>(vkGetDeviceProcAddr(dev, "vkGetMemoryHostPointerPropertiesEXT"));
}
if (g_cfg.video.disable_vulkan_mem_allocator)


@@ -31,7 +31,7 @@ namespace vk
u32 host_visible_coherent;
u32 device_local;
-PFN_vkGetMemoryHostPointerPropertiesEXT getMemoryHostPointerPropertiesEXT;
+PFN_vkGetMemoryHostPointerPropertiesEXT _vkGetMemoryHostPointerPropertiesEXT;
};
class physical_device
@@ -99,8 +99,8 @@ namespace vk
public:
// Exported device endpoints
-PFN_vkCmdBeginConditionalRenderingEXT cmdBeginConditionalRenderingEXT = nullptr;
-PFN_vkCmdEndConditionalRenderingEXT cmdEndConditionalRenderingEXT = nullptr;
+PFN_vkCmdBeginConditionalRenderingEXT _vkCmdBeginConditionalRenderingEXT = nullptr;
+PFN_vkCmdEndConditionalRenderingEXT _vkCmdEndConditionalRenderingEXT = nullptr;
public:
render_device() = default;


@@ -59,8 +59,8 @@ namespace vk
VkInstance m_instance = VK_NULL_HANDLE;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
-PFN_vkDestroyDebugReportCallbackEXT destroyDebugReportCallback = nullptr;
-PFN_vkCreateDebugReportCallbackEXT createDebugReportCallback = nullptr;
+PFN_vkDestroyDebugReportCallbackEXT _vkDestroyDebugReportCallback = nullptr;
+PFN_vkCreateDebugReportCallbackEXT _vkCreateDebugReportCallback = nullptr;
VkDebugReportCallbackEXT m_debugger = nullptr;
bool extensions_loaded = false;
@@ -83,7 +83,7 @@ namespace vk
if (m_debugger)
{
-destroyDebugReportCallback(m_instance, m_debugger, nullptr);
+_vkDestroyDebugReportCallback(m_instance, m_debugger, nullptr);
m_debugger = nullptr;
}
@@ -103,15 +103,15 @@ namespace vk
PFN_vkDebugReportCallbackEXT callback = vk::dbgFunc;
-createDebugReportCallback = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(vkGetInstanceProcAddr(m_instance, "vkCreateDebugReportCallbackEXT"));
-destroyDebugReportCallback = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(vkGetInstanceProcAddr(m_instance, "vkDestroyDebugReportCallbackEXT"));
+_vkCreateDebugReportCallback = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(vkGetInstanceProcAddr(m_instance, "vkCreateDebugReportCallbackEXT"));
+_vkDestroyDebugReportCallback = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(vkGetInstanceProcAddr(m_instance, "vkDestroyDebugReportCallbackEXT"));
VkDebugReportCallbackCreateInfoEXT dbgCreateInfo = {};
dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
dbgCreateInfo.pfnCallback = callback;
dbgCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
-CHECK_RESULT(createDebugReportCallback(m_instance, &dbgCreateInfo, NULL, &m_debugger));
+CHECK_RESULT(_vkCreateDebugReportCallback(m_instance, &dbgCreateInfo, NULL, &m_debugger));
}
#ifdef __clang__
#pragma clang diagnostic push
@@ -217,7 +217,7 @@ namespace vk
// Register some global states
if (m_debugger)
{
-destroyDebugReportCallback(m_instance, m_debugger, nullptr);
+_vkDestroyDebugReportCallback(m_instance, m_debugger, nullptr);
m_debugger = nullptr;
}
@@ -315,9 +315,9 @@ namespace vk
vkGetPhysicalDeviceSurfaceSupportKHR(dev, index, m_surface, &supports_present[index]);
}
-u32 graphicsQueueNodeIndex = UINT32_MAX;
-u32 presentQueueNodeIndex = UINT32_MAX;
-u32 transferQueueNodeIndex = UINT32_MAX;
+u32 graphics_queue_idx = UINT32_MAX;
+u32 present_queue_idx = UINT32_MAX;
+u32 transfer_queue_idx = UINT32_MAX;
auto test_queue_family = [&](u32 index, u32 desired_flags)
{
@@ -333,37 +333,37 @@ namespace vk
for (u32 i = 0; i < device_queues; ++i)
{
// 1. Test for a present queue possibly one that also supports graphics
-if (presentQueueNodeIndex == UINT32_MAX && supports_present[i])
+if (present_queue_idx == UINT32_MAX && supports_present[i])
{
-presentQueueNodeIndex = i;
+present_queue_idx = i;
if (test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
{
-graphicsQueueNodeIndex = i;
+graphics_queue_idx = i;
}
}
// 2. Check for graphics support
-else if (graphicsQueueNodeIndex == UINT32_MAX && test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
+else if (graphics_queue_idx == UINT32_MAX && test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
{
-graphicsQueueNodeIndex = i;
+graphics_queue_idx = i;
if (supports_present[i])
{
-presentQueueNodeIndex = i;
+present_queue_idx = i;
}
}
// 3. Check if transfer + compute is available
-else if (transferQueueNodeIndex == UINT32_MAX && test_queue_family(i, VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))
+else if (transfer_queue_idx == UINT32_MAX && test_queue_family(i, VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))
{
-transferQueueNodeIndex = i;
+transfer_queue_idx = i;
}
}
-if (graphicsQueueNodeIndex == UINT32_MAX)
+if (graphics_queue_idx == UINT32_MAX)
{
rsx_log.fatal("Failed to find a suitable graphics queue");
return nullptr;
}
-if (graphicsQueueNodeIndex != presentQueueNodeIndex)
+if (graphics_queue_idx != present_queue_idx)
{
// Separate graphics and present, use headless fallback
present_possible = false;
@@ -374,7 +374,7 @@ namespace vk
//Native(sw) swapchain
rsx_log.error("It is not possible for the currently selected GPU to present to the window (Likely caused by NVIDIA driver running the current display)");
rsx_log.warning("Falling back to software present support (native windowing API)");
-auto swapchain = new swapchain_NATIVE(dev, UINT32_MAX, graphicsQueueNodeIndex, transferQueueNodeIndex);
+auto swapchain = new swapchain_NATIVE(dev, UINT32_MAX, graphics_queue_idx, transfer_queue_idx);
swapchain->create(window_handle);
return swapchain;
}
@@ -411,7 +411,7 @@ namespace vk
color_space = surfFormats[0].colorSpace;
-return new swapchain_WSI(dev, presentQueueNodeIndex, graphicsQueueNodeIndex, transferQueueNodeIndex, format, m_surface, color_space, force_wm_reporting_off);
+return new swapchain_WSI(dev, present_queue_idx, graphics_queue_idx, transfer_queue_idx, format, m_surface, color_space, force_wm_reporting_off);
}
};
}


@@ -475,11 +475,11 @@ namespace vk
VkColorSpaceKHR m_color_space = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
VkSwapchainKHR m_vk_swapchain = nullptr;
-PFN_vkCreateSwapchainKHR createSwapchainKHR = nullptr;
-PFN_vkDestroySwapchainKHR destroySwapchainKHR = nullptr;
-PFN_vkGetSwapchainImagesKHR getSwapchainImagesKHR = nullptr;
-PFN_vkAcquireNextImageKHR acquireNextImageKHR = nullptr;
-PFN_vkQueuePresentKHR queuePresentKHR = nullptr;
+PFN_vkCreateSwapchainKHR _vkCreateSwapchainKHR = nullptr;
+PFN_vkDestroySwapchainKHR _vkDestroySwapchainKHR = nullptr;
+PFN_vkGetSwapchainImagesKHR _vkGetSwapchainImagesKHR = nullptr;
+PFN_vkAcquireNextImageKHR _vkAcquireNextImageKHR = nullptr;
+PFN_vkQueuePresentKHR _vkQueuePresentKHR = nullptr;
bool m_wm_reports_flag = false;
@@ -487,13 +487,13 @@ namespace vk
void init_swapchain_images(render_device& dev, u32 /*preferred_count*/ = 0) override
{
u32 nb_swap_images = 0;
-getSwapchainImagesKHR(dev, m_vk_swapchain, &nb_swap_images, nullptr);
+_vkGetSwapchainImagesKHR(dev, m_vk_swapchain, &nb_swap_images, nullptr);
if (!nb_swap_images) fmt::throw_exception("Driver returned 0 images for swapchain");
std::vector<VkImage> vk_images;
vk_images.resize(nb_swap_images);
-getSwapchainImagesKHR(dev, m_vk_swapchain, &nb_swap_images, vk_images.data());
+_vkGetSwapchainImagesKHR(dev, m_vk_swapchain, &nb_swap_images, vk_images.data());
swapchain_images.resize(nb_swap_images);
for (u32 i = 0; i < nb_swap_images; ++i)
@@ -506,11 +506,11 @@ namespace vk
swapchain_WSI(vk::physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format, VkSurfaceKHR surface, VkColorSpaceKHR color_space, bool force_wm_reporting_off)
: WSI_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{
-createSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(vkGetDeviceProcAddr(dev, "vkCreateSwapchainKHR"));
-destroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(vkGetDeviceProcAddr(dev, "vkDestroySwapchainKHR"));
-getSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(vkGetDeviceProcAddr(dev, "vkGetSwapchainImagesKHR"));
-acquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(vkGetDeviceProcAddr(dev, "vkAcquireNextImageKHR"));
-queuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(vkGetDeviceProcAddr(dev, "vkQueuePresentKHR"));
+_vkCreateSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(vkGetDeviceProcAddr(dev, "vkCreateSwapchainKHR"));
+_vkDestroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(vkGetDeviceProcAddr(dev, "vkDestroySwapchainKHR"));
+_vkGetSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(vkGetDeviceProcAddr(dev, "vkGetSwapchainImagesKHR"));
+_vkAcquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(vkGetDeviceProcAddr(dev, "vkAcquireNextImageKHR"));
+_vkQueuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(vkGetDeviceProcAddr(dev, "vkQueuePresentKHR"));
m_surface = surface;
m_color_space = color_space;
@@ -546,7 +546,7 @@ namespace vk
{
if (m_vk_swapchain)
{
-destroySwapchainKHR(pdev, m_vk_swapchain, nullptr);
+_vkDestroySwapchainKHR(pdev, m_vk_swapchain, nullptr);
}
dev.destroy();
@@ -729,7 +729,7 @@ namespace vk
rsx_log.notice("Swapchain: requesting full screen exclusive mode %d.", static_cast<int>(full_screen_exclusive_info.fullScreenExclusive));
#endif
-createSwapchainKHR(dev, &swap_info, nullptr, &m_vk_swapchain);
+_vkCreateSwapchainKHR(dev, &swap_info, nullptr, &m_vk_swapchain);
if (old_swapchain)
{
@@ -738,7 +738,7 @@ namespace vk
swapchain_images.clear();
}
-destroySwapchainKHR(dev, old_swapchain, nullptr);
+_vkDestroySwapchainKHR(dev, old_swapchain, nullptr);
}
init_swapchain_images(dev);
@@ -770,7 +770,7 @@ namespace vk
present.waitSemaphoreCount = 1;
present.pWaitSemaphores = &semaphore;
-return queuePresentKHR(dev.get_present_queue(), &present);
+return _vkQueuePresentKHR(dev.get_present_queue(), &present);
}
VkImage get_image(u32 index) override