[Vulkan] Gather shader stages that VS can be translated into

Author: Triang3l
Date:   2022-05-15 16:31:24 +03:00
Parent: 7d19a8c0e8
Commit: 185c23dd50
6 changed files with 60 additions and 42 deletions

@@ -68,6 +68,21 @@ bool VulkanCommandProcessor::SetupContext() {
   const ui::vulkan::VulkanProvider& provider = GetVulkanProvider();
   const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn();
   VkDevice device = provider.device();
+  const VkPhysicalDeviceFeatures& device_features = provider.device_features();
+
+  guest_shader_pipeline_stages_ = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+                                  VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+  guest_shader_vertex_stages_ = VK_SHADER_STAGE_VERTEX_BIT;
+  if (device_features.tessellationShader) {
+    guest_shader_pipeline_stages_ |=
+        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
+    guest_shader_vertex_stages_ |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
+  }
+  if (!device_features.vertexPipelineStoresAndAtomics) {
+    // For memory export from vertex shaders converted to compute shaders.
+    guest_shader_pipeline_stages_ |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    guest_shader_vertex_stages_ |= VK_SHADER_STAGE_COMPUTE_BIT;
+  }

   // No specific reason for 32768, just the "too much" amount from Direct3D 12
   // PIX warnings.
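
Note on how the two gathered masks are used: guest_shader_vertex_stages_ (VkShaderStageFlags) feeds descriptor set layout bindings later in this function, while guest_shader_pipeline_stages_ (VkPipelineStageFlags) is meant for synchronization. A minimal sketch of the synchronization side, assuming a hypothetical command_buffer and shared_memory_buffer (neither appears in this commit) and calling the Vulkan entry point directly rather than through the provider's dfn table for brevity:

    // Hypothetical illustration: make earlier transfer writes to the shared
    // memory buffer visible to every stage a translated guest shader may run
    // in (vertex, fragment, and optionally tessellation evaluation / compute).
    VkBufferMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.buffer = shared_memory_buffer;  // Hypothetical buffer handle.
    barrier.offset = 0;
    barrier.size = VK_WHOLE_SIZE;
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         guest_shader_pipeline_stages_, 0, 0, nullptr, 1,
                         &barrier, 0, nullptr);
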
@@ -98,15 +113,14 @@ bool VulkanCommandProcessor::SetupContext() {
     XELOGE("Failed to create an empty Vulkan descriptor set layout");
     return false;
   }
-  VkShaderStageFlags shader_stages_guest_vertex =
-      GetGuestVertexShaderStageFlags();
+  VkShaderStageFlags guest_shader_stages =
+      guest_shader_vertex_stages_ | VK_SHADER_STAGE_FRAGMENT_BIT;
   VkDescriptorSetLayoutBinding descriptor_set_layout_binding_uniform_buffer;
   descriptor_set_layout_binding_uniform_buffer.binding = 0;
   descriptor_set_layout_binding_uniform_buffer.descriptorType =
       VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   descriptor_set_layout_binding_uniform_buffer.descriptorCount = 1;
-  descriptor_set_layout_binding_uniform_buffer.stageFlags =
-      shader_stages_guest_vertex | VK_SHADER_STAGE_FRAGMENT_BIT;
+  descriptor_set_layout_binding_uniform_buffer.stageFlags = guest_shader_stages;
   descriptor_set_layout_binding_uniform_buffer.pImmutableSamplers = nullptr;
   descriptor_set_layout_create_info.bindingCount = 1;
   descriptor_set_layout_create_info.pBindings =
@@ -120,7 +134,7 @@ bool VulkanCommandProcessor::SetupContext() {
     return false;
   }
   descriptor_set_layout_binding_uniform_buffer.stageFlags =
-      shader_stages_guest_vertex;
+      guest_shader_vertex_stages_;
   if (dfn.vkCreateDescriptorSetLayout(
           device, &descriptor_set_layout_create_info, nullptr,
           &descriptor_set_layout_float_constants_vertex_) != VK_SUCCESS) {
@@ -139,9 +153,8 @@ bool VulkanCommandProcessor::SetupContext() {
            "float constants uniform buffer");
     return false;
   }
-  descriptor_set_layout_binding_uniform_buffer.stageFlags =
-      shader_stages_guest_vertex | VK_SHADER_STAGE_FRAGMENT_BIT;
-  if (provider.device_features().tessellationShader) {
+  descriptor_set_layout_binding_uniform_buffer.stageFlags = guest_shader_stages;
+  if (device_features.tessellationShader) {
     descriptor_set_layout_binding_uniform_buffer.stageFlags |=
         VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
   }
@@ -169,7 +182,7 @@ bool VulkanCommandProcessor::SetupContext() {
   // vertex shader access to the shared memory for the tessellation vertex
   // shader (to retrieve tessellation factors).
   descriptor_set_layout_bindings_shared_memory_and_edram[0].stageFlags =
-      shader_stages_guest_vertex | VK_SHADER_STAGE_FRAGMENT_BIT;
+      guest_shader_stages;
   descriptor_set_layout_bindings_shared_memory_and_edram[0].pImmutableSamplers =
       nullptr;
   // TODO(Triang3l): EDRAM storage image binding for the fragment shader
@@ -185,8 +198,8 @@ bool VulkanCommandProcessor::SetupContext() {
     return false;
   }

-  shared_memory_ =
-      std::make_unique<VulkanSharedMemory>(*this, *memory_, trace_writer_);
+  shared_memory_ = std::make_unique<VulkanSharedMemory>(
+      *this, *memory_, trace_writer_, guest_shader_pipeline_stages_);
   if (!shared_memory_->Initialize()) {
     XELOGE("Failed to initialize shared memory");
     return false;
@@ -209,7 +222,8 @@ bool VulkanCommandProcessor::SetupContext() {
   }

   pipeline_cache_ = std::make_unique<VulkanPipelineCache>(
-      *this, *register_file_, *render_target_cache_);
+      *this, *register_file_, *render_target_cache_,
+      guest_shader_vertex_stages_);
   if (!pipeline_cache_->Initialize()) {
     XELOGE("Failed to initialize the graphics pipeline cache");
     return false;
@@ -1151,8 +1165,7 @@ VulkanCommandProcessor::GetPipelineLayout(uint32_t texture_count_pixel,
   descriptor_set_layout_binding.descriptorType =
       VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
   descriptor_set_layout_binding.descriptorCount = texture_count_vertex;
-  descriptor_set_layout_binding.stageFlags =
-      GetGuestVertexShaderStageFlags();
+  descriptor_set_layout_binding.stageFlags = guest_shader_vertex_stages_;
   descriptor_set_layout_binding.pImmutableSamplers = nullptr;
   VkDescriptorSetLayoutCreateInfo descriptor_set_layout_create_info;
   descriptor_set_layout_create_info.sType =
@@ -2130,18 +2143,6 @@ void VulkanCommandProcessor::SplitPendingBarrier() {
       pending_image_memory_barrier_count;
 }

-VkShaderStageFlags VulkanCommandProcessor::GetGuestVertexShaderStageFlags()
-    const {
-  VkShaderStageFlags stages = VK_SHADER_STAGE_VERTEX_BIT;
-  const ui::vulkan::VulkanProvider& provider = GetVulkanProvider();
-  if (provider.device_features().tessellationShader) {
-    stages |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
-  }
-  // TODO(Triang3l): Vertex to compute translation for rectangle and possibly
-  // point emulation.
-  return stages;
-}
-
 void VulkanCommandProcessor::UpdateDynamicState(
     const draw_util::ViewportInfo& viewport_info, bool primitive_polygonal,
     reg::RB_DEPTHCONTROL normalized_depth_control) {

@@ -240,8 +240,6 @@ class VulkanCommandProcessor : public CommandProcessor {
   void SplitPendingBarrier();

-  VkShaderStageFlags GetGuestVertexShaderStageFlags() const;
-
   void UpdateDynamicState(const draw_util::ViewportInfo& viewport_info,
                           bool primitive_polygonal,
                           reg::RB_DEPTHCONTROL normalized_depth_control);
@@ -261,6 +259,12 @@ class VulkanCommandProcessor : public CommandProcessor {
   bool cache_clear_requested_ = false;

+  // Host shader types that guest shaders can be translated into - they can
+  // access the shared memory (via vertex fetch, memory export, or manual index
+  // buffer reading) and textures.
+  VkPipelineStageFlags guest_shader_pipeline_stages_ = 0;
+  VkShaderStageFlags guest_shader_vertex_stages_ = 0;
+
   std::vector<VkFence> fences_free_;
   std::vector<VkSemaphore> semaphores_free_;

@@ -39,10 +39,12 @@ namespace vulkan {

 VulkanPipelineCache::VulkanPipelineCache(
     VulkanCommandProcessor& command_processor,
     const RegisterFile& register_file,
-    VulkanRenderTargetCache& render_target_cache)
+    VulkanRenderTargetCache& render_target_cache,
+    VkShaderStageFlags guest_shader_vertex_stages)
     : command_processor_(command_processor),
       register_file_(register_file),
-      render_target_cache_(render_target_cache) {}
+      render_target_cache_(render_target_cache),
+      guest_shader_vertex_stages_(guest_shader_vertex_stages) {}

 VulkanPipelineCache::~VulkanPipelineCache() { Shutdown(); }
@@ -607,6 +609,17 @@ bool VulkanPipelineCache::GetCurrentStateDescription(

 bool VulkanPipelineCache::ArePipelineRequirementsMet(
     const PipelineDescription& description) const {
+  VkShaderStageFlags vertex_shader_stage =
+      Shader::IsHostVertexShaderTypeDomain(
+          SpirvShaderTranslator::Modification(
+              description.vertex_shader_modification)
+              .vertex.host_vertex_shader_type)
+          ? VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
+          : VK_SHADER_STAGE_VERTEX_BIT;
+  if (!(guest_shader_vertex_stages_ & vertex_shader_stage)) {
+    return false;
+  }
+
   const ui::vulkan::VulkanProvider& provider =
       command_processor_.GetVulkanProvider();
   const VkPhysicalDeviceFeatures& device_features = provider.device_features();
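
The added check rejects a pipeline description whose vertex shader modification needs a host stage that SetupContext() did not enable, for example a tessellation evaluation ("domain") shader on a device without tessellationShader. A simplified, hypothetical restatement of that mapping, using a stand-in enum instead of the real Shader / SpirvShaderTranslator types:

    #include <vulkan/vulkan.h>

    // Hypothetical stand-in for the host vertex shader type of a translated
    // guest vertex shader; the real code derives it from the modification bits.
    enum class HostVertexShaderKind { kVertex, kDomain, kCompute };

    // Returns true if the stage required by this kind of host vertex shader is
    // present in the supported-stages mask gathered at setup time.
    bool IsHostVertexShaderKindSupported(HostVertexShaderKind kind,
                                         VkShaderStageFlags supported_stages) {
      VkShaderStageFlags required_stage;
      switch (kind) {
        case HostVertexShaderKind::kDomain:
          required_stage = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
          break;
        case HostVertexShaderKind::kCompute:
          required_stage = VK_SHADER_STAGE_COMPUTE_BIT;
          break;
        default:
          required_stage = VK_SHADER_STAGE_VERTEX_BIT;
          break;
      }
      return (supported_stages & required_stage) != 0;
    }

The committed check itself only distinguishes the domain case; the kCompute branch above is an extrapolation from the stage gathering added in SetupContext(), not part of this change.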

@@ -50,7 +50,8 @@ class VulkanPipelineCache {
   VulkanPipelineCache(VulkanCommandProcessor& command_processor,
                       const RegisterFile& register_file,
-                      VulkanRenderTargetCache& render_target_cache);
+                      VulkanRenderTargetCache& render_target_cache,
+                      VkShaderStageFlags guest_shader_vertex_stages);
   ~VulkanPipelineCache();

   bool Initialize();
@@ -270,6 +271,7 @@ class VulkanPipelineCache {
   VulkanCommandProcessor& command_processor_;
   const RegisterFile& register_file_;
   VulkanRenderTargetCache& render_target_cache_;
+  VkShaderStageFlags guest_shader_vertex_stages_;

   // Temporary storage for AnalyzeUcode calls on the processor thread.
   StringBuffer ucode_disasm_buffer_;

@@ -35,10 +35,12 @@ namespace vulkan {

 VulkanSharedMemory::VulkanSharedMemory(
     VulkanCommandProcessor& command_processor, Memory& memory,
-    TraceWriter& trace_writer)
+    TraceWriter& trace_writer,
+    VkPipelineStageFlags guest_shader_pipeline_stages)
     : SharedMemory(memory),
       command_processor_(command_processor),
-      trace_writer_(trace_writer) {}
+      trace_writer_(trace_writer),
+      guest_shader_pipeline_stages_(guest_shader_pipeline_stages) {}

 VulkanSharedMemory::~VulkanSharedMemory() { Shutdown(true); }
@@ -463,14 +465,8 @@ void VulkanSharedMemory::GetUsageMasks(Usage usage,
     default:
      break;
   }
-  stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
-               VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
-               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
-  const ui::vulkan::VulkanProvider& provider =
-      command_processor_.GetVulkanProvider();
-  if (provider.device_features().tessellationShader) {
-    stage_mask |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
-  }
+  stage_mask =
+      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | guest_shader_pipeline_stages_;
   access_mask = VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
   switch (usage) {
     case Usage::kRead:
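
For reference, after the SetupContext() changes earlier in this commit the precomputed mask used here covers the same stages the removed code assembled per call, plus the compute stage on devices without vertexPipelineStoresAndAtomics. A sketch of that expansion, assuming it runs somewhere with device_features and guest_shader_pipeline_stages_ in scope:

    // Sketch of what guest_shader_pipeline_stages_ holds after SetupContext().
    VkPipelineStageFlags expected = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    if (device_features.tessellationShader) {
      expected |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
    }
    if (!device_features.vertexPipelineStoresAndAtomics) {
      expected |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
    }
    assert(guest_shader_pipeline_stages_ == expected);  // Needs <cassert>.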

@@ -30,7 +30,8 @@ class VulkanCommandProcessor;
 class VulkanSharedMemory : public SharedMemory {
  public:
   VulkanSharedMemory(VulkanCommandProcessor& command_processor, Memory& memory,
-                     TraceWriter& trace_writer);
+                     TraceWriter& trace_writer,
+                     VkPipelineStageFlags guest_shader_pipeline_stages);
   ~VulkanSharedMemory() override;

   bool Initialize();
@@ -70,6 +71,7 @@ class VulkanSharedMemory : public SharedMemory {
   VulkanCommandProcessor& command_processor_;
   TraceWriter& trace_writer_;
+  VkPipelineStageFlags guest_shader_pipeline_stages_;

   VkBuffer buffer_ = VK_NULL_HANDLE;
   uint32_t buffer_memory_type_;