vk: Do not use raw GPU command buffer handles for any wrapped API calls

- There is no reason to do so, and using raw handles seriously hampers debugging efforts
This commit is contained in:
kd-11 2023-05-17 18:38:33 +03:00 committed by kd-11
parent edb2b60f2f
commit fad6647255
13 changed files with 55 additions and 57 deletions

View File

@ -128,7 +128,7 @@ namespace vk
m_used_descriptors = 0; m_used_descriptors = 0;
} }
void compute_task::load_program(VkCommandBuffer cmd) void compute_task::load_program(const vk::command_buffer& cmd)
{ {
if (!m_program) if (!m_program)
{ {
@ -170,7 +170,7 @@ namespace vk
m_descriptor_set.bind(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline_layout); m_descriptor_set.bind(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline_layout);
} }
void compute_task::run(VkCommandBuffer cmd, u32 invocations_x, u32 invocations_y, u32 invocations_z) void compute_task::run(const vk::command_buffer& cmd, u32 invocations_x, u32 invocations_y, u32 invocations_z)
{ {
// CmdDispatch is outside renderpass scope only // CmdDispatch is outside renderpass scope only
if (vk::is_renderpass_open(cmd)) if (vk::is_renderpass_open(cmd))
@ -182,7 +182,7 @@ namespace vk
vkCmdDispatch(cmd, invocations_x, invocations_y, invocations_z); vkCmdDispatch(cmd, invocations_x, invocations_y, invocations_z);
} }
void compute_task::run(VkCommandBuffer cmd, u32 num_invocations) void compute_task::run(const vk::command_buffer& cmd, u32 num_invocations)
{ {
u32 invocations_x, invocations_y; u32 invocations_x, invocations_y;
if (num_invocations > max_invocations_x) if (num_invocations > max_invocations_x)
@ -282,13 +282,13 @@ namespace vk
m_program->bind_buffer({ m_data->value, m_data_offset, m_data_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set); m_program->bind_buffer({ m_data->value, m_data_offset, m_data_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
} }
void cs_shuffle_base::set_parameters(VkCommandBuffer cmd, const u32* params, u8 count) void cs_shuffle_base::set_parameters(const vk::command_buffer& cmd, const u32* params, u8 count)
{ {
ensure(use_push_constants); ensure(use_push_constants);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params); vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params);
} }
void cs_shuffle_base::run(VkCommandBuffer cmd, const vk::buffer* data, u32 data_length, u32 data_offset) void cs_shuffle_base::run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_length, u32 data_offset)
{ {
m_data = data; m_data = data;
m_data_offset = data_offset; m_data_offset = data_offset;
@ -328,7 +328,7 @@ namespace vk
m_program->bind_buffer({ m_data->value, m_data_offset, m_ssbo_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set); m_program->bind_buffer({ m_data->value, m_data_offset, m_ssbo_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
} }
void cs_interleave_task::run(VkCommandBuffer cmd, const vk::buffer* data, u32 data_offset, u32 data_length, u32 zeta_offset, u32 stencil_offset) void cs_interleave_task::run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_offset, u32 data_length, u32 zeta_offset, u32 stencil_offset)
{ {
u32 parameters[4] = { data_length, zeta_offset - data_offset, stencil_offset - data_offset, 0 }; u32 parameters[4] = { data_length, zeta_offset - data_offset, stencil_offset - data_offset, 0 };
set_parameters(cmd, parameters, 4); set_parameters(cmd, parameters, 4);
@ -389,7 +389,7 @@ namespace vk
m_program->bind_buffer({ dst->value, 0, 4 }, 1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set); m_program->bind_buffer({ dst->value, 0, 4 }, 1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
} }
void cs_aggregator::run(VkCommandBuffer cmd, const vk::buffer* dst, const vk::buffer* src, u32 num_words) void cs_aggregator::run(const vk::command_buffer& cmd, const vk::buffer* dst, const vk::buffer* src, u32 num_words)
{ {
this->dst = dst; this->dst = dst;
this->src = src; this->src = src;

View File

@ -49,10 +49,10 @@ namespace vk
virtual void bind_resources() {} virtual void bind_resources() {}
virtual void declare_inputs() {} virtual void declare_inputs() {}
void load_program(VkCommandBuffer cmd); void load_program(const vk::command_buffer& cmd);
void run(VkCommandBuffer cmd, u32 invocations_x, u32 invocations_y, u32 invocations_z); void run(const vk::command_buffer& cmd, u32 invocations_x, u32 invocations_y, u32 invocations_z);
void run(VkCommandBuffer cmd, u32 num_invocations); void run(const vk::command_buffer& cmd, u32 num_invocations);
}; };
struct cs_shuffle_base : compute_task struct cs_shuffle_base : compute_task
@ -71,9 +71,9 @@ namespace vk
void bind_resources() override; void bind_resources() override;
void set_parameters(VkCommandBuffer cmd, const u32* params, u8 count); void set_parameters(const vk::command_buffer& cmd, const u32* params, u8 count);
void run(VkCommandBuffer cmd, const vk::buffer* data, u32 data_length, u32 data_offset = 0); void run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_length, u32 data_offset = 0);
}; };
struct cs_shuffle_16 : cs_shuffle_base struct cs_shuffle_16 : cs_shuffle_base
@ -139,7 +139,7 @@ namespace vk
void bind_resources() override; void bind_resources() override;
void run(VkCommandBuffer cmd, const vk::buffer* data, u32 data_offset, u32 data_length, u32 zeta_offset, u32 stencil_offset); void run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_offset, u32 data_length, u32 zeta_offset, u32 stencil_offset);
}; };
template<bool _SwapBytes = false> template<bool _SwapBytes = false>
@ -359,7 +359,7 @@ namespace vk
m_program->bind_buffer({ m_data->value, m_data_offset, m_ssbo_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set); m_program->bind_buffer({ m_data->value, m_data_offset, m_ssbo_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
} }
void run(VkCommandBuffer cmd, const vk::buffer* data, u32 src_offset, u32 src_length, u32 dst_offset) void run(const vk::command_buffer& cmd, const vk::buffer* data, u32 src_offset, u32 src_length, u32 dst_offset)
{ {
u32 data_offset; u32 data_offset;
if (src_offset > dst_offset) if (src_offset > dst_offset)
@ -382,7 +382,7 @@ namespace vk
// Reverse morton-order block arrangement // Reverse morton-order block arrangement
struct cs_deswizzle_base : compute_task struct cs_deswizzle_base : compute_task
{ {
virtual void run(VkCommandBuffer cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) = 0; virtual void run(const vk::command_buffer& cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) = 0;
}; };
template <typename _BlockType, typename _BaseType, bool _SwapBytes> template <typename _BlockType, typename _BaseType, bool _SwapBytes>
@ -461,12 +461,12 @@ namespace vk
m_program->bind_buffer({ dst_buffer->value, out_offset, block_length }, 1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set); m_program->bind_buffer({ dst_buffer->value, out_offset, block_length }, 1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
} }
void set_parameters(VkCommandBuffer cmd) void set_parameters(const vk::command_buffer& cmd)
{ {
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, params.data); vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, params.data);
} }
void run(VkCommandBuffer cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) override void run(const vk::command_buffer& cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) override
{ {
dst_buffer = dst; dst_buffer = dst;
src_buffer = src; src_buffer = src;
@ -501,7 +501,7 @@ namespace vk
void bind_resources() override; void bind_resources() override;
void run(VkCommandBuffer cmd, const vk::buffer* dst, const vk::buffer* src, u32 num_words); void run(const vk::command_buffer& cmd, const vk::buffer* dst, const vk::buffer* src, u32 num_words);
}; };
// TODO: Replace with a proper manager // TODO: Replace with a proper manager

View File

@ -831,7 +831,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
} }
bool reload_state = (!m_current_draw.subdraw_id++); bool reload_state = (!m_current_draw.subdraw_id++);
vk::renderpass_op(*m_current_command_buffer, [&](VkCommandBuffer cmd, VkRenderPass pass, VkFramebuffer fbo) vk::renderpass_op(*m_current_command_buffer, [&](const vk::command_buffer& cmd, VkRenderPass pass, VkFramebuffer fbo)
{ {
if (get_render_pass() == pass && m_draw_fbo->value == fbo) if (get_render_pass() == pass && m_draw_fbo->value == fbo)
{ {

View File

@ -74,8 +74,8 @@ namespace vk
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags); VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags);
// Other texture management helpers // Other texture management helpers
void copy_image_to_buffer(VkCommandBuffer cmd, const vk::image* src, const vk::buffer* dst, const VkBufferImageCopy& region, bool swap_bytes = false); void copy_image_to_buffer(const vk::command_buffer& cmd, const vk::image* src, const vk::buffer* dst, const VkBufferImageCopy& region, bool swap_bytes = false);
void copy_buffer_to_image(VkCommandBuffer cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region); void copy_buffer_to_image(const vk::command_buffer& cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region);
u64 calculate_working_buffer_size(u64 base_size, VkImageAspectFlags aspect); u64 calculate_working_buffer_size(u64 base_size, VkImageAspectFlags aspect);
void copy_image_typeless(const command_buffer &cmd, image *src, image *dst, const areai& src_rect, const areai& dst_rect, void copy_image_typeless(const command_buffer &cmd, image *src, image *dst, const areai& src_rect, const areai& dst_rect,

View File

@ -341,7 +341,7 @@ namespace vk
g_renderpass_cache.clear(); g_renderpass_cache.clear();
} }
void begin_renderpass(VkCommandBuffer cmd, VkRenderPass pass, VkFramebuffer target, const coordu& framebuffer_region) void begin_renderpass(const vk::command_buffer& cmd, VkRenderPass pass, VkFramebuffer target, const coordu& framebuffer_region)
{ {
auto& renderpass_info = g_current_renderpass[cmd]; auto& renderpass_info = g_current_renderpass[cmd];
if (renderpass_info.pass == pass && renderpass_info.fbo == target) if (renderpass_info.pass == pass && renderpass_info.fbo == target)
@ -366,7 +366,7 @@ namespace vk
renderpass_info = { pass, target }; renderpass_info = { pass, target };
} }
void begin_renderpass(VkDevice dev, VkCommandBuffer cmd, u64 renderpass_key, VkFramebuffer target, const coordu& framebuffer_region) void begin_renderpass(VkDevice dev, const vk::command_buffer& cmd, u64 renderpass_key, VkFramebuffer target, const coordu& framebuffer_region)
{ {
if (renderpass_key != g_cached_renderpass_key) if (renderpass_key != g_cached_renderpass_key)
{ {
@ -377,18 +377,18 @@ namespace vk
begin_renderpass(cmd, g_cached_renderpass, target, framebuffer_region); begin_renderpass(cmd, g_cached_renderpass, target, framebuffer_region);
} }
void end_renderpass(VkCommandBuffer cmd) void end_renderpass(const vk::command_buffer& cmd)
{ {
vkCmdEndRenderPass(cmd); vkCmdEndRenderPass(cmd);
g_current_renderpass[cmd] = {}; g_current_renderpass[cmd] = {};
} }
bool is_renderpass_open(VkCommandBuffer cmd) bool is_renderpass_open(const vk::command_buffer& cmd)
{ {
return g_current_renderpass[cmd].pass != VK_NULL_HANDLE; return g_current_renderpass[cmd].pass != VK_NULL_HANDLE;
} }
void renderpass_op(VkCommandBuffer cmd, const renderpass_op_callback_t& op) void renderpass_op(const vk::command_buffer& cmd, const renderpass_op_callback_t& op)
{ {
const auto& active = g_current_renderpass[cmd]; const auto& active = g_current_renderpass[cmd];
op(cmd, active.pass, active.fbo); op(cmd, active.pass, active.fbo);

View File

@ -6,6 +6,7 @@
namespace vk namespace vk
{ {
class image; class image;
class command_buffer;
u64 get_renderpass_key(const std::vector<vk::image*>& images, const std::vector<u8>& input_attachment_ids = {}); u64 get_renderpass_key(const std::vector<vk::image*>& images, const std::vector<u8>& input_attachment_ids = {});
u64 get_renderpass_key(const std::vector<vk::image*>& images, u64 previous_key); u64 get_renderpass_key(const std::vector<vk::image*>& images, u64 previous_key);
@ -16,11 +17,11 @@ namespace vk
// Renderpass scope management helpers. // Renderpass scope management helpers.
// NOTE: These are not thread safe by design. // NOTE: These are not thread safe by design.
void begin_renderpass(VkDevice dev, VkCommandBuffer cmd, u64 renderpass_key, VkFramebuffer target, const coordu& framebuffer_region); void begin_renderpass(VkDevice dev, const vk::command_buffer& cmd, u64 renderpass_key, VkFramebuffer target, const coordu& framebuffer_region);
void begin_renderpass(VkCommandBuffer cmd, VkRenderPass pass, VkFramebuffer target, const coordu& framebuffer_region); void begin_renderpass(const vk::command_buffer& cmd, VkRenderPass pass, VkFramebuffer target, const coordu& framebuffer_region);
void end_renderpass(VkCommandBuffer cmd); void end_renderpass(const vk::command_buffer& cmd);
bool is_renderpass_open(VkCommandBuffer cmd); bool is_renderpass_open(const vk::command_buffer& cmd);
using renderpass_op_callback_t = std::function<void(VkCommandBuffer, VkRenderPass, VkFramebuffer)>; using renderpass_op_callback_t = std::function<void(const vk::command_buffer&, VkRenderPass, VkFramebuffer)>;
void renderpass_op(VkCommandBuffer cmd, const renderpass_op_callback_t& op); void renderpass_op(const vk::command_buffer& cmd, const renderpass_op_callback_t& op);
} }

View File

@ -125,7 +125,7 @@ namespace vk
m_program->bind_uniform({ VK_NULL_HANDLE, resolved_view->value, resolve->current_layout }, "resolve", VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_descriptor_set); m_program->bind_uniform({ VK_NULL_HANDLE, resolved_view->value, resolve->current_layout }, "resolve", VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_descriptor_set);
} }
void run(VkCommandBuffer cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image) void run(const vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image)
{ {
ensure(msaa_image->samples() > 1); ensure(msaa_image->samples() > 1);
ensure(resolve_image->samples() == 1); ensure(resolve_image->samples() == 1);

View File

@ -8,7 +8,7 @@
namespace vk namespace vk
{ {
void insert_image_memory_barrier( void insert_image_memory_barrier(
VkCommandBuffer cmd, VkImage image, const vk::command_buffer& cmd, VkImage image,
VkImageLayout current_layout, VkImageLayout new_layout, VkImageLayout current_layout, VkImageLayout new_layout,
VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage,
VkAccessFlags src_mask, VkAccessFlags dst_mask, VkAccessFlags src_mask, VkAccessFlags dst_mask,
@ -33,7 +33,7 @@ namespace vk
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier); vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
} }
void insert_buffer_memory_barrier(VkCommandBuffer cmd, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize length, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask) void insert_buffer_memory_barrier(const vk::command_buffer& cmd, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize length, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask)
{ {
if (vk::is_renderpass_open(cmd)) if (vk::is_renderpass_open(cmd))
{ {
@ -53,7 +53,7 @@ namespace vk
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 1, &barrier, 0, nullptr); vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 1, &barrier, 0, nullptr);
} }
void insert_global_memory_barrier(VkCommandBuffer cmd, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_access, VkAccessFlags dst_access) void insert_global_memory_barrier(const vk::command_buffer& cmd, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_access, VkAccessFlags dst_access)
{ {
if (vk::is_renderpass_open(cmd)) if (vk::is_renderpass_open(cmd))
{ {
@ -67,7 +67,7 @@ namespace vk
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 1, &barrier, 0, nullptr, 0, nullptr); vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 1, &barrier, 0, nullptr, 0, nullptr);
} }
void insert_texture_barrier(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, VkImageSubresourceRange range) void insert_texture_barrier(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, VkImageSubresourceRange range)
{ {
// NOTE: Sampling from an attachment in ATTACHMENT_OPTIMAL layout on some hw ends up with garbage output // NOTE: Sampling from an attachment in ATTACHMENT_OPTIMAL layout on some hw ends up with garbage output
// Transition to GENERAL if this resource is both input and output // Transition to GENERAL if this resource is both input and output
@ -105,7 +105,7 @@ namespace vk
vkCmdPipelineBarrier(cmd, src_stage, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); vkCmdPipelineBarrier(cmd, src_stage, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
} }
void insert_texture_barrier(VkCommandBuffer cmd, vk::image* image, VkImageLayout new_layout) void insert_texture_barrier(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout)
{ {
insert_texture_barrier(cmd, image->value, image->current_layout, new_layout, { image->aspect(), 0, 1, 0, 1 }); insert_texture_barrier(cmd, image->value, image->current_layout, new_layout, { image->aspect(), 0, 1, 0, 1 });
image->current_layout = new_layout; image->current_layout = new_layout;

View File

@ -5,19 +5,20 @@
namespace vk namespace vk
{ {
class image; class image;
class command_buffer;
//Texture barrier applies to a texture to ensure writes to it are finished before any reads are attempted to avoid RAW hazards //Texture barrier applies to a texture to ensure writes to it are finished before any reads are attempted to avoid RAW hazards
void insert_texture_barrier(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, VkImageSubresourceRange range); void insert_texture_barrier(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, VkImageSubresourceRange range);
void insert_texture_barrier(VkCommandBuffer cmd, vk::image* image, VkImageLayout new_layout); void insert_texture_barrier(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout);
void insert_buffer_memory_barrier(VkCommandBuffer cmd, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize length, void insert_buffer_memory_barrier(const vk::command_buffer& cmd, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize length,
VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask); VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask);
void insert_image_memory_barrier(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, void insert_image_memory_barrier(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout,
VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask,
const VkImageSubresourceRange& range); const VkImageSubresourceRange& range);
void insert_global_memory_barrier(VkCommandBuffer cmd, void insert_global_memory_barrier(const vk::command_buffer& cmd,
VkPipelineStageFlags src_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VkPipelineStageFlags src_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkPipelineStageFlags dst_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VkPipelineStageFlags dst_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkAccessFlags src_access = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, VkAccessFlags src_access = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,

View File

@ -360,7 +360,7 @@ namespace vk
} }
} }
void descriptor_set::bind(VkCommandBuffer cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout) void descriptor_set::bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout)
{ {
if ((m_push_type_mask & ~m_update_after_bind_mask) || (m_pending_writes.size() >= max_cache_size)) if ((m_push_type_mask & ~m_update_after_bind_mask) || (m_pending_writes.size() >= max_cache_size))
{ {
@ -370,11 +370,6 @@ namespace vk
vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, 0, nullptr); vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, 0, nullptr);
} }
void descriptor_set::bind(const command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout)
{
bind(static_cast<VkCommandBuffer>(cmd), bind_point, layout);
}
void descriptor_set::flush() void descriptor_set::flush()
{ {
if (!m_push_type_mask) if (!m_push_type_mask)

View File

@ -67,8 +67,7 @@ namespace vk
void push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding); void push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding);
void push(rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask = umax); void push(rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask = umax);
void bind(VkCommandBuffer cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout); void bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);
void bind(const command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);
void flush(); void flush();

View File

@ -55,7 +55,7 @@ namespace vk
return{ final_mapping[1], final_mapping[2], final_mapping[3], final_mapping[0] }; return{ final_mapping[1], final_mapping[2], final_mapping[3], final_mapping[0] };
} }
void change_image_layout(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range, void change_image_layout(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range,
u32 src_queue_family, u32 dst_queue_family, u32 src_access_mask_bits, u32 dst_access_mask_bits) u32 src_queue_family, u32 dst_queue_family, u32 src_access_mask_bits, u32 dst_access_mask_bits)
{ {
if (vk::is_renderpass_open(cmd)) if (vk::is_renderpass_open(cmd))
@ -207,7 +207,7 @@ namespace vk
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier); vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
} }
void change_image_layout(VkCommandBuffer cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range) void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range)
{ {
if (image->current_layout == new_layout) return; if (image->current_layout == new_layout) return;
@ -215,7 +215,7 @@ namespace vk
image->current_layout = new_layout; image->current_layout = new_layout;
} }
void change_image_layout(VkCommandBuffer cmd, vk::image* image, VkImageLayout new_layout) void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout)
{ {
if (image->current_layout == new_layout) return; if (image->current_layout == new_layout) return;

View File

@ -4,15 +4,17 @@
namespace vk namespace vk
{ {
class image; class image;
class command_buffer;
extern VkComponentMapping default_component_map; extern VkComponentMapping default_component_map;
VkImageAspectFlags get_aspect_flags(VkFormat format); VkImageAspectFlags get_aspect_flags(VkFormat format);
VkComponentMapping apply_swizzle_remap(const std::array<VkComponentSwizzle, 4>& base_remap, const std::pair<std::array<u8, 4>, std::array<u8, 4>>& remap_vector); VkComponentMapping apply_swizzle_remap(const std::array<VkComponentSwizzle, 4>& base_remap, const std::pair<std::array<u8, 4>, std::array<u8, 4>>& remap_vector);
void change_image_layout(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range, void change_image_layout(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range,
u32 src_queue_family = VK_QUEUE_FAMILY_IGNORED, u32 dst_queue_family = VK_QUEUE_FAMILY_IGNORED, u32 src_queue_family = VK_QUEUE_FAMILY_IGNORED, u32 dst_queue_family = VK_QUEUE_FAMILY_IGNORED,
u32 src_access_mask_bits = 0xFFFFFFFF, u32 dst_access_mask_bits = 0xFFFFFFFF); u32 src_access_mask_bits = 0xFFFFFFFF, u32 dst_access_mask_bits = 0xFFFFFFFF);
void change_image_layout(VkCommandBuffer cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range); void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range);
void change_image_layout(VkCommandBuffer cmd, vk::image* image, VkImageLayout new_layout); void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout);
} }