diff --git a/rpcs3/Emu/RSX/Common/BufferUtils.cpp b/rpcs3/Emu/RSX/Common/BufferUtils.cpp index 56d6461d9b..34526bedb8 100644 --- a/rpcs3/Emu/RSX/Common/BufferUtils.cpp +++ b/rpcs3/Emu/RSX/Common/BufferUtils.cpp @@ -2,9 +2,6 @@ #include "BufferUtils.h" #include "../rsx_methods.h" -#define MIN2(x, y) ((x) < (y)) ? (x) : (y) -#define MAX2(x, y) ((x) > (y)) ? (x) : (y) - namespace { // FIXME: GSL as_span break build if template parameter is non const with current revision. @@ -49,19 +46,17 @@ namespace } } -void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride) +void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride) { Expects(vector_element_count > 0); - u32 element_size = rsx::get_vertex_type_size_on_host(type, vector_element_count); - switch (type) { case rsx::vertex_base_type::ub: case rsx::vertex_base_type::ub256: { gsl::span dst_span = as_span_workaround(raw_dst_span); - copy_whole_attribute_array(dst_span, src_ptr, vector_element_count, element_size, attribute_src_stride, first, count); + copy_whole_attribute_array(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count); return; } case rsx::vertex_base_type::s1: @@ -69,13 +64,13 @@ void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, const case rsx::vertex_base_type::s32k: { gsl::span dst_span = as_span_workaround(raw_dst_span); - copy_whole_attribute_array>(dst_span, src_ptr, vector_element_count, element_size, attribute_src_stride, first, count); + copy_whole_attribute_array>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count); return; } case rsx::vertex_base_type::f: { gsl::span dst_span = as_span_workaround(raw_dst_span); - 
copy_whole_attribute_array>(dst_span, src_ptr, vector_element_count, element_size, attribute_src_stride, first, count); + copy_whole_attribute_array>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count); return; } case rsx::vertex_base_type::cmp: @@ -85,10 +80,10 @@ void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, const { auto* c_src = (const be_t*)(src_ptr + attribute_src_stride * (first + i)); const auto& decoded_vector = decode_cmp_vector(*c_src); - dst_span[i * element_size / sizeof(u16)] = decoded_vector[0]; - dst_span[i * element_size / sizeof(u16) + 1] = decoded_vector[1]; - dst_span[i * element_size / sizeof(u16) + 2] = decoded_vector[2]; - dst_span[i * element_size / sizeof(u16) + 3] = decoded_vector[3]; + dst_span[i * dst_stride / sizeof(u16)] = decoded_vector[0]; + dst_span[i * dst_stride / sizeof(u16) + 1] = decoded_vector[1]; + dst_span[i * dst_stride / sizeof(u16) + 2] = decoded_vector[2]; + dst_span[i * dst_stride / sizeof(u16) + 3] = decoded_vector[3]; } return; } @@ -114,8 +109,8 @@ std::tuple upload_untouched(gsl::span> src, gsl::span } else { - max_index = MAX2(max_index, index); - min_index = MIN2(min_index, index); + max_index = std::max(max_index, index); + min_index = std::min(min_index, index); } dst[dst_idx++] = index; } @@ -134,8 +129,8 @@ std::tuple expand_indexed_triangle_fan(gsl::span> src, gs const T index0 = src[0]; if (!is_primitive_restart_enabled || index0 != -1) // Cut { - min_index = MIN2(min_index, index0); - max_index = MAX2(max_index, index0); + min_index = std::min(min_index, index0); + max_index = std::max(max_index, index0); } size_t dst_idx = 0; @@ -149,8 +144,8 @@ std::tuple expand_indexed_triangle_fan(gsl::span> src, gs } else { - min_index = MIN2(min_index, index1); - max_index = MAX2(max_index, index1); + min_index = std::min(min_index, index1); + max_index = std::max(max_index, index1); } T index2 = tri_indexes[1]; if (is_primitive_restart_enabled && index2 == 
primitive_restart_index) @@ -159,8 +154,8 @@ std::tuple expand_indexed_triangle_fan(gsl::span> src, gs } else { - min_index = MIN2(min_index, index2); - max_index = MAX2(max_index, index2); + min_index = std::min(min_index, index2); + max_index = std::max(max_index, index2); } dst[dst_idx++] = index0; @@ -192,8 +187,8 @@ std::tuple expand_indexed_quads(gsl::span> src, gsl::span } else { - min_index = MIN2(min_index, index0); - max_index = MAX2(max_index, index0); + min_index = std::min(min_index, index0); + max_index = std::max(max_index, index0); } T index1 = quad_indexes[1]; if (is_primitive_restart_enabled && index1 == primitive_restart_index) @@ -202,8 +197,8 @@ std::tuple expand_indexed_quads(gsl::span> src, gsl::span } else { - min_index = MIN2(min_index, index1); - max_index = MAX2(max_index, index1); + min_index = std::min(min_index, index1); + max_index = std::max(max_index, index1); } T index2 = quad_indexes[2]; if (is_primitive_restart_enabled && index2 == primitive_restart_index) @@ -212,8 +207,8 @@ std::tuple expand_indexed_quads(gsl::span> src, gsl::span } else { - min_index = MIN2(min_index, index2); - max_index = MAX2(max_index, index2); + min_index = std::min(min_index, index2); + max_index = std::max(max_index, index2); } T index3 = quad_indexes[3]; if (is_primitive_restart_enabled &&index3 == primitive_restart_index) @@ -222,8 +217,8 @@ std::tuple expand_indexed_quads(gsl::span> src, gsl::span } else { - min_index = MIN2(min_index, index3); - max_index = MAX2(max_index, index3); + min_index = std::min(min_index, index3); + max_index = std::max(max_index, index3); } // First triangle @@ -394,14 +389,16 @@ std::tuple write_index_array_data_to_buffer_impl(gsl::span write_index_array_data_to_buffer(gsl::span dst, rsx::primitive_type draw_mode, const std::vector > &first_count_arguments) +std::tuple write_index_array_data_to_buffer(gsl::span dst, rsx::index_array_type type, rsx::primitive_type draw_mode, const std::vector > &first_count_arguments) { - 
return write_index_array_data_to_buffer_impl(dst, draw_mode, first_count_arguments); -} - -std::tuple write_index_array_data_to_buffer(gsl::span dst, rsx::primitive_type draw_mode, const std::vector > &first_count_arguments) -{ - return write_index_array_data_to_buffer_impl(dst, draw_mode, first_count_arguments); + switch (type) + { + case rsx::index_array_type::u16: + return write_index_array_data_to_buffer_impl(as_span_workaround(dst), draw_mode, first_count_arguments); + case rsx::index_array_type::u32: + return write_index_array_data_to_buffer_impl(as_span_workaround(dst), draw_mode, first_count_arguments); + } + throw EXCEPTION("Unknown index type"); } std::tuple write_index_array_data_to_buffer_untouched(gsl::span dst, const std::vector > &first_count_arguments) diff --git a/rpcs3/Emu/RSX/Common/BufferUtils.h b/rpcs3/Emu/RSX/Common/BufferUtils.h index df0d0d18c2..2ba850af51 100644 --- a/rpcs3/Emu/RSX/Common/BufferUtils.h +++ b/rpcs3/Emu/RSX/Common/BufferUtils.h @@ -7,7 +7,7 @@ * Write count vertex attributes from src_ptr starting at first. * src_ptr array layout is deduced from the type, vector element count and src_stride arguments. */ -void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride); +void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride); /* * If primitive mode is not supported and need to be emulated (using an index buffer) returns false. @@ -29,8 +29,8 @@ size_t get_index_type_size(rsx::index_array_type type); * Returns min/max index found during the process. * The function expands index buffer for non native primitive type. 
*/ -std::tuple write_index_array_data_to_buffer(gsl::span dst, rsx::primitive_type draw_mode, const std::vector > &first_count_arguments); -std::tuple write_index_array_data_to_buffer(gsl::span dst, rsx::primitive_type draw_mode, const std::vector > &first_count_arguments); +std::tuple write_index_array_data_to_buffer(gsl::span dst, rsx::index_array_type, rsx::primitive_type draw_mode, const std::vector > &first_count_arguments); + /** * Doesn't expand index diff --git a/rpcs3/Emu/RSX/Common/TextureUtils.cpp b/rpcs3/Emu/RSX/Common/TextureUtils.cpp index 7d0f478f65..f39c06601b 100644 --- a/rpcs3/Emu/RSX/Common/TextureUtils.cpp +++ b/rpcs3/Emu/RSX/Common/TextureUtils.cpp @@ -5,7 +5,6 @@ #include "../rsx_utils.h" -#define MAX2(a, b) ((a) > (b)) ? (a) : (b) namespace { // FIXME: GSL as_span break build if template parameter is non const with current revision. @@ -101,8 +100,8 @@ namespace result.push_back(current_subresource_layout); offset_in_src += miplevel_height_in_block * src_pitch_in_block * block_size_in_bytes * depth; - miplevel_height_in_block = MAX2(miplevel_height_in_block / 2, 1); - miplevel_width_in_block = MAX2(miplevel_width_in_block / 2, 1); + miplevel_height_in_block = std::max(miplevel_height_in_block / 2, 1); + miplevel_width_in_block = std::max(miplevel_width_in_block / 2, 1); } offset_in_src = align(offset_in_src, 128); } @@ -343,7 +342,7 @@ u8 get_format_block_size_in_texel(int format) size_t get_placed_texture_storage_size(const rsx::texture &texture, size_t rowPitchAlignement, size_t mipmapAlignment) { - size_t w = texture.width(), h = texture.height(), d = MAX2(texture.depth(), 1); + size_t w = texture.width(), h = texture.height(), d = std::max(texture.depth(), 1); int format = texture.format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN); size_t blockEdge = get_format_block_size_in_texel(format); @@ -352,14 +351,13 @@ size_t get_placed_texture_storage_size(const rsx::texture &texture, size_t rowPi size_t heightInBlocks = (h + blockEdge - 1) 
/ blockEdge; size_t widthInBlocks = (w + blockEdge - 1) / blockEdge; - size_t result = 0; for (unsigned mipmap = 0; mipmap < texture.mipmap(); ++mipmap) { size_t rowPitch = align(blockSizeInByte * widthInBlocks, rowPitchAlignement); result += align(rowPitch * heightInBlocks * d, mipmapAlignment); - heightInBlocks = MAX2(heightInBlocks / 2, 1); - widthInBlocks = MAX2(widthInBlocks / 2, 1); + heightInBlocks = std::max<size_t>(heightInBlocks / 2, 1); + widthInBlocks = std::max<size_t>(widthInBlocks / 2, 1); } return result * (texture.cubemap() ? 6 : 1); diff --git a/rpcs3/Emu/RSX/Common/ring_buffer_helper.h b/rpcs3/Emu/RSX/Common/ring_buffer_helper.h new file mode 100644 index 0000000000..68c8333973 --- /dev/null +++ b/rpcs3/Emu/RSX/Common/ring_buffer_helper.h @@ -0,0 +1,86 @@ +#pragma once + +/** + * Ring buffer memory helper : + * There are 2 "pointers" (offset inside a memory buffer to be provided by class derivative) + * PUT pointer "points" to the start of allocatable space. + * GET pointer "points" to the start of memory in use by the GPU. + * Space between GET and PUT is used by the GPU ; this structure checks that this memory is not overwritten. + * User has to update the GET pointer when synchronisation happens. + */ +struct data_heap +{ + /** + * Does alloc cross get position ? + */ + template + bool can_alloc(size_t size) const + { + size_t alloc_size = align(size, Alignement); + size_t aligned_put_pos = align(m_put_pos, Alignement); + if (aligned_put_pos + alloc_size < m_size) + { + // range before get + if (aligned_put_pos + alloc_size < m_get_pos) + return true; + // range after get + if (aligned_put_pos > m_get_pos) + return true; + return false; + } + else + { + // ..]....[..get.. + if (aligned_put_pos < m_get_pos) + return false; + // ..get..]...[... 
+ // Actually all resources extending beyond heap space start at 0 + if (alloc_size > m_get_pos) + return false; + return true; + } + } + + size_t m_size; + size_t m_put_pos; // Start of free space +public: + data_heap() = default; + ~data_heap() = default; + data_heap(const data_heap&) = delete; + data_heap(data_heap&&) = delete; + + size_t m_get_pos; // End of free space + + void init(size_t heap_size) + { + m_size = heap_size; + m_put_pos = 0; + m_get_pos = heap_size - 1; + } + + template + size_t alloc(size_t size) + { + if (!can_alloc(size)) throw EXCEPTION("Working buffer not big enough"); + size_t alloc_size = align(size, Alignement); + size_t aligned_put_pos = align(m_put_pos, Alignement); + if (aligned_put_pos + alloc_size < m_size) + { + m_put_pos = aligned_put_pos + alloc_size; + return aligned_put_pos; + } + else + { + m_put_pos = alloc_size; + return 0; + } + } + + /** + * return current putpos - 1 + */ + size_t get_current_put_pos_minus_one() const + { + return (m_put_pos > 0) ? 
m_put_pos - 1 : m_size - 1; + } +}; diff --git a/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp b/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp index 70238a88b7..a6a94fffe6 100644 --- a/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp +++ b/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp @@ -104,7 +104,7 @@ std::vector D3D12GSRender::upload_vertex_attrib for (const auto &range : vertex_ranges) { gsl::span mapped_buffer_span = { (gsl::byte*)mapped_buffer, gsl::narrow_cast(buffer_size) }; - write_vertex_array_data_to_buffer(mapped_buffer_span, src_ptr, range.first, range.second, info.type, info.size, info.stride); + write_vertex_array_data_to_buffer(mapped_buffer_span, src_ptr, range.first, range.second, info.type, info.size, info.stride, element_size); mapped_buffer = (char*)mapped_buffer + range.second * element_size; } m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size)); @@ -146,7 +146,7 @@ namespace std::tuple, size_t> upload_inlined_vertex_array( gsl::span vertex_attribute_infos, gsl::span inlined_array_raw_data, - data_heap& ring_buffer_data, + d3d12_data_heap& ring_buffer_data, ID3D12Resource* vertex_buffer_placement, ID3D12GraphicsCommandList* command_list ) @@ -364,18 +364,9 @@ std::tuple> D3D12GSRe void *mapped_buffer = m_buffer_data.map(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size)); u32 min_index, max_index; + gsl::span dst{ reinterpret_cast(mapped_buffer), gsl::narrow(buffer_size) }; - if (indexed_type == rsx::index_array_type::u16) - { - gsl::span dst = { (u16*)mapped_buffer, gsl::narrow(buffer_size / index_size) }; - std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, draw_mode, first_count_commands); - } - - if (indexed_type == rsx::index_array_type::u32) - { - gsl::span dst = { (u32*)mapped_buffer, gsl::narrow(buffer_size / index_size) }; - std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, draw_mode, first_count_commands); - } + std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, indexed_type, draw_mode, 
first_count_commands); m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size)); D3D12_INDEX_BUFFER_VIEW index_buffer_view = { diff --git a/rpcs3/Emu/RSX/D3D12/D3D12GSRender.h b/rpcs3/Emu/RSX/D3D12/D3D12GSRender.h index e5db7bc3b9..51dd134cfe 100644 --- a/rpcs3/Emu/RSX/D3D12/D3D12GSRender.h +++ b/rpcs3/Emu/RSX/D3D12/D3D12GSRender.h @@ -106,8 +106,8 @@ private: resource_storage &get_non_current_resource_storage(); // Textures, constants, index and vertex buffers storage - data_heap m_buffer_data; - data_heap m_readback_resources; + d3d12_data_heap m_buffer_data; + d3d12_data_heap m_readback_resources; ComPtr m_vertex_buffer_data; rsx::render_targets m_rtts; diff --git a/rpcs3/Emu/RSX/D3D12/D3D12MemoryHelpers.h b/rpcs3/Emu/RSX/D3D12/D3D12MemoryHelpers.h index e8cb6c9dec..24290d5b91 100644 --- a/rpcs3/Emu/RSX/D3D12/D3D12MemoryHelpers.h +++ b/rpcs3/Emu/RSX/D3D12/D3D12MemoryHelpers.h @@ -1,58 +1,16 @@ #pragma once #include "D3D12Utils.h" #include "d3dx12.h" +#include "../Common/ring_buffer_helper.h" - -/** -* Wrapper around a ID3D12Resource or a ID3D12Heap. -* Acts as a ring buffer : hold a get and put pointers, -* put pointer is used as storage space offset -* and get is used as beginning of in use data space. -* This wrapper checks that put pointer doesn't cross get one. -*/ -class data_heap +struct d3d12_data_heap : public data_heap { - /** - * Does alloc cross get position ? - */ - template - bool can_alloc(size_t size) const - { - size_t alloc_size = align(size, Alignement); - size_t aligned_put_pos = align(m_put_pos, Alignement); - if (aligned_put_pos + alloc_size < m_size) - { - // range before get - if (aligned_put_pos + alloc_size < m_get_pos) - return true; - // range after get - if (aligned_put_pos > m_get_pos) - return true; - return false; - } - else - { - // ..]....[..get.. - if (aligned_put_pos < m_get_pos) - return false; - // ..get..]...[... 
- // Actually all resources extending beyond heap space starts at 0 - if (alloc_size > m_get_pos) - return false; - return true; - } - } - - size_t m_size; - size_t m_put_pos; // Start of free space ComPtr m_heap; public: - data_heap() = default; - ~data_heap() = default; - data_heap(const data_heap&) = delete; - data_heap(data_heap&&) = delete; - - size_t m_get_pos; // End of free space + d3d12_data_heap() = default; + ~d3d12_data_heap() = default; + d3d12_data_heap(const d3d12_data_heap&) = delete; + d3d12_data_heap(d3d12_data_heap&&) = delete; template void init(ID3D12Device *device, size_t heap_size, D3D12_HEAP_TYPE type, D3D12_RESOURCE_STATES state) @@ -72,24 +30,6 @@ public: ); } - template - size_t alloc(size_t size) - { - if (!can_alloc(size)) throw EXCEPTION("Working buffer not big enough"); - size_t alloc_size = align(size, Alignement); - size_t aligned_put_pos = align(m_put_pos, Alignement); - if (aligned_put_pos + alloc_size < m_size) - { - m_put_pos = aligned_put_pos + alloc_size; - return aligned_put_pos; - } - else - { - m_put_pos = alloc_size; - return 0; - } - } - template T* map(const D3D12_RANGE &range) { @@ -122,14 +62,6 @@ public: { return m_heap.Get(); } - - /** - * return current putpos - 1 - */ - size_t get_current_put_pos_minus_one() const - { - return (m_put_pos - 1 > 0) ? 
m_put_pos - 1 : m_size - 1; - } }; struct texture_entry diff --git a/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.cpp b/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.cpp index 36e271cf1e..f10cbb9bbe 100644 --- a/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.cpp +++ b/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.cpp @@ -240,7 +240,7 @@ namespace size_t download_to_readback_buffer( ID3D12Device *device, ID3D12GraphicsCommandList * command_list, - data_heap &readback_heap, + d3d12_data_heap &readback_heap, ID3D12Resource * color_surface, rsx::surface_color_format color_surface_format ) @@ -262,7 +262,7 @@ namespace return heap_offset; } - void copy_readback_buffer_to_dest(void *dest, data_heap &readback_heap, size_t offset_in_heap, size_t dst_pitch, size_t src_pitch, size_t height) + void copy_readback_buffer_to_dest(void *dest, d3d12_data_heap &readback_heap, size_t offset_in_heap, size_t dst_pitch, size_t src_pitch, size_t height) { // TODO: Use exact range void *mapped_buffer = readback_heap.map(offset_in_heap); diff --git a/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.h b/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.h index 15c8db05c0..ca21547541 100644 --- a/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.h +++ b/rpcs3/Emu/RSX/D3D12/D3D12RenderTargetSets.h @@ -130,7 +130,7 @@ struct render_target_traits std::tuple, HANDLE> issue_download_command( gsl::not_null rtt, surface_color_format color_format, size_t width, size_t height, - gsl::not_null device, gsl::not_null command_queue, data_heap &readback_heap, resource_storage &res_store + gsl::not_null device, gsl::not_null command_queue, d3d12_data_heap &readback_heap, resource_storage &res_store ) { ID3D12GraphicsCommandList* command_list = res_store.command_list.Get(); @@ -163,7 +163,7 @@ struct render_target_traits std::tuple, HANDLE> issue_depth_download_command( gsl::not_null ds, surface_depth_format depth_format, size_t width, size_t height, - gsl::not_null device, gsl::not_null command_queue, data_heap &readback_heap, resource_storage 
&res_store + gsl::not_null device, gsl::not_null command_queue, d3d12_data_heap &readback_heap, resource_storage &res_store ) { ID3D12GraphicsCommandList* command_list = res_store.command_list.Get(); @@ -196,7 +196,7 @@ struct render_target_traits std::tuple, HANDLE> issue_stencil_download_command( gsl::not_null stencil, size_t width, size_t height, - gsl::not_null device, gsl::not_null command_queue, data_heap &readback_heap, resource_storage &res_store + gsl::not_null device, gsl::not_null command_queue, d3d12_data_heap &readback_heap, resource_storage &res_store ) { ID3D12GraphicsCommandList* command_list = res_store.command_list.Get(); @@ -226,7 +226,7 @@ struct render_target_traits static gsl::span map_downloaded_buffer(const std::tuple, HANDLE> &sync_data, - gsl::not_null device, gsl::not_null command_queue, data_heap &readback_heap, resource_storage &res_store) + gsl::not_null device, gsl::not_null command_queue, d3d12_data_heap &readback_heap, resource_storage &res_store) { size_t offset; size_t buffer_size; @@ -243,7 +243,7 @@ struct render_target_traits static void unmap_downloaded_buffer(const std::tuple, HANDLE> &sync_data, - gsl::not_null device, gsl::not_null command_queue, data_heap &readback_heap, resource_storage &res_store) + gsl::not_null device, gsl::not_null command_queue, d3d12_data_heap &readback_heap, resource_storage &res_store) { readback_heap.unmap(); } diff --git a/rpcs3/Emu/RSX/D3D12/D3D12Texture.cpp b/rpcs3/Emu/RSX/D3D12/D3D12Texture.cpp index 160b2ee844..26ca992e45 100644 --- a/rpcs3/Emu/RSX/D3D12/D3D12Texture.cpp +++ b/rpcs3/Emu/RSX/D3D12/D3D12Texture.cpp @@ -69,7 +69,7 @@ namespace { void update_existing_texture( const rsx::texture &texture, ID3D12GraphicsCommandList *command_list, - data_heap &texture_buffer_heap, + d3d12_data_heap &texture_buffer_heap, ID3D12Resource *existing_texture) { size_t w = texture.width(), h = texture.height(); @@ -123,7 +123,7 @@ ComPtr upload_single_texture( const rsx::texture &texture, ID3D12Device 
*device, ID3D12GraphicsCommandList *command_list, - data_heap &texture_buffer_heap) + d3d12_data_heap &texture_buffer_heap) { ComPtr result; CHECK_HRESULT(device->CreateCommittedResource( diff --git a/rpcs3/Emu/RSX/GL/vertex_buffer.cpp b/rpcs3/Emu/RSX/GL/vertex_buffer.cpp index 2ee749981e..27bd00410e 100644 --- a/rpcs3/Emu/RSX/GL/vertex_buffer.cpp +++ b/rpcs3/Emu/RSX/GL/vertex_buffer.cpp @@ -188,15 +188,8 @@ void GLGSRender::set_vertex_buffer() vertex_draw_count = (u32)get_index_count(draw_mode, gsl::narrow(vertex_draw_count)); vertex_index_array.resize(vertex_draw_count * type_size); - switch (type) - { - case rsx::index_array_type::u32: - std::tie(min_index, max_index) = write_index_array_data_to_buffer(gsl::span((u32*)vertex_index_array.data(), vertex_draw_count), draw_mode, first_count_commands); - break; - case rsx::index_array_type::u16: - std::tie(min_index, max_index) = write_index_array_data_to_buffer(gsl::span((u16*)vertex_index_array.data(), vertex_draw_count), draw_mode, first_count_commands); - break; - } + gsl::span dst{ reinterpret_cast(vertex_index_array.data()), gsl::narrow(vertex_index_array.size()) }; + std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, type, draw_mode, first_count_commands); } if (draw_command == rsx::draw_command::inlined_array) @@ -326,7 +319,7 @@ void GLGSRender::set_vertex_buffer() for (const auto &first_count : first_count_commands) { - write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride); + write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.stride)); offset += first_count.second * element_size; } } @@ -336,7 +329,7 @@ void GLGSRender::set_vertex_buffer() gsl::span dest_span(vertex_array); 
prepare_buffer_for_writing(vertex_array.data(), vertex_info.type, vertex_info.size, vertex_draw_count); - write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride); + write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.stride)); } size_t size = vertex_array.size(); diff --git a/rpcs3/Emu/RSX/RSXThread.cpp b/rpcs3/Emu/RSX/RSXThread.cpp index 7def771356..f3e28cf1a2 100644 --- a/rpcs3/Emu/RSX/RSXThread.cpp +++ b/rpcs3/Emu/RSX/RSXThread.cpp @@ -308,18 +308,17 @@ namespace rsx draw_state.vertex_count += range.second; } draw_state.index_type = rsx::to_index_array_type(rsx::method_registers[NV4097_SET_INDEX_ARRAY_DMA] >> 4); + if (draw_state.index_type == rsx::index_array_type::u16) { draw_state.index.resize(2 * draw_state.vertex_count); - gsl::span dst = { (u16*)draw_state.index.data(), gsl::narrow(draw_state.vertex_count) }; - write_index_array_data_to_buffer(dst, draw_mode, first_count_commands); } if (draw_state.index_type == rsx::index_array_type::u32) { draw_state.index.resize(4 * draw_state.vertex_count); - gsl::span dst = { (u16*)draw_state.index.data(), gsl::narrow(draw_state.vertex_count) }; - write_index_array_data_to_buffer(dst, draw_mode, first_count_commands); } + gsl::span dst = { (gsl::byte*)draw_state.index.data(), gsl::narrow(draw_state.index.size()) }; + write_index_array_data_to_buffer(dst, draw_state.index_type, draw_mode, first_count_commands); } draw_state.programs = get_programs(); diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.cpp b/rpcs3/Emu/RSX/VK/VKGSRender.cpp index bd11a1db52..b3be3b0497 100644 --- a/rpcs3/Emu/RSX/VK/VKGSRender.cpp +++ b/rpcs3/Emu/RSX/VK/VKGSRender.cpp @@ -403,11 +403,11 @@ VKGSRender::VKGSRender() : GSRender(frame_type::Vulkan) #define RING_BUFFER_SIZE 16 * 1024 * 1024 m_uniform_buffer_ring_info.init(RING_BUFFER_SIZE); - 
m_uniform_buffer.reset(new vk::buffer(*m_device, RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, 0)); + m_uniform_buffer_ring_info.heap.reset(new vk::buffer(*m_device, RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, 0)); m_index_buffer_ring_info.init(RING_BUFFER_SIZE); - m_index_buffer.reset(new vk::buffer(*m_device, RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, 0)); + m_index_buffer_ring_info.heap.reset(new vk::buffer(*m_device, RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, 0)); m_texture_upload_buffer_ring_info.init(8 * RING_BUFFER_SIZE); - m_texture_upload_buffer.reset(new vk::buffer(*m_device, 8 * RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0)); + m_texture_upload_buffer_ring_info.heap.reset(new vk::buffer(*m_device, 8 * RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0)); m_render_passes = get_precomputed_render_passes(*m_device, m_optimal_tiling_supported_formats); @@ -451,10 +451,10 @@ VKGSRender::~VKGSRender() //TODO: Properly destroy shader modules instead of calling clear... 
m_prog_buffer.clear(); - m_index_buffer.release(); - m_uniform_buffer.release(); - m_attrib_buffers.release(); - m_texture_upload_buffer.release(); + m_index_buffer_ring_info.heap.release(); + m_uniform_buffer_ring_info.heap.release(); + m_attrib_ring_info.heap.release(); + m_texture_upload_buffer_ring_info.heap.release(); null_buffer.release(); null_buffer_view.release(); m_buffer_view_to_clean.clear(); @@ -562,7 +562,7 @@ void VKGSRender::end() m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL }, "tex" + std::to_string(i), descriptor_sets); continue; } - vk::image_view *texture0 = m_texture_cache.upload_texture(m_command_buffer, textures[i], m_rtts, m_memory_type_mapping, m_texture_upload_buffer_ring_info, m_texture_upload_buffer.get()); + vk::image_view *texture0 = m_texture_cache.upload_texture(m_command_buffer, textures[i], m_rtts, m_memory_type_mapping, m_texture_upload_buffer_ring_info, m_texture_upload_buffer_ring_info.heap.get()); VkFilter min_filter; VkSamplerMipmapMode mip_mode; @@ -603,7 +603,7 @@ void VKGSRender::end() VkDeviceSize offset; std::tie(std::ignore, std::ignore, index_count, offset, index_type) = upload_info; - vkCmdBindIndexBuffer(m_command_buffer, m_index_buffer->value, offset, index_type); + vkCmdBindIndexBuffer(m_command_buffer, m_index_buffer_ring_info.heap->value, offset, index_type); vkCmdDrawIndexed(m_command_buffer, index_count, 1, 0, 0, 0); } @@ -656,7 +656,7 @@ void VKGSRender::on_init_thread() { GSRender::on_init_thread(); m_attrib_ring_info.init(8 * RING_BUFFER_SIZE); - m_attrib_buffers.reset(new vk::buffer(*m_device, 8 * RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, 0)); + m_attrib_ring_info.heap.reset(new vk::buffer(*m_device, 8 * RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, 0)); } void VKGSRender::on_exit() @@ -893,7 +893,7 @@ bool 
VKGSRender::load_program() //3. Update fragment constants const size_t scale_offset_offset = m_uniform_buffer_ring_info.alloc<256>(256); - u8 *buf = (u8*)m_uniform_buffer->map(scale_offset_offset, 256); + u8 *buf = (u8*)m_uniform_buffer_ring_info.map(scale_offset_offset, 256); //TODO: Add case for this in RSXThread /** @@ -926,22 +926,22 @@ bool VKGSRender::load_program() memset((char*)buf+64, 0, 8); memcpy((char*)buf + 64, &rsx::method_registers[NV4097_SET_FOG_PARAMS], sizeof(float)); memcpy((char*)buf + 68, &rsx::method_registers[NV4097_SET_FOG_PARAMS + 1], sizeof(float)); - m_uniform_buffer->unmap(); + m_uniform_buffer_ring_info.unmap(); const size_t vertex_constants_offset = m_uniform_buffer_ring_info.alloc<256>(512 * 4 * sizeof(float)); - buf = (u8*)m_uniform_buffer->map(vertex_constants_offset, 512 * 4 * sizeof(float)); + buf = (u8*)m_uniform_buffer_ring_info.map(vertex_constants_offset, 512 * 4 * sizeof(float)); fill_vertex_program_constants_data(buf); - m_uniform_buffer->unmap(); + m_uniform_buffer_ring_info.unmap(); const size_t fragment_constants_sz = m_prog_buffer.get_fragment_constants_buffer_size(fragment_program); const size_t fragment_constants_offset = m_uniform_buffer_ring_info.alloc<256>(fragment_constants_sz); - buf = (u8*)m_uniform_buffer->map(fragment_constants_offset, fragment_constants_sz); + buf = (u8*)m_uniform_buffer_ring_info.map(fragment_constants_offset, fragment_constants_sz); m_prog_buffer.fill_fragment_constans_buffer({ reinterpret_cast(buf), gsl::narrow(fragment_constants_sz) }, fragment_program); - m_uniform_buffer->unmap(); + m_uniform_buffer_ring_info.unmap(); - m_program->bind_uniform({ m_uniform_buffer->value, scale_offset_offset, 256 }, SCALE_OFFSET_BIND_SLOT, descriptor_sets); - m_program->bind_uniform({ m_uniform_buffer->value, vertex_constants_offset, 512 * 4 * sizeof(float) }, VERTEX_CONSTANT_BUFFERS_BIND_SLOT, descriptor_sets); - m_program->bind_uniform({ m_uniform_buffer->value, fragment_constants_offset, 
fragment_constants_sz }, FRAGMENT_CONSTANT_BUFFERS_BIND_SLOT, descriptor_sets); + m_program->bind_uniform({ m_uniform_buffer_ring_info.heap->value, scale_offset_offset, 256 }, SCALE_OFFSET_BIND_SLOT, descriptor_sets); + m_program->bind_uniform({ m_uniform_buffer_ring_info.heap->value, vertex_constants_offset, 512 * 4 * sizeof(float) }, VERTEX_CONSTANT_BUFFERS_BIND_SLOT, descriptor_sets); + m_program->bind_uniform({ m_uniform_buffer_ring_info.heap->value, fragment_constants_offset, fragment_constants_sz }, FRAGMENT_CONSTANT_BUFFERS_BIND_SLOT, descriptor_sets); return true; } diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.h b/rpcs3/Emu/RSX/VK/VKGSRender.h index 9009da69c7..7c2142b54a 100644 --- a/rpcs3/Emu/RSX/VK/VKGSRender.h +++ b/rpcs3/Emu/RSX/VK/VKGSRender.h @@ -23,8 +23,7 @@ private: rsx::surface_info m_surface; - vk::data_heap m_attrib_ring_info; - std::unique_ptr m_attrib_buffers; + vk::vk_data_heap m_attrib_ring_info; vk::texture_cache m_texture_cache; rsx::vk_render_targets m_rtts; @@ -45,12 +44,9 @@ private: vk::swap_chain* m_swap_chain; //buffer - vk::data_heap m_uniform_buffer_ring_info; - std::unique_ptr m_uniform_buffer; - vk::data_heap m_index_buffer_ring_info; - std::unique_ptr m_index_buffer; - vk::data_heap m_texture_upload_buffer_ring_info; - std::unique_ptr m_texture_upload_buffer; + vk::vk_data_heap m_uniform_buffer_ring_info; + vk::vk_data_heap m_index_buffer_ring_info; + vk::vk_data_heap m_texture_upload_buffer_ring_info; //Vulkan internals u32 m_current_present_image = 0xFFFF; diff --git a/rpcs3/Emu/RSX/VK/VKHelpers.h b/rpcs3/Emu/RSX/VK/VKHelpers.h index 756e73df95..53106de926 100644 --- a/rpcs3/Emu/RSX/VK/VKHelpers.h +++ b/rpcs3/Emu/RSX/VK/VKHelpers.h @@ -15,6 +15,7 @@ #include "VulkanAPI.h" #include "../GCM.h" #include "../Common/TextureUtils.h" +#include "../Common/ring_buffer_helper.h" namespace rsx { @@ -1308,87 +1309,21 @@ namespace vk }; } - - - // TODO: factorize between backends - class data_heap + struct vk_data_heap : public data_heap { - 
/** - * Does alloc cross get position ? - */ - template - bool can_alloc(size_t size) const + std::unique_ptr heap; + + void* map(size_t offset, size_t size) { - size_t alloc_size = align(size, Alignement); - size_t aligned_put_pos = align(m_put_pos, Alignement); - if (aligned_put_pos + alloc_size < m_size) - { - // range before get - if (aligned_put_pos + alloc_size < m_get_pos) - return true; - // range after get - if (aligned_put_pos > m_get_pos) - return true; - return false; - } - else - { - // ..]....[..get.. - if (aligned_put_pos < m_get_pos) - return false; - // ..get..]...[... - // Actually all resources extending beyond heap space starts at 0 - if (alloc_size > m_get_pos) - return false; - return true; - } + return heap->map(offset, size); } - size_t m_size; - size_t m_put_pos; // Start of free space - public: - data_heap() = default; - ~data_heap() = default; - data_heap(const data_heap&) = delete; - data_heap(data_heap&&) = delete; - - size_t m_get_pos; // End of free space - - void init(size_t heap_size) + void unmap() { - m_size = heap_size; - m_put_pos = 0; - m_get_pos = heap_size - 1; - } - - template - size_t alloc(size_t size) - { - if (!can_alloc(size)) throw EXCEPTION("Working buffer not big enough"); - size_t alloc_size = align(size, Alignement); - size_t aligned_put_pos = align(m_put_pos, Alignement); - if (aligned_put_pos + alloc_size < m_size) - { - m_put_pos = aligned_put_pos + alloc_size; - return aligned_put_pos; - } - else - { - m_put_pos = alloc_size; - return 0; - } - } - - /** - * return current putpos - 1 - */ - size_t get_current_put_pos_minus_one() const - { - return (m_put_pos - 1 > 0) ? m_put_pos - 1 : m_size - 1; + heap->unmap(); } }; - /** * Allocate enough space in upload_buffer and write all mipmap/layer data into the subbuffer. * Then copy all layers into dst_image. 
@@ -1396,5 +1331,5 @@ namespace vk */ void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, VkImage dst_image, const std::vector subresource_layout, int format, bool is_swizzled, u16 mipmap_count, - vk::data_heap &upload_heap, vk::buffer* upload_buffer); + vk::vk_data_heap &upload_heap, vk::buffer* upload_buffer); } \ No newline at end of file diff --git a/rpcs3/Emu/RSX/VK/VKTexture.cpp b/rpcs3/Emu/RSX/VK/VKTexture.cpp index aefd3b643f..a97a336d80 100644 --- a/rpcs3/Emu/RSX/VK/VKTexture.cpp +++ b/rpcs3/Emu/RSX/VK/VKTexture.cpp @@ -130,7 +130,7 @@ namespace vk void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, VkImage dst_image, const std::vector subresource_layout, int format, bool is_swizzled, u16 mipmap_count, - vk::data_heap &upload_heap, vk::buffer* upload_buffer) + vk::vk_data_heap &upload_heap, vk::buffer* upload_buffer) { u32 mipmap_level = 0; u32 block_in_pixel = get_format_block_size_in_texel(format); diff --git a/rpcs3/Emu/RSX/VK/VKTextureCache.h b/rpcs3/Emu/RSX/VK/VKTextureCache.h index cdcb890639..6bac1c1db4 100644 --- a/rpcs3/Emu/RSX/VK/VKTextureCache.h +++ b/rpcs3/Emu/RSX/VK/VKTextureCache.h @@ -149,7 +149,7 @@ namespace vk m_cache.resize(0); } - vk::image_view* upload_texture(command_buffer cmd, rsx::texture &tex, rsx::vk_render_targets &m_rtts, const vk::memory_type_mapping &memory_type_mapping, data_heap& upload_heap, vk::buffer* upload_buffer) + vk::image_view* upload_texture(command_buffer cmd, rsx::texture &tex, rsx::vk_render_targets &m_rtts, const vk::memory_type_mapping &memory_type_mapping, vk_data_heap& upload_heap, vk::buffer* upload_buffer) { const u32 texaddr = rsx::get_address(tex.offset(), tex.location()); const u32 range = (u32)get_texture_size(tex); diff --git a/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp b/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp index 20968cceb8..6adff8ac03 100644 --- a/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp +++ b/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp @@ -218,18 +218,20 @@ namespace vk namespace { - size_t 
alloc_and_copy(vk::buffer* buffer, vk::data_heap& data_heap_info, size_t size, std::function ptr)> copy_function) + struct data_heap_alloc { - size_t offset = data_heap_info.alloc<256>(size); - void* buf = buffer->map(offset, size); - gsl::span mapped_span = {reinterpret_cast(buf), gsl::narrow(size) }; - copy_function(mapped_span); - buffer->unmap(); - return offset; - } + static size_t alloc_and_copy(vk::vk_data_heap& data_heap_info, size_t size, std::function ptr)> copy_function) + { + size_t offset = data_heap_info.alloc<256>(size); + void* buf = data_heap_info.map(offset, size); + gsl::span mapped_span = { reinterpret_cast(buf), gsl::narrow(size) }; + copy_function(mapped_span); + data_heap_info.unmap(); + return offset; + } + }; } - std::tuple VKGSRender::upload_vertex_data() { @@ -341,9 +343,9 @@ VKGSRender::upload_vertex_data() throw EXCEPTION("Unknown base type %d", vertex_info.type); } - size_t offset_in_attrib_buffer = alloc_and_copy(m_attrib_buffers.get(), m_attrib_ring_info, data_size, [&vertex_arrays_data, data_size](gsl::span ptr) { memcpy(ptr.data(), vertex_arrays_data.data(), data_size); }); + size_t offset_in_attrib_buffer = data_heap_alloc::alloc_and_copy(m_attrib_ring_info, data_size, [&vertex_arrays_data, data_size](gsl::span ptr) { memcpy(ptr.data(), vertex_arrays_data.data(), data_size); }); - m_buffer_view_to_clean.push_back(std::make_unique(*m_device, m_attrib_buffers->value, format, offset_in_attrib_buffer, data_size)); + m_buffer_view_to_clean.push_back(std::make_unique(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size)); m_program->bind_uniform(m_buffer_view_to_clean.back()->value, reg_table[index], descriptor_sets); } } @@ -396,7 +398,7 @@ VKGSRender::upload_vertex_data() for (const auto &first_count : first_count_commands) { - write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride); + 
write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride, element_size); offset += first_count.second * element_size; } } @@ -407,7 +409,7 @@ VKGSRender::upload_vertex_data() gsl::span dest_span(vertex_array); vk::prepare_buffer_for_writing(vertex_array.data(), vertex_info.type, vertex_info.size, vertex_draw_count); - write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride); + write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, element_size); } std::vector converted_buffer; @@ -428,8 +430,8 @@ VKGSRender::upload_vertex_data() const VkFormat format = vk::get_suitable_vk_format(vertex_info.type, vertex_info.size); const u32 data_size = vk::get_suitable_vk_size(vertex_info.type, vertex_info.size) * num_stored_verts; - size_t offset_in_attrib_buffer = alloc_and_copy(m_attrib_buffers.get(), m_attrib_ring_info, data_size, [data_ptr, data_size](gsl::span ptr) { memcpy(ptr.data(), data_ptr, data_size); }); - m_buffer_view_to_clean.push_back(std::make_unique(*m_device, m_attrib_buffers->value, format, offset_in_attrib_buffer, data_size)); + size_t offset_in_attrib_buffer = data_heap_alloc::alloc_and_copy(m_attrib_ring_info, data_size, [data_ptr, data_size](gsl::span ptr) { memcpy(ptr.data(), data_ptr, data_size); }); + m_buffer_view_to_clean.push_back(std::make_unique(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size)); m_program->bind_uniform(m_buffer_view_to_clean.back()->value, reg_table[index], descriptor_sets); } else if (register_vertex_info[index].size > 0) @@ -465,8 +467,8 @@ VKGSRender::upload_vertex_data() } - size_t offset_in_attrib_buffer = alloc_and_copy(m_attrib_buffers.get(), m_attrib_ring_info, data_size, [data_ptr, data_size](gsl::span ptr) { memcpy(ptr.data(), data_ptr, 
data_size); }); - m_buffer_view_to_clean.push_back(std::make_unique(*m_device, m_attrib_buffers->value, format, offset_in_attrib_buffer, data_size)); + size_t offset_in_attrib_buffer = data_heap_alloc::alloc_and_copy(m_attrib_ring_info, data_size, [data_ptr, data_size](gsl::span ptr) { memcpy(ptr.data(), data_ptr, data_size); }); + m_buffer_view_to_clean.push_back(std::make_unique(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size)); m_program->bind_uniform(m_buffer_view_to_clean.back()->value, reg_table[index], descriptor_sets); break; } @@ -500,9 +502,9 @@ VKGSRender::upload_vertex_data() index_count = vk::expand_line_loop_array_to_strip(vertex_draw_count, indices); size_t upload_size = index_count * sizeof(u16); offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size); - void* buf = m_index_buffer->map(offset_in_index_buffer, upload_size); + void* buf = m_index_buffer_ring_info.heap->map(offset_in_index_buffer, upload_size); memcpy(buf, indices.data(), upload_size); - m_index_buffer->unmap(); + m_index_buffer_ring_info.heap->unmap(); } else { @@ -515,18 +517,18 @@ VKGSRender::upload_vertex_data() index_count = vk::expand_indexed_line_loop_to_strip(vertex_draw_count, (u32*)vertex_index_array.data(), indices32); size_t upload_size = index_count * sizeof(u32); offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size); - void* buf = m_index_buffer->map(offset_in_index_buffer, upload_size); + void* buf = m_index_buffer_ring_info.heap->map(offset_in_index_buffer, upload_size); memcpy(buf, indices32.data(), upload_size); - m_index_buffer->unmap(); + m_index_buffer_ring_info.heap->unmap(); } else { index_count = vk::expand_indexed_line_loop_to_strip(vertex_draw_count, (u16*)vertex_index_array.data(), indices); size_t upload_size = index_count * sizeof(u16); offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size); - void* buf = m_index_buffer->map(offset_in_index_buffer, upload_size); + 
void* buf = m_index_buffer_ring_info.heap->map(offset_in_index_buffer, upload_size); memcpy(buf, indices.data(), upload_size); - m_index_buffer->unmap(); + m_index_buffer_ring_info.heap->unmap(); } } } @@ -543,8 +545,8 @@ VKGSRender::upload_vertex_data() std::vector> ranges; ranges.push_back(std::pair(0, vertex_draw_count)); - gsl::span dst = { (u16*)indices.data(), gsl::narrow(index_count) }; - write_index_array_data_to_buffer(dst, draw_mode, ranges); + gsl::span dst = { (gsl::byte*)indices.data(), gsl::narrow(index_count * 2) }; + write_index_array_data_to_buffer(dst, rsx::index_array_type::u16, draw_mode, ranges); } else { @@ -553,9 +555,9 @@ VKGSRender::upload_vertex_data() size_t upload_size = index_count * sizeof(u16); offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size); - void* buf = m_index_buffer->map(offset_in_index_buffer, upload_size); + void* buf = m_index_buffer_ring_info.heap->map(offset_in_index_buffer, upload_size); memcpy(buf, indices.data(), upload_size); - m_index_buffer->unmap(); + m_index_buffer_ring_info.heap->unmap(); } is_indexed_draw = true; @@ -582,9 +584,9 @@ VKGSRender::upload_vertex_data() size_t upload_size = vertex_index_array.size(); offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size); - void* buf = m_index_buffer->map(offset_in_index_buffer, upload_size); + void* buf = m_index_buffer_ring_info.heap->map(offset_in_index_buffer, upload_size); memcpy(buf, vertex_index_array.data(), upload_size); - m_index_buffer->unmap(); + m_index_buffer_ring_info.heap->unmap(); } return std::make_tuple(prims, is_indexed_draw, index_count, offset_in_index_buffer, index_format); diff --git a/rpcs3/emucore.vcxproj b/rpcs3/emucore.vcxproj index 9d8a1f3e02..1b29045549 100644 --- a/rpcs3/emucore.vcxproj +++ b/rpcs3/emucore.vcxproj @@ -550,6 +550,7 @@ + diff --git a/rpcs3/emucore.vcxproj.filters b/rpcs3/emucore.vcxproj.filters index 50df4d3a0b..a8b1e1e296 100644 --- a/rpcs3/emucore.vcxproj.filters +++ 
b/rpcs3/emucore.vcxproj.filters @@ -1776,10 +1776,13 @@ Emu\GPU\RSX - + + Emu\GPU\RSX\Common + + Header Files - + Emu\GPU\RSX\Common