Merge pull request #1772 from kd-11/gl

OpenGL performance fixes and improvements
This commit is contained in:
Ivan 2016-06-15 02:11:18 +03:00 committed by GitHub
commit a66147705f
8 changed files with 324 additions and 162 deletions

View File

@ -7,6 +7,7 @@
#include "../Common/BufferUtils.h" #include "../Common/BufferUtils.h"
extern cfg::bool_entry g_cfg_rsx_debug_output; extern cfg::bool_entry g_cfg_rsx_debug_output;
extern cfg::bool_entry g_cfg_rsx_overlay;
#define DUMP_VERTEX_DATA 0 #define DUMP_VERTEX_DATA 0
@ -70,6 +71,8 @@ void GLGSRender::begin()
init_buffers(); init_buffers();
std::chrono::time_point<std::chrono::system_clock> then = std::chrono::system_clock::now();
u32 color_mask = rsx::method_registers[NV4097_SET_COLOR_MASK]; u32 color_mask = rsx::method_registers[NV4097_SET_COLOR_MASK];
bool color_mask_b = !!(color_mask & 0xff); bool color_mask_b = !!(color_mask & 0xff);
bool color_mask_g = !!((color_mask >> 8) & 0xff); bool color_mask_g = !!((color_mask >> 8) & 0xff);
@ -241,6 +244,10 @@ void GLGSRender::begin()
{ {
__glcheck glPrimitiveRestartIndex(rsx::method_registers[NV4097_SET_RESTART_INDEX]); __glcheck glPrimitiveRestartIndex(rsx::method_registers[NV4097_SET_RESTART_INDEX]);
} }
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
m_begin_time += std::chrono::duration_cast<std::chrono::microseconds>(now - then).count();
m_draw_calls++;
} }
namespace namespace
@ -266,8 +273,6 @@ void GLGSRender::end()
return; return;
} }
//LOG_NOTICE(Log::RSX, "draw()");
draw_fbo.bind(); draw_fbo.bind();
m_program->use(); m_program->use();
@ -292,33 +297,35 @@ void GLGSRender::end()
} }
} }
set_vertex_buffer(); u32 offset_in_index_buffer = set_vertex_buffer();
m_vao.bind();
/** std::chrono::time_point<std::chrono::system_clock> then = std::chrono::system_clock::now();
* Validate fails if called right after linking a program because the VS and FS both use textures bound using different
* samplers. So far only sampler2D has been largely used, hiding the problem. This call shall also degrade performance further if (g_cfg_rsx_debug_output)
* if used every draw call. Fixes shader validation issues on AMD. m_program->validate();
*/
m_program->validate();
if (draw_command == rsx::draw_command::indexed) if (draw_command == rsx::draw_command::indexed)
{ {
rsx::index_array_type indexed_type = rsx::to_index_array_type(rsx::method_registers[NV4097_SET_INDEX_ARRAY_DMA] >> 4); rsx::index_array_type indexed_type = rsx::to_index_array_type(rsx::method_registers[NV4097_SET_INDEX_ARRAY_DMA] >> 4);
if (indexed_type == rsx::index_array_type::u32) if (indexed_type == rsx::index_array_type::u32)
__glcheck glDrawElements(gl::draw_mode(draw_mode), vertex_draw_count, GL_UNSIGNED_INT, nullptr); __glcheck glDrawElements(gl::draw_mode(draw_mode), vertex_draw_count, GL_UNSIGNED_INT, (GLvoid *)(offset_in_index_buffer));
if (indexed_type == rsx::index_array_type::u16) if (indexed_type == rsx::index_array_type::u16)
__glcheck glDrawElements(gl::draw_mode(draw_mode), vertex_draw_count, GL_UNSIGNED_SHORT, nullptr); __glcheck glDrawElements(gl::draw_mode(draw_mode), vertex_draw_count, GL_UNSIGNED_SHORT, (GLvoid *)(offset_in_index_buffer));
} }
else if (!is_primitive_native(draw_mode)) else if (!is_primitive_native(draw_mode))
{ {
__glcheck glDrawElements(gl::draw_mode(draw_mode), vertex_draw_count, GL_UNSIGNED_SHORT, nullptr); __glcheck glDrawElements(gl::draw_mode(draw_mode), vertex_draw_count, GL_UNSIGNED_SHORT, (GLvoid *)(offset_in_index_buffer));
} }
else else
{ {
draw_fbo.draw_arrays(draw_mode, vertex_draw_count); draw_fbo.draw_arrays(draw_mode, vertex_draw_count);
} }
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
m_draw_time += std::chrono::duration_cast<std::chrono::microseconds>(now - then).count();
write_buffers(); write_buffers();
rsx::thread::end(); rsx::thread::end();
@ -374,32 +381,20 @@ void GLGSRender::on_init_thread()
LOG_NOTICE(RSX, "%s", (const char*)glGetString(GL_VENDOR)); LOG_NOTICE(RSX, "%s", (const char*)glGetString(GL_VENDOR));
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE); glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
glGetIntegerv(GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT, &m_min_texbuffer_alignment);
m_vao.create(); m_vao.create();
m_vbo.create();
m_ebo.create();
m_scale_offset_buffer.create(32 * sizeof(float));
m_vertex_constants_buffer.create(512 * 4 * sizeof(float));
m_fragment_constants_buffer.create();
glBindBufferBase(GL_UNIFORM_BUFFER, 0, m_scale_offset_buffer.id()); for (gl::texture &tex : m_gl_attrib_buffers)
glBindBufferBase(GL_UNIFORM_BUFFER, 1, m_vertex_constants_buffer.id());
glBindBufferBase(GL_UNIFORM_BUFFER, 2, m_fragment_constants_buffer.id());
m_vao.array_buffer = m_vbo;
m_vao.element_array_buffer = m_ebo;
for (texture_buffer_pair &attrib_buffer : m_gl_attrib_buffers)
{ {
gl::texture *&tex = attrib_buffer.texture; tex.create();
tex = new gl::texture(gl::texture::target::textureBuffer); tex.set_target(gl::texture::target::textureBuffer);
tex->create();
tex->set_target(gl::texture::target::textureBuffer);
gl::buffer *&buf = attrib_buffer.buffer;
buf = new gl::buffer();
buf->create();
} }
m_attrib_ring_buffer.reset(new gl::ring_buffer(16 * 0x100000, gl::buffer::target::texture));
m_uniform_ring_buffer.reset(new gl::ring_buffer(16 * 0x100000, gl::buffer::target::uniform));
m_index_ring_buffer.reset(new gl::ring_buffer(0x100000, gl::buffer::target::element_array));
m_vao.element_array_buffer = m_index_ring_buffer->get_buffer();
m_gl_texture_cache.initialize_rtt_cache(); m_gl_texture_cache.initialize_rtt_cache();
} }
@ -418,36 +413,17 @@ void GLGSRender::on_exit()
if (m_flip_tex_color) if (m_flip_tex_color)
m_flip_tex_color.remove(); m_flip_tex_color.remove();
if (m_vbo)
m_vbo.remove();
if (m_ebo)
m_ebo.remove();
if (m_vao) if (m_vao)
m_vao.remove(); m_vao.remove();
if (m_scale_offset_buffer) for (gl::texture &tex : m_gl_attrib_buffers)
m_scale_offset_buffer.remove();
if (m_vertex_constants_buffer)
m_vertex_constants_buffer.remove();
if (m_fragment_constants_buffer)
m_fragment_constants_buffer.remove();
for (texture_buffer_pair &attrib_buffer : m_gl_attrib_buffers)
{ {
gl::texture *&tex = attrib_buffer.texture; tex.remove();
tex->remove();
delete tex;
tex = nullptr;
gl::buffer *&buf = attrib_buffer.buffer;
buf->remove();
delete buf;
buf = nullptr;
} }
m_attrib_ring_buffer->destroy();
m_uniform_ring_buffer->destroy();
m_index_ring_buffer->destroy();
} }
void nv4097_clear_surface(u32 arg, GLGSRender* renderer) void nv4097_clear_surface(u32 arg, GLGSRender* renderer)
@ -580,32 +556,47 @@ bool GLGSRender::load_program()
(m_program.recreate() += { fp.compile(), vp.compile() }).make(); (m_program.recreate() += { fp.compile(), vp.compile() }).make();
#endif #endif
size_t max_buffer_sz =(size_t) m_vertex_constants_buffer.size(); u32 fragment_constants_sz = m_prog_buffer.get_fragment_constants_buffer_size(fragment_program);
size_t fragment_constants_sz = m_prog_buffer.get_fragment_constants_buffer_size(fragment_program); fragment_constants_sz = std::max(32U, fragment_constants_sz);
if (fragment_constants_sz > max_buffer_sz) u32 max_buffer_sz = 8192 + 512 + fragment_constants_sz;
max_buffer_sz = fragment_constants_sz;
u32 is_alpha_tested = !!(rsx::method_registers[NV4097_SET_ALPHA_TEST_ENABLE]); u32 is_alpha_tested = !!(rsx::method_registers[NV4097_SET_ALPHA_TEST_ENABLE]);
u8 alpha_ref_raw = (u8)(rsx::method_registers[NV4097_SET_ALPHA_REF] & 0xFF); u8 alpha_ref_raw = (u8)(rsx::method_registers[NV4097_SET_ALPHA_REF] & 0xFF);
float alpha_ref = alpha_ref_raw / 255.f; float alpha_ref = alpha_ref_raw / 255.f;
std::vector<u8> client_side_buf(max_buffer_sz); u8 *buf;
u32 scale_offset_offset;
u32 vertex_constants_offset;
u32 fragment_constants_offset;
fill_scale_offset_data(client_side_buf.data(), false); m_uniform_ring_buffer->reserve_and_map(max_buffer_sz);
memcpy(client_side_buf.data() + 16 * sizeof(float), &rsx::method_registers[NV4097_SET_FOG_PARAMS], sizeof(float)); auto mapping = m_uniform_ring_buffer->alloc_from_reserve(512);
memcpy(client_side_buf.data() + 17 * sizeof(float), &rsx::method_registers[NV4097_SET_FOG_PARAMS + 1], sizeof(float)); buf = static_cast<u8*>(mapping.first);
memcpy(client_side_buf.data() + 18 * sizeof(float), &is_alpha_tested, sizeof(u32)); scale_offset_offset = mapping.second;
memcpy(client_side_buf.data() + 19 * sizeof(float), &alpha_ref, sizeof(float));
m_scale_offset_buffer.data(m_scale_offset_buffer.size(), nullptr);
m_scale_offset_buffer.sub_data(0, m_scale_offset_buffer.size(), client_side_buf.data());
fill_vertex_program_constants_data(client_side_buf.data()); fill_scale_offset_data(buf, false);
m_vertex_constants_buffer.data(m_vertex_constants_buffer.size(), nullptr); memcpy(buf + 16 * sizeof(float), &rsx::method_registers[NV4097_SET_FOG_PARAMS], sizeof(float));
m_vertex_constants_buffer.sub_data(0, m_vertex_constants_buffer.size(), client_side_buf.data()); memcpy(buf + 17 * sizeof(float), &rsx::method_registers[NV4097_SET_FOG_PARAMS + 1], sizeof(float));
memcpy(buf + 18 * sizeof(float), &is_alpha_tested, sizeof(u32));
memcpy(buf + 19 * sizeof(float), &alpha_ref, sizeof(float));
m_prog_buffer.fill_fragment_constans_buffer({ reinterpret_cast<float*>(client_side_buf.data()), gsl::narrow<int>(fragment_constants_sz) }, fragment_program); mapping = m_uniform_ring_buffer->alloc_from_reserve(512 * 16);
m_fragment_constants_buffer.data(fragment_constants_sz, nullptr); buf = static_cast<u8*>(mapping.first);
m_fragment_constants_buffer.sub_data(0, fragment_constants_sz, client_side_buf.data()); vertex_constants_offset = mapping.second;
fill_vertex_program_constants_data(buf);
mapping = m_uniform_ring_buffer->alloc_from_reserve(fragment_constants_sz);
buf = static_cast<u8*>(mapping.first);
fragment_constants_offset = mapping.second;
m_prog_buffer.fill_fragment_constans_buffer({ reinterpret_cast<float*>(buf), gsl::narrow<int>(fragment_constants_sz) }, fragment_program);
m_uniform_ring_buffer->unmap();
glBindBufferRange(GL_UNIFORM_BUFFER, 0, m_uniform_ring_buffer->get_buffer().id(), scale_offset_offset, 512);
glBindBufferRange(GL_UNIFORM_BUFFER, 1, m_uniform_ring_buffer->get_buffer().id(), vertex_constants_offset, 512 * 16);
glBindBufferRange(GL_UNIFORM_BUFFER, 2, m_uniform_ring_buffer->get_buffer().id(), fragment_constants_offset, fragment_constants_sz);
return true; return true;
} }
@ -725,6 +716,26 @@ void GLGSRender::flip(int buffer)
m_frame->flip(m_context); m_frame->flip(m_context);
if (g_cfg_rsx_overlay)
{
//TODO: Display overlay in a cross-platform manner
//Core context throws wgl font functions out of the window as they use display lists
//Only show debug info if the user really requests it
if (g_cfg_rsx_debug_output)
{
std::string message =
"draw_calls: " + std::to_string(m_draw_calls) + ", " + "draw_call_setup: " + std::to_string(m_begin_time) + "us, " + "vertex_upload_time: " + std::to_string(m_vertex_upload_time) + "us, " + "draw_call_execution: " + std::to_string(m_draw_time) + "us";
LOG_ERROR(RSX, message.c_str());
}
}
m_draw_calls = 0;
m_begin_time = 0;
m_draw_time = 0;
m_vertex_upload_time = 0;
for (auto &tex : m_rtts.invalidated_resources) for (auto &tex : m_rtts.invalidated_resources)
{ {
tex->remove(); tex->remove();

View File

@ -25,15 +25,20 @@ private:
rsx::surface_info m_surface; rsx::surface_info m_surface;
gl_render_targets m_rtts; gl_render_targets m_rtts;
struct texture_buffer_pair
{
gl::texture *texture;
gl::buffer *buffer;
}
m_gl_attrib_buffers[rsx::limits::vertex_count];
gl::gl_texture_cache m_gl_texture_cache; gl::gl_texture_cache m_gl_texture_cache;
gl::texture m_gl_attrib_buffers[rsx::limits::vertex_count];
std::unique_ptr<gl::ring_buffer> m_attrib_ring_buffer;
std::unique_ptr<gl::ring_buffer> m_uniform_ring_buffer;
std::unique_ptr<gl::ring_buffer> m_index_ring_buffer;
u32 m_draw_calls = 0;
u32 m_begin_time = 0;
u32 m_draw_time = 0;
u32 m_vertex_upload_time = 0;
GLint m_min_texbuffer_alignment = 256;
public: public:
gl::fbo draw_fbo; gl::fbo draw_fbo;
@ -44,12 +49,7 @@ private:
gl::fbo m_flip_fbo; gl::fbo m_flip_fbo;
gl::texture m_flip_tex_color; gl::texture m_flip_tex_color;
gl::buffer m_scale_offset_buffer; //vaos are mandatory for core profile
gl::buffer m_vertex_constants_buffer;
gl::buffer m_fragment_constants_buffer;
gl::buffer m_vbo;
gl::buffer m_ebo;
gl::vao m_vao; gl::vao m_vao;
public: public:
@ -58,7 +58,7 @@ public:
private: private:
static u32 enable(u32 enable, u32 cap); static u32 enable(u32 enable, u32 cap);
static u32 enable(u32 enable, u32 cap, u32 index); static u32 enable(u32 enable, u32 cap, u32 index);
void set_vertex_buffer(); u32 set_vertex_buffer();
public: public:
bool load_program(); bool load_program();

View File

@ -163,7 +163,8 @@ OPENGL_PROC(PFNGLGETINTEGER64VPROC, GetInteger64v);
OPENGL_PROC(PFNGLCHECKFRAMEBUFFERSTATUSPROC, CheckFramebufferStatus); OPENGL_PROC(PFNGLCHECKFRAMEBUFFERSTATUSPROC, CheckFramebufferStatus);
OPENGL_PROC(PFNGLMAPBUFFERRANGEPROC, MapBufferRange);
OPENGL_PROC(PFNGLBINDBUFFERRANGEPROC, BindBufferRange);
OPENGL_PROC(PFNGLBINDBUFFERBASEPROC, BindBufferBase); OPENGL_PROC(PFNGLBINDBUFFERBASEPROC, BindBufferBase);
//Texture Buffers //Texture Buffers

View File

@ -25,6 +25,12 @@ typedef BOOL (WINAPI* PFNWGLSWAPINTERVALEXTPROC) (int interval);
#include <GL/glxext.h> #include <GL/glxext.h>
#endif #endif
#ifndef GL_TEXTURE_BUFFER_BINDING
//This enum was dropped from the core spec during promotion from the ARB extension
//See https://www.khronos.org/bugzilla/show_bug.cgi?id=844
#define GL_TEXTURE_BUFFER_BINDING 0x8C2A
#endif
namespace gl namespace gl
{ {
void init(); void init();

View File

@ -372,7 +372,9 @@ namespace gl
pixel_pack = GL_PIXEL_PACK_BUFFER, pixel_pack = GL_PIXEL_PACK_BUFFER,
pixel_unpack = GL_PIXEL_UNPACK_BUFFER, pixel_unpack = GL_PIXEL_UNPACK_BUFFER,
array = GL_ARRAY_BUFFER, array = GL_ARRAY_BUFFER,
element_array = GL_ELEMENT_ARRAY_BUFFER element_array = GL_ELEMENT_ARRAY_BUFFER,
uniform = GL_UNIFORM_BUFFER,
texture = GL_TEXTURE_BUFFER
}; };
enum class access enum class access
{ {
@ -421,6 +423,8 @@ namespace gl
case target::pixel_unpack: pname = GL_PIXEL_UNPACK_BUFFER_BINDING; break; case target::pixel_unpack: pname = GL_PIXEL_UNPACK_BUFFER_BINDING; break;
case target::array: pname = GL_ARRAY_BUFFER_BINDING; break; case target::array: pname = GL_ARRAY_BUFFER_BINDING; break;
case target::element_array: pname = GL_ELEMENT_ARRAY_BUFFER_BINDING; break; case target::element_array: pname = GL_ELEMENT_ARRAY_BUFFER_BINDING; break;
case target::uniform: pname = GL_UNIFORM_BUFFER_BINDING; break;
case target::texture: pname = GL_TEXTURE_BUFFER_BINDING; break;
} }
glGetIntegerv(pname, &m_last_binding); glGetIntegerv(pname, &m_last_binding);
@ -465,11 +469,18 @@ namespace gl
data(size, data_); data(size, data_);
} }
// Creates the buffer object, records 'target_' as its bind target, and
// allocates 'size' bytes of storage, optionally initialized from 'data_'
// (storage is left uninitialized when data_ is null).
void create(target target_, GLsizeiptr size, const void* data_ = nullptr)
{
	create();
	m_target = target_;
	data(size, data_);
}
void data(GLsizeiptr size, const void* data_ = nullptr) void data(GLsizeiptr size, const void* data_ = nullptr)
{ {
target target_ = current_target(); target target_ = current_target();
save_binding_state save(target_, *this); save_binding_state save(target_, *this);
glBufferData((GLenum)target_, size, data_, GL_STREAM_COPY); glBufferData((GLenum)target_, size, data_, GL_STREAM_DRAW);
m_size = size; m_size = size;
} }
@ -569,6 +580,106 @@ namespace gl
} }
}; };
// Ring (circular) allocator built on a single GL buffer object.
// Suballocates transient per-draw data (vertex attributes, uniforms, index
// data) from one large buffer instead of re-specifying many small GL buffers
// every draw call.
// NOTE(review): mappings use GL_MAP_UNSYNCHRONIZED_BIT with no fencing; this
// relies on the ring being large enough that a region is never rewritten
// before the GPU has consumed it — confirm this assumption holds under load.
class ring_buffer
{
	buffer storage_buffer;          // backing GL buffer object
	buffer::target m_target;        // bind target used for map/unmap calls
	u32 m_data_loc = 0;             // current write head (bytes from buffer start)
	u32 m_size;                     // total capacity of the backing buffer

	// State of the block currently reserved via reserve_and_map(), if any.
	u32 m_mapped_block_size = 0;
	u32 m_mapped_block_offset;      // offset of the mapped block within the buffer
	u32 m_mapped_reserve_offset;    // write cursor within the mapped block
	u32 m_mapped_bytes_available;   // bytes left in the mapped block
	void *m_mapped_base = nullptr;  // CPU pointer to the mapped block (null when unmapped)

public:
	// Creates the backing buffer with 'initial_size' bytes of storage;
	// 'target' is the GL bind point used for subsequent map/unmap calls.
	ring_buffer(u32 initial_size, buffer::target target)
	{
		storage_buffer.create();
		storage_buffer.data(initial_size);
		m_size = initial_size;
		m_target = target;
	}

	// Explicit teardown (no RAII destructor; the owner must call this while
	// the GL context is still current).
	void destroy()
	{
		storage_buffer.remove();
	}

	// Allocates 'size' bytes (rounded up to a 256-byte multiple) from the
	// ring and maps the range for writing.
	// Returns {CPU write pointer, byte offset of the range within the buffer}.
	std::pair<void*, u32> alloc_and_map(u32 size)
	{
		size = (size + 255) & ~255;

		glBindBuffer((GLenum)m_target, storage_buffer.id());

		u32 limit = m_data_loc + size;
		if (limit > m_size)
		{
			// Wrap to the start of the ring; grow if a single request
			// exceeds the whole capacity.
			if (size > m_size)
				m_size = size;

			// Re-specifying the data store orphans the old storage, so
			// regions the GPU may still be reading are not overwritten.
			storage_buffer.data(m_size, nullptr);
			m_data_loc = 0;
		}

		// Unsynchronized write-only map: valid because the range was either
		// just orphaned or is presumed no longer in use by the GPU.
		void *ptr = glMapBufferRange((GLenum)m_target, m_data_loc, size, GL_MAP_WRITE_BIT|GL_MAP_INVALIDATE_RANGE_BIT|GL_MAP_UNSYNCHRONIZED_BIT);
		u32 offset = m_data_loc;
		m_data_loc += size;
		return std::make_pair(ptr, offset);
	}

	// Unmaps the currently mapped range and clears the reserve-block state.
	void unmap()
	{
		glUnmapBuffer((GLenum)m_target);
		m_mapped_block_size = 0;
		m_mapped_base = 0;
	}

	// Maps a block of at least 'max_size' bytes (rounded up to a 4096-byte
	// multiple) from which alloc_from_reserve() can hand out aligned
	// sub-allocations without remapping per allocation.
	void reserve_and_map(u32 max_size)
	{
		max_size = (max_size + 4095) & ~4095;
		auto mapping = alloc_and_map(max_size);
		m_mapped_base = mapping.first;
		m_mapped_block_offset = mapping.second;
		m_mapped_reserve_offset = 0;
		m_mapped_bytes_available = max_size;
	}

	// Sub-allocates 'size' bytes (rounded up to 'alignment', assumed to be a
	// power of two) from the block mapped by reserve_and_map().
	// Returns {CPU write pointer, byte offset within the buffer}.
	// If the reserve is exhausted (or nothing is mapped) a new block is
	// mapped — which may relocate the backing store, hence the error below.
	std::pair<void*, u32> alloc_from_reserve(u32 size, u32 alignment = 16)
	{
		alignment -= 1;
		size = (size + alignment) & ~alignment;

		if (m_mapped_bytes_available < size || !m_mapped_base)
		{
			if (m_mapped_base)
			{
				//This doesn't really work for some reason, probably since the caller should bind the target
				//before making this call as the block may be reallocated
				LOG_ERROR(RSX, "reserved allocation exceeded. check for corruption!");
				unmap();
			}

			reserve_and_map((size > 4096) ? size : 4096);
		}

		EXPECTS(m_mapped_bytes_available >= size);

		void *ptr = (char*)m_mapped_base + m_mapped_reserve_offset;
		u32 offset = m_mapped_reserve_offset + m_mapped_block_offset;
		m_mapped_reserve_offset += size;
		m_mapped_bytes_available -= size;

		// Sanity: the returned offset must respect the requested alignment
		// ('alignment' already holds the mask at this point).
		EXPECTS((offset & alignment) == 0);
		return std::make_pair(ptr, offset);
	}

	// Access to the underlying GL buffer (e.g. for glBindBufferRange or
	// attaching as a texture buffer source).
	buffer& get_buffer()
	{
		return storage_buffer;
	}
};
class vao class vao
{ {
template<buffer::target BindId, uint GetStateId> template<buffer::target BindId, uint GetStateId>
@ -1140,11 +1251,11 @@ namespace gl
if (get_target() != target::textureBuffer) if (get_target() != target::textureBuffer)
throw EXCEPTION("OpenGL error: texture cannot copy from buffer"); throw EXCEPTION("OpenGL error: texture cannot copy from buffer");
if (!offset) /* if (!offset)
{ {
copy_from(buf, gl_format_type); copy_from(buf, gl_format_type);
return; return;
} }*/
if (glTextureBufferRangeEXT == nullptr) if (glTextureBufferRangeEXT == nullptr)
throw EXCEPTION("OpenGL error: partial buffer access for textures is unsupported on your system"); throw EXCEPTION("OpenGL error: partial buffer access for textures is unsupported on your system");

View File

@ -62,6 +62,8 @@ namespace gl
std::vector<gl_cached_texture> texture_cache; std::vector<gl_cached_texture> texture_cache;
std::vector<cached_rtt> rtt_cache; std::vector<cached_rtt> rtt_cache;
u32 frame_ctr; u32 frame_ctr;
std::pair<u64, u64> texture_cache_range = std::make_pair(0xFFFFFFFF, 0);
u32 max_tex_address = 0;
bool lock_memory_region(u32 start, u32 size) bool lock_memory_region(u32 start, u32 size)
{ {
@ -69,6 +71,12 @@ namespace gl
start = start & ~(memory_page_size - 1); start = start & ~(memory_page_size - 1);
size = (u32)align(size, memory_page_size); size = (u32)align(size, memory_page_size);
if (start < texture_cache_range.first)
texture_cache_range = std::make_pair(start, texture_cache_range.second);
if ((start+size) > texture_cache_range.second)
texture_cache_range = std::make_pair(texture_cache_range.first, (start+size));
return vm::page_protect(start, size, 0, 0, vm::page_writable); return vm::page_protect(start, size, 0, 0, vm::page_writable);
} }
@ -500,6 +508,10 @@ namespace gl
bool mark_as_dirty(u32 address) bool mark_as_dirty(u32 address)
{ {
if (address < texture_cache_range.first ||
address > texture_cache_range.second)
return false;
bool response = false; bool response = false;
for (gl_cached_texture &tex: texture_cache) for (gl_cached_texture &tex: texture_cache)

View File

@ -127,8 +127,8 @@ namespace
throw EXCEPTION("unknow vertex type"); throw EXCEPTION("unknow vertex type");
} }
// return vertex count and filled index array if primitive type is not native (empty array otherwise) // return vertex count if primitive type is not native (empty array otherwise)
std::tuple<u32, std::vector<u8>> get_index_array_for_emulated_non_indexed_draw(const std::vector<std::pair<u32, u32>> &first_count_commands, rsx::primitive_type primitive_mode) std::tuple<u32, u32> get_index_array_for_emulated_non_indexed_draw(const std::vector<std::pair<u32, u32>> &first_count_commands, rsx::primitive_type primitive_mode, gl::ring_buffer &dst)
{ {
u32 vertex_draw_count = 0; u32 vertex_draw_count = 0;
assert(!is_primitive_native(primitive_mode)); assert(!is_primitive_native(primitive_mode));
@ -138,9 +138,10 @@ namespace
vertex_draw_count += (u32)get_index_count(primitive_mode, pair.second); vertex_draw_count += (u32)get_index_count(primitive_mode, pair.second);
} }
std::vector<u8> vertex_index_array(vertex_draw_count * sizeof(u16));
u32 first = 0; u32 first = 0;
char* mapped_buffer = (char*)vertex_index_array.data(); auto mapping = dst.alloc_and_map(vertex_draw_count * sizeof(u16));
char *mapped_buffer = (char *)mapping.first;
for (const auto &pair : first_count_commands) for (const auto &pair : first_count_commands)
{ {
size_t element_count = get_index_count(primitive_mode, pair.second); size_t element_count = get_index_count(primitive_mode, pair.second);
@ -149,16 +150,17 @@ namespace
first += pair.second; first += pair.second;
} }
return std::make_tuple(vertex_draw_count, vertex_index_array); dst.unmap();
return std::make_tuple(vertex_draw_count, mapping.second);
} }
} }
void GLGSRender::set_vertex_buffer() u32 GLGSRender::set_vertex_buffer()
{ {
//initialize vertex attributes //initialize vertex attributes
//merge all vertex arrays //merge all vertex arrays
std::vector<u8> vertex_arrays_data;
std::chrono::time_point<std::chrono::system_clock> then = std::chrono::system_clock::now();
const std::string reg_table[] = const std::string reg_table[] =
{ {
@ -171,10 +173,22 @@ void GLGSRender::set_vertex_buffer()
}; };
u32 input_mask = rsx::method_registers[NV4097_SET_VERTEX_ATTRIB_INPUT_MASK]; u32 input_mask = rsx::method_registers[NV4097_SET_VERTEX_ATTRIB_INPUT_MASK];
u32 min_index = 0, max_index = 0;
u32 max_vertex_attrib_size = 0;
u32 offset_in_index_buffer = 0;
std::vector<u8> vertex_index_array;
vertex_draw_count = 0; vertex_draw_count = 0;
u32 min_index, max_index;
//place holder; replace with actual index buffer
gsl::span<gsl::byte> index_array;
for (u8 index = 0; index < rsx::limits::vertex_count; ++index)
{
if (vertex_arrays_info[index].size == 0)
continue;
max_vertex_attrib_size += 16;
}
if (draw_command == rsx::draw_command::indexed) if (draw_command == rsx::draw_command::indexed)
{ {
@ -184,12 +198,19 @@ void GLGSRender::set_vertex_buffer()
{ {
vertex_draw_count += first_count.second; vertex_draw_count += first_count.second;
} }
// Index count // Index count
vertex_draw_count = (u32)get_index_count(draw_mode, gsl::narrow<int>(vertex_draw_count)); vertex_draw_count = (u32)get_index_count(draw_mode, gsl::narrow<int>(vertex_draw_count));
vertex_index_array.resize(vertex_draw_count * type_size); u32 block_sz = vertex_draw_count * type_size;
gsl::span<gsl::byte> dst{ reinterpret_cast<gsl::byte*>(vertex_index_array.data()), gsl::narrow<u32>(vertex_index_array.size()) }; auto mapping = m_index_ring_buffer->alloc_and_map(block_sz);
void *ptr = mapping.first;
offset_in_index_buffer = mapping.second;
gsl::span<gsl::byte> dst{ reinterpret_cast<gsl::byte*>(ptr), gsl::narrow<u32>(block_sz) };
std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, type, draw_mode, first_count_commands); std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, type, draw_mode, first_count_commands);
m_index_ring_buffer->unmap();
} }
if (draw_command == rsx::draw_command::inlined_array) if (draw_command == rsx::draw_command::inlined_array)
@ -207,6 +228,7 @@ void GLGSRender::set_vertex_buffer()
} }
vertex_draw_count = (u32)(inline_vertex_array.size() * sizeof(u32)) / stride; vertex_draw_count = (u32)(inline_vertex_array.size() * sizeof(u32)) / stride;
m_attrib_ring_buffer->reserve_and_map(vertex_draw_count * max_vertex_attrib_size);
for (int index = 0; index < rsx::limits::vertex_count; ++index) for (int index = 0; index < rsx::limits::vertex_count; ++index)
{ {
@ -228,12 +250,11 @@ void GLGSRender::set_vertex_buffer()
u32 data_size = element_size * vertex_draw_count; u32 data_size = element_size * vertex_draw_count;
u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size); u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
auto &buffer = m_gl_attrib_buffers[index].buffer; auto &texture = m_gl_attrib_buffers[index];
auto &texture = m_gl_attrib_buffers[index].texture;
vertex_arrays_data.resize(data_size);
u8 *src = reinterpret_cast<u8*>(inline_vertex_array.data()); u8 *src = reinterpret_cast<u8*>(inline_vertex_array.data());
u8 *dst = vertex_arrays_data.data(); auto mapping = m_attrib_ring_buffer->alloc_from_reserve(data_size, m_min_texbuffer_alignment);
u8 *dst = static_cast<u8*>(mapping.first);
src += offsets[index]; src += offsets[index];
prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, vertex_draw_count); prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, vertex_draw_count);
@ -255,17 +276,13 @@ void GLGSRender::set_vertex_buffer()
dst += element_size; dst += element_size;
} }
buffer->data(data_size, nullptr); texture.copy_from(m_attrib_ring_buffer->get_buffer(), gl_type, mapping.second, data_size);
buffer->sub_data(0, data_size, vertex_arrays_data.data());
//Attach buffer to texture
texture->copy_from(*buffer, gl_type);
//Link texture to uniform //Link texture to uniform
m_program->uniforms.texture(location, index + rsx::limits::textures_count, *texture); m_program->uniforms.texture(location, index + rsx::limits::textures_count, texture);
if (!is_primitive_native(draw_mode)) if (!is_primitive_native(draw_mode))
{ {
std::tie(vertex_draw_count, vertex_index_array) = get_index_array_for_emulated_non_indexed_draw({ { 0, vertex_draw_count } }, draw_mode); std::tie(vertex_draw_count, offset_in_index_buffer) = get_index_array_for_emulated_non_indexed_draw({ { 0, vertex_draw_count } }, draw_mode, *m_index_ring_buffer);
} }
} }
} }
@ -280,6 +297,9 @@ void GLGSRender::set_vertex_buffer()
if (draw_command == rsx::draw_command::array || draw_command == rsx::draw_command::indexed) if (draw_command == rsx::draw_command::array || draw_command == rsx::draw_command::indexed)
{ {
u32 verts_allocated = std::max(vertex_draw_count, max_index + 1);
m_attrib_ring_buffer->reserve_and_map(verts_allocated * max_vertex_attrib_size);
for (int index = 0; index < rsx::limits::vertex_count; ++index) for (int index = 0; index < rsx::limits::vertex_count; ++index)
{ {
int location; int location;
@ -298,12 +318,16 @@ void GLGSRender::set_vertex_buffer()
if (vertex_arrays_info[index].size > 0) if (vertex_arrays_info[index].size > 0)
{ {
auto &vertex_info = vertex_arrays_info[index]; auto &vertex_info = vertex_arrays_info[index];
// Active vertex array
std::vector<gsl::byte> vertex_array;
// Fill vertex_array // Fill vertex_array
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size); u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
vertex_array.resize(vertex_draw_count * element_size); //vertex_array.resize(vertex_draw_count * element_size);
u32 data_size = vertex_draw_count * element_size;
u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
auto &texture = m_gl_attrib_buffers[index];
u32 buffer_offset = 0;
// Get source pointer // Get source pointer
u32 base_offset = rsx::method_registers[NV4097_SET_VERTEX_DATA_BASE_OFFSET]; u32 base_offset = rsx::method_registers[NV4097_SET_VERTEX_DATA_BASE_OFFSET];
@ -313,9 +337,13 @@ void GLGSRender::set_vertex_buffer()
if (draw_command == rsx::draw_command::array) if (draw_command == rsx::draw_command::array)
{ {
auto mapping = m_attrib_ring_buffer->alloc_from_reserve(data_size, m_min_texbuffer_alignment);
gsl::byte *dst = static_cast<gsl::byte*>(mapping.first);
buffer_offset = mapping.second;
size_t offset = 0; size_t offset = 0;
gsl::span<gsl::byte> dest_span(vertex_array); gsl::span<gsl::byte> dest_span(dst, data_size);
prepare_buffer_for_writing(vertex_array.data(), vertex_info.type, vertex_info.size, vertex_draw_count); prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, vertex_draw_count);
for (const auto &first_count : first_count_commands) for (const auto &first_count : first_count_commands)
{ {
@ -325,30 +353,21 @@ void GLGSRender::set_vertex_buffer()
} }
if (draw_command == rsx::draw_command::indexed) if (draw_command == rsx::draw_command::indexed)
{ {
vertex_array.resize((max_index + 1) * element_size); data_size = (max_index + 1) * element_size;
gsl::span<gsl::byte> dest_span(vertex_array); auto mapping = m_attrib_ring_buffer->alloc_from_reserve(data_size, m_min_texbuffer_alignment);
prepare_buffer_for_writing(vertex_array.data(), vertex_info.type, vertex_info.size, vertex_draw_count); gsl::byte *dst = static_cast<gsl::byte*>(mapping.first);
buffer_offset = mapping.second;
gsl::span<gsl::byte> dest_span(dst, data_size);
prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, vertex_draw_count);
write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size)); write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size));
} }
size_t size = vertex_array.size(); texture.copy_from(m_attrib_ring_buffer->get_buffer(), gl_type, buffer_offset, data_size);
size_t position = vertex_arrays_data.size();
vertex_arrays_data.resize(position + size);
u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
auto &buffer = m_gl_attrib_buffers[index].buffer;
auto &texture = m_gl_attrib_buffers[index].texture;
buffer->data(static_cast<u32>(size), nullptr);
buffer->sub_data(0, static_cast<u32>(size), vertex_array.data());
//Attach buffer to texture
texture->copy_from(*buffer, gl_type);
//Link texture to uniform //Link texture to uniform
m_program->uniforms.texture(location, index + rsx::limits::textures_count, *texture); m_program->uniforms.texture(location, index + rsx::limits::textures_count, texture);
} }
else if (register_vertex_info[index].size > 0) else if (register_vertex_info[index].size > 0)
{ {
@ -364,17 +383,16 @@ void GLGSRender::set_vertex_buffer()
const u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size); const u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
const size_t data_size = vertex_data.size(); const size_t data_size = vertex_data.size();
auto &buffer = m_gl_attrib_buffers[index].buffer; auto &texture = m_gl_attrib_buffers[index];
auto &texture = m_gl_attrib_buffers[index].texture;
buffer->data(data_size, nullptr); auto mapping = m_attrib_ring_buffer->alloc_from_reserve(data_size, m_min_texbuffer_alignment);
buffer->sub_data(0, data_size, vertex_data.data()); u8 *dst = static_cast<u8*>(mapping.first);
//Attach buffer to texture memcpy(dst, vertex_data.data(), data_size);
texture->copy_from(*buffer, gl_type); texture.copy_from(m_attrib_ring_buffer->get_buffer(), gl_type, mapping.second, data_size);
//Link texture to uniform //Link texture to uniform
m_program->uniforms.texture(location, index + rsx::limits::textures_count, *texture); m_program->uniforms.texture(location, index + rsx::limits::textures_count, texture);
break; break;
} }
default: default:
@ -390,23 +408,16 @@ void GLGSRender::set_vertex_buffer()
continue; continue;
} }
} }
if (draw_command == rsx::draw_command::array && !is_primitive_native(draw_mode)) if (draw_command == rsx::draw_command::array && !is_primitive_native(draw_mode))
{ {
std::tie(vertex_draw_count, vertex_index_array) = get_index_array_for_emulated_non_indexed_draw(first_count_commands, draw_mode); std::tie(vertex_draw_count, offset_in_index_buffer) = get_index_array_for_emulated_non_indexed_draw(first_count_commands, draw_mode, *m_index_ring_buffer);
} }
} }
// glDraw* will fail without at least attrib0 defined if we are on compatibility profile m_attrib_ring_buffer->unmap();
// Someone should really test AMD behaviour here, Nvidia is too permissive. There is no buffer currently bound, but on NV it works ok std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
glEnableVertexAttribArray(0); m_vertex_upload_time += std::chrono::duration_cast<std::chrono::microseconds>(now - then).count();
glVertexAttribPointer(0, 2, GL_FLOAT, false, 0, 0);
if (draw_command == rsx::draw_command::indexed) return offset_in_index_buffer;
{
m_ebo.data(vertex_index_array.size(), vertex_index_array.data());
}
else if (!is_primitive_native(draw_mode))
{
m_ebo.data(vertex_index_array.size(), vertex_index_array.data());
}
} }

View File

@ -30,7 +30,7 @@ namespace vk
{ {
private: private:
std::vector<cached_texture_object> m_cache; std::vector<cached_texture_object> m_cache;
std::pair<u64, u64> texture_cache_range = std::make_pair(0xFFFFFFFF, 0);
std::vector<std::unique_ptr<vk::image_view> > m_temporary_image_view; std::vector<std::unique_ptr<vk::image_view> > m_temporary_image_view;
bool lock_memory_region(u32 start, u32 size) bool lock_memory_region(u32 start, u32 size)
@ -120,6 +120,12 @@ namespace vk
obj.protected_rgn_end += obj.protected_rgn_start; obj.protected_rgn_end += obj.protected_rgn_start;
lock_memory_region(static_cast<u32>(obj.protected_rgn_start), static_cast<u32>(obj.native_rsx_size)); lock_memory_region(static_cast<u32>(obj.protected_rgn_start), static_cast<u32>(obj.native_rsx_size));
if (obj.protected_rgn_start < texture_cache_range.first)
texture_cache_range = std::make_pair(obj.protected_rgn_start, texture_cache_range.second);
if (obj.protected_rgn_end > texture_cache_range.second)
texture_cache_range = std::make_pair(texture_cache_range.first, obj.protected_rgn_end);
} }
void unlock_object(cached_texture_object &obj) void unlock_object(cached_texture_object &obj)
@ -254,6 +260,10 @@ namespace vk
bool invalidate_address(u32 rsx_address) bool invalidate_address(u32 rsx_address)
{ {
if (rsx_address < texture_cache_range.first ||
rsx_address > texture_cache_range.second)
return false;
for (cached_texture_object &tex : m_cache) for (cached_texture_object &tex : m_cache)
{ {
if (tex.dirty) continue; if (tex.dirty) continue;