diff --git a/src/util/opengl_device.cpp b/src/util/opengl_device.cpp
index cdd2a2d1b..19d20f74f 100644
--- a/src/util/opengl_device.cpp
+++ b/src/util/opengl_device.cpp
@@ -282,20 +282,10 @@ bool OpenGLDevice::CreateDeviceAndMainSwapChain(std::string_view adapter, Featur
     return false;
   }
 
-#if 0
-  // Is this needed?
-  m_window_info = m_gl_context->GetWindowInfo();
-  m_vsync_mode = ;
-#endif
-
-  const bool opengl_is_available =
-    ((!m_gl_context->IsGLES() && (GLAD_GL_VERSION_3_0 || GLAD_GL_ARB_uniform_buffer_object)) ||
-     (m_gl_context->IsGLES() && GLAD_GL_ES_VERSION_3_1));
-  if (!opengl_is_available)
+  // Context version restrictions are mostly fine here, but we still need to check for UBO for GL3.0.
+  if (!m_gl_context->IsGLES() && !GLAD_GL_ARB_uniform_buffer_object)
   {
-    Host::ReportErrorAsync(TRANSLATE_SV("GPUDevice", "Error"),
-                           TRANSLATE_SV("GPUDevice", "OpenGL renderer unavailable, your driver or hardware is not "
-                                                     "recent enough. OpenGL 3.1 or OpenGL ES 3.1 is required."));
+    Error::SetStringView(error, "OpenGL 3.1 or GL_ARB_uniform_buffer_object is required.");
     m_gl_context.reset();
     return false;
   }
@@ -1043,17 +1033,6 @@ void OpenGLDevice::UnbindPipeline(const OpenGLPipeline* pl)
   }
 }
 
-ALWAYS_INLINE_RELEASE void OpenGLDevice::SetVertexBufferOffsets(u32 base_vertex)
-{
-  const OpenGLPipeline::VertexArrayCacheKey& va = m_last_vao->first;
-  const size_t stride = va.vertex_attribute_stride;
-  for (u32 i = 0; i < va.num_vertex_attributes; i++)
-  {
-    glBindVertexBuffer(i, m_vertex_buffer->GetGLBufferId(), base_vertex * stride + va.vertex_attributes[i].offset,
-                       static_cast<GLsizei>(stride));
-  }
-}
-
 void OpenGLDevice::Draw(u32 vertex_count, u32 base_vertex)
 {
   s_stats.num_draws++;
diff --git a/src/util/opengl_pipeline.cpp b/src/util/opengl_pipeline.cpp
index 0d9067e2f..de0ed3523 100644
--- a/src/util/opengl_pipeline.cpp
+++ b/src/util/opengl_pipeline.cpp
@@ -23,6 +23,8 @@
 
 LOG_CHANNEL(GPUDevice);
 
+namespace {
+
 struct PipelineDiskCacheFooter
 {
   u32 version;
@@ -43,6 +45,28 @@ struct PipelineDiskCacheIndexEntry
 };
 static_assert(sizeof(PipelineDiskCacheIndexEntry) == 112); // No padding
 
+struct VAMapping
+{
+  GLenum type;
+  GLboolean normalized;
+  GLboolean integer;
+};
+
+} // namespace
+
+static constexpr const std::array<VAMapping, static_cast<u8>(GPUPipeline::VertexAttribute::Type::MaxCount)>
+  s_vao_format_mapping = {{
+    {GL_FLOAT, GL_FALSE, GL_FALSE},         // Float
+    {GL_UNSIGNED_BYTE, GL_FALSE, GL_TRUE},  // UInt8
+    {GL_BYTE, GL_FALSE, GL_TRUE},           // SInt8
+    {GL_UNSIGNED_BYTE, GL_TRUE, GL_FALSE},  // UNorm8
+    {GL_UNSIGNED_SHORT, GL_FALSE, GL_TRUE}, // UInt16
+    {GL_SHORT, GL_FALSE, GL_TRUE},          // SInt16
+    {GL_UNSIGNED_SHORT, GL_TRUE, GL_FALSE}, // UNorm16
+    {GL_UNSIGNED_INT, GL_FALSE, GL_TRUE},   // UInt32
+    {GL_INT, GL_FALSE, GL_TRUE},            // SInt32
+  }};
+
 static GLenum GetGLShaderType(GPUShaderStage stage)
 {
   static constexpr std::array<GLenum, static_cast<u32>(GPUShaderStage::MaxCount)> mapping = {{
@@ -375,14 +399,25 @@ GLuint OpenGLDevice::CompileProgram(const GPUPipeline::GraphicsConfig& plconfig,
       }
     }
 
-    glBindFragDataLocation(program_id, 0, "o_col0");
+    // Output colour is implicit in GLES.
+    const bool is_gles = m_gl_context->IsGLES();
+    if (!is_gles)
+      glBindFragDataLocation(program_id, 0, "o_col0");
 
     if (m_features.dual_source_blend)
     {
       if (GLAD_GL_VERSION_3_3 || GLAD_GL_ARB_blend_func_extended)
+      {
+        if (is_gles)
+          glBindFragDataLocationIndexed(program_id, 0, 0, "o_col0");
         glBindFragDataLocationIndexed(program_id, 1, 0, "o_col1");
+      }
       else if (GLAD_GL_EXT_blend_func_extended)
+      {
+        if (is_gles)
+          glBindFragDataLocationIndexedEXT(program_id, 0, 0, "o_col0");
         glBindFragDataLocationIndexedEXT(program_id, 1, 0, "o_col1");
+      }
     }
   }
 
@@ -515,29 +550,10 @@ GLuint OpenGLDevice::CreateVAO(std::span<const GPUPipeline::VertexAttribute> att
   m_vertex_buffer->Bind();
   m_index_buffer->Bind();
 
-  struct VAMapping
-  {
-    GLenum type;
-    GLboolean normalized;
-    GLboolean integer;
-  };
-  static constexpr const std::array<VAMapping, static_cast<u8>(GPUPipeline::VertexAttribute::Type::MaxCount)>
-    format_mapping = {{
-      {GL_FLOAT, GL_FALSE, GL_FALSE},         // Float
-      {GL_UNSIGNED_BYTE, GL_FALSE, GL_TRUE},  // UInt8
-      {GL_BYTE, GL_FALSE, GL_TRUE},           // SInt8
-      {GL_UNSIGNED_BYTE, GL_TRUE, GL_FALSE},  // UNorm8
-      {GL_UNSIGNED_SHORT, GL_FALSE, GL_TRUE}, // UInt16
-      {GL_SHORT, GL_FALSE, GL_TRUE},          // SInt16
-      {GL_UNSIGNED_SHORT, GL_TRUE, GL_FALSE}, // UNorm16
-      {GL_UNSIGNED_INT, GL_FALSE, GL_TRUE},   // UInt32
-      {GL_INT, GL_FALSE, GL_TRUE},            // SInt32
-    }};
-
   for (u32 i = 0; i < static_cast<u32>(attributes.size()); i++)
   {
     const GPUPipeline::VertexAttribute& va = attributes[i];
-    const VAMapping& m = format_mapping[static_cast<u8>(va.type.GetValue())];
+    const VAMapping& m = s_vao_format_mapping[static_cast<u8>(va.type.GetValue())];
     const void* ptr = reinterpret_cast<const void*>(static_cast<uintptr_t>(va.offset.GetValue()));
     glEnableVertexAttribArray(i);
     if (m.integer)
@@ -552,6 +568,35 @@ GLuint OpenGLDevice::CreateVAO(std::span<const GPUPipeline::VertexAttribute> att
   return vao;
 }
 
+void OpenGLDevice::SetVertexBufferOffsets(u32 base_vertex)
+{
+  const OpenGLPipeline::VertexArrayCacheKey& va = m_last_vao->first;
+  const u32 stride = va.vertex_attribute_stride;
+  const u32 base_vertex_start = base_vertex * stride;
+
+  if (glBindVertexBuffer) [[likely]]
+  {
+    for (u32 i = 0; i < va.num_vertex_attributes; i++)
+    {
+      glBindVertexBuffer(i, m_vertex_buffer->GetGLBufferId(), base_vertex_start + va.vertex_attributes[i].offset,
+                         static_cast<GLsizei>(stride));
+    }
+  }
+  else
+  {
+    for (u32 i = 0; i < va.num_vertex_attributes; i++)
+    {
+      const GPUPipeline::VertexAttribute& attrib = va.vertex_attributes[i];
+      const void* ptr = reinterpret_cast<const void*>(static_cast<uintptr_t>(base_vertex_start + attrib.offset));
+      const VAMapping& m = s_vao_format_mapping[static_cast<u8>(attrib.type.GetValue())];
+      if (m.integer)
+        glVertexAttribIPointer(i, attrib.components, m.type, stride, ptr);
+      else
+        glVertexAttribPointer(i, attrib.components, m.type, m.normalized, stride, ptr);
+    }
+  }
+}
+
 void OpenGLDevice::UnrefVAO(const OpenGLPipeline::VertexArrayCacheKey& key)
 {
   auto it = m_vao_cache.find(key);