move ogl-only settings into backend

degasus 2013-03-25 15:14:24 +01:00
parent 81e261eb68
commit 3d5e0a6d3d
6 changed files with 71 additions and 53 deletions

VideoConfig.h

@@ -165,11 +165,7 @@ struct VideoConfig
 	bool bSupportsFormatReinterpretation;
 	bool bSupportsPixelLighting;
-	bool bSupportsGLSLUBO;
-	bool bSupportsGLSLCache;
-	bool bSupportsGLPinnedMemory;
-	bool bSupportsGLSync;
-	bool bSupportsGLBaseVertex;
+	bool bSupportsGLSLUBO; // needed by pixelShaderGen, so must stay in videoCommon
 } backend_info;
 // Utility

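Worth noting in this first hunk: the only flag that survives in backend_info is the one VideoCommon itself consumes (bSupportsGLSLUBO is read by pixelShaderGen, per the new comment), while the GL-only flags move behind the backend boundary. Part of the motivation is likely that backend_info sits inside the config object that UpdateActiveConfig() copies wholesale, so a flag a backend clears at runtime can be silently restored on the next copy. A minimal sketch of that hazard, with simplified stand-in names (Config, g_Config, g_ActiveConfig here are illustrative definitions, not the Dolphin ones):

    #include <cstdio>

    struct Config { bool bSupportsGLSLCache; };

    Config g_Config{true};        // written by the UI/config side
    Config g_ActiveConfig{true};  // snapshot consumed by the video thread

    void UpdateActiveConfig() { g_ActiveConfig = g_Config; }

    int main()
    {
        g_ActiveConfig.bSupportsGLSLCache = false; // driver quirk found at init
        UpdateActiveConfig();                      // snapshot refresh undoes the override
        std::printf("cache re-enabled: %d\n", g_ActiveConfig.bSupportsGLSLCache); // prints 1
    }

A backend-owned struct like g_ogl_config is never touched by that copy, so runtime downgrades stick.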
ProgramShaderCache.cpp

@@ -21,6 +21,7 @@
 #include "Debugger.h"
 #include "Statistics.h"
 #include "ImageWrite.h"
+#include "Render.h"

 namespace OGL
 {
@@ -270,7 +271,7 @@ bool ProgramShaderCache::CompileShader ( SHADER& shader, const char* vcode, const char* pcode )
 	glAttachShader(pid, vsid);
 	glAttachShader(pid, psid);

-	if (g_ActiveConfig.backend_info.bSupportsGLSLCache)
+	if (g_ogl_config.bSupportsGLSLCache)
 		glProgramParameteri(pid, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);

 	shader.SetProgramBindings();
@@ -406,14 +407,14 @@ void ProgramShaderCache::Init(void)
 	}

 	// Read our shader cache, only if supported
-	if (g_ActiveConfig.backend_info.bSupportsGLSLCache)
+	if (g_ogl_config.bSupportsGLSLCache)
 	{
 		GLint Supported;
 		glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &Supported);
 		if(!Supported)
 		{
 			ERROR_LOG(VIDEO, "GL_ARB_get_program_binary is supported, but no binary format is known. So disable shader cache.");
-			g_ActiveConfig.backend_info.bSupportsGLSLCache = false;
+			g_ogl_config.bSupportsGLSLCache = false;
 		}
 		else
 		{
@@ -439,7 +440,7 @@ void ProgramShaderCache::Init(void)
 void ProgramShaderCache::Shutdown(void)
 {
 	// store all shaders in cache on disk
-	if (g_ActiveConfig.backend_info.bSupportsGLSLCache)
+	if (g_ogl_config.bSupportsGLSLCache)
 	{
 		PCache::iterator iter = pshaders.begin();
 		for (; iter != pshaders.end(); ++iter)
@@ -488,8 +489,7 @@ void ProgramShaderCache::CreateHeader ( void )
 	// as sandy do ogl3.1, glsl 140 is supported, so force it in this way.
 	// TODO: remove this again when the issue is fixed:
 	// see http://communities.intel.com/thread/36084
-	char *vendor = (char*)glGetString(GL_VENDOR);
-	bool glsl140_hack = strcmp(vendor, "Intel") == 0;
+	bool glsl140_hack = strcmp(g_ogl_config.gl_vendor, "Intel") == 0;
 #elif __APPLE__
 	// as apple doesn't support glsl130 at all, we also have to use glsl140
 	bool glsl140_hack = true;

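The Init() hunk keeps a defensive check that is easy to miss: a driver may advertise GL_ARB_get_program_binary yet report zero supported binary formats, in which case glGetProgramBinary has no format to emit and an on-disk cache is useless. A short sketch of that probe, assuming GLEW and a current GL context (CanCacheShaders is an illustrative name, not from the commit):

    #include <GL/glew.h>

    // True only if program binaries can actually be produced: the extension
    // must be present AND the driver must know at least one binary format.
    bool CanCacheShaders()
    {
        if (!GLEW_ARB_get_program_binary)
            return false;

        GLint formats = 0;
        glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &formats);
        return formats > 0;
    }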
Render.cpp

@@ -104,6 +104,7 @@ int OSDInternalW, OSDInternalH;
 namespace OGL
 {

+VideoConfig g_ogl_config;

 // Declarations and definitions
 // ----------------------------
@@ -119,7 +120,6 @@ static int s_MSAASamples = 1;
 static int s_MSAACoverageSamples = 0;
 static int s_LastMultisampleMode = 0;
-static bool s_bHaveCoverageMSAA = false;
 static u32 s_blendMode;

 #if defined(HAVE_WX) && HAVE_WX
@@ -135,7 +135,7 @@ static std::vector<u32> s_efbCache[2][EFB_CACHE_WIDTH * EFB_CACHE_HEIGHT]; // 2 for PEEK_Z and PEEK_COLOR

 int GetNumMSAASamples(int MSAAMode)
 {
-	int samples, maxSamples;
+	int samples;
 	switch (MSAAMode)
 	{
 		case MULTISAMPLE_OFF:
@@ -161,12 +161,11 @@ int GetNumMSAASamples(int MSAAMode)
 		default:
 			samples = 1;
 	}
-	glGetIntegerv(GL_MAX_SAMPLES, &maxSamples);
-	if(samples <= maxSamples) return samples;
+	if(samples <= g_ogl_config.max_samples) return samples;

-	ERROR_LOG(VIDEO, "MSAA Bug: %d samples selected, but only %d supported by gpu.", samples, maxSamples);
-	return maxSamples;
+	ERROR_LOG(VIDEO, "MSAA Bug: %d samples selected, but only %d supported by gpu.", samples, g_ogl_config.max_samples);
+	return g_ogl_config.max_samples;
 }

 int GetNumMSAACoverageSamples(int MSAAMode)
@@ -187,7 +186,7 @@ int GetNumMSAACoverageSamples(int MSAAMode)
 		default:
 			samples = 0;
 	}
-	if(s_bHaveCoverageMSAA || samples == 0) return samples;
+	if(g_ogl_config.bSupportCoverageMSAA || samples == 0) return samples;

 	ERROR_LOG(VIDEO, "MSAA Bug: CSAA selected, but not supported by gpu.");
 	return 0;
@@ -203,15 +202,6 @@ Renderer::Renderer()
 	s_ShowEFBCopyRegions_VBO = 0;
 	s_blendMode = 0;
 	InitFPSCounter();

-	const char* gl_vendor = (const char*)glGetString(GL_VENDOR);
-	const char* gl_renderer = (const char*)glGetString(GL_RENDERER);
-	const char* gl_version = (const char*)glGetString(GL_VERSION);
-
-	OSD::AddMessage(StringFromFormat("Video Info: %s, %s, %s",
-						gl_vendor,
-						gl_renderer,
-						gl_version).c_str(), 5000);
-
 	bool bSuccess = true;
 	GLint numvertexattribs = 0;
@@ -270,35 +260,54 @@ Renderer::Renderer()
 		bSuccess = false;
 	}

-	s_bHaveCoverageMSAA = GLEW_NV_framebuffer_multisample_coverage;
-
-	if (!bSuccess)
-		return;	// TODO: fail
-
 	g_Config.backend_info.bSupportsDualSourceBlend = GLEW_ARB_blend_func_extended;
 	g_Config.backend_info.bSupportsGLSLUBO = GLEW_ARB_uniform_buffer_object;
-	g_Config.backend_info.bSupportsGLPinnedMemory = GLEW_AMD_pinned_memory;
-	g_Config.backend_info.bSupportsGLSync = GLEW_ARB_sync;
-	g_Config.backend_info.bSupportsGLSLCache = GLEW_ARB_get_program_binary;
-	g_Config.backend_info.bSupportsGLBaseVertex = GLEW_ARB_draw_elements_base_vertex;
+	g_ogl_config.bSupportsGLSLCache = GLEW_ARB_get_program_binary;
+	g_ogl_config.bSupportsGLPinnedMemory = GLEW_AMD_pinned_memory;
+	g_ogl_config.bSupportsGLSync = GLEW_ARB_sync;
+	g_ogl_config.bSupportsGLBaseVertex = GLEW_ARB_draw_elements_base_vertex;
+	g_ogl_config.bSupportCoverageMSAA = GLEW_NV_framebuffer_multisample_coverage;
+
+	g_ogl_config.gl_vendor = (const char*)glGetString(GL_VENDOR);
+	g_ogl_config.gl_renderer = (const char*)glGetString(GL_RENDERER);
+	g_ogl_config.gl_version = (const char*)glGetString(GL_VERSION);
+	glGetIntegerv(GL_MAX_SAMPLES, &g_ogl_config.max_samples);

 	if(g_Config.backend_info.bSupportsGLSLUBO && (
 		// hd3000 get corruption, hd4000 also and a big slowdown
-		!strcmp(gl_vendor, "Intel Open Source Technology Center") && (!strcmp(gl_version, "3.0 Mesa 9.0.0") || !strcmp(gl_version, "3.0 Mesa 9.0.1") || !strcmp(gl_version, "3.0 Mesa 9.0.2") || !strcmp(gl_version, "3.0 Mesa 9.0.3") || !strcmp(gl_version, "3.0 Mesa 9.1.0") || !strcmp(gl_version, "3.0 Mesa 9.1.1") )
+		!strcmp(g_ogl_config.gl_vendor, "Intel Open Source Technology Center") && (
+			!strcmp(g_ogl_config.gl_version, "3.0 Mesa 9.0.0") ||
+			!strcmp(g_ogl_config.gl_version, "3.0 Mesa 9.0.1") ||
+			!strcmp(g_ogl_config.gl_version, "3.0 Mesa 9.0.2") ||
+			!strcmp(g_ogl_config.gl_version, "3.0 Mesa 9.0.3") ||
+			!strcmp(g_ogl_config.gl_version, "3.0 Mesa 9.1.0") ||
+			!strcmp(g_ogl_config.gl_version, "3.0 Mesa 9.1.1") )
 	)) {
 		g_Config.backend_info.bSupportsGLSLUBO = false;
 		ERROR_LOG(VIDEO, "buggy driver detected. Disable UBO");
 	}

 	UpdateActiveConfig();

-	OSD::AddMessage(StringFromFormat("Missing Extensions: %s%s%s%s%s%s",
+	OSD::AddMessage(StringFromFormat("Video Info: %s, %s, %s",
+			g_ogl_config.gl_vendor,
+			g_ogl_config.gl_renderer,
+			g_ogl_config.gl_version).c_str(), 5000);
+
+	OSD::AddMessage(StringFromFormat("Missing Extensions: %s%s%s%s%s%s%s",
 			g_ActiveConfig.backend_info.bSupportsDualSourceBlend ? "" : "DualSourceBlend ",
 			g_ActiveConfig.backend_info.bSupportsGLSLUBO ? "" : "UniformBuffer ",
-			g_ActiveConfig.backend_info.bSupportsGLPinnedMemory ? "" : "PinnedMemory ",
-			g_ActiveConfig.backend_info.bSupportsGLSLCache ? "" : "ShaderCache ",
-			g_ActiveConfig.backend_info.bSupportsGLBaseVertex ? "" : "BaseVertex ",
-			g_ActiveConfig.backend_info.bSupportsGLSync ? "" : "Sync "
+			g_ogl_config.bSupportsGLPinnedMemory ? "" : "PinnedMemory ",
+			g_ogl_config.bSupportsGLSLCache ? "" : "ShaderCache ",
+			g_ogl_config.bSupportsGLBaseVertex ? "" : "BaseVertex ",
+			g_ogl_config.bSupportsGLSync ? "" : "Sync ",
+			g_ogl_config.bSupportCoverageMSAA ? "" : "CSAA "
 			).c_str(), 5000);

+	if (!bSuccess)
+		return;	// TODO: fail
+
 	s_LastMultisampleMode = g_ActiveConfig.iMultisampleMode;
 	s_MSAASamples = GetNumMSAASamples(s_LastMultisampleMode);
@@ -445,10 +454,6 @@ void Renderer::DrawDebugInfo()
 	if (g_ActiveConfig.bShowEFBCopyRegions)
 	{
-		// Store Line Size
-		GLfloat lSize;
-		glGetFloatv(GL_LINE_WIDTH, &lSize);
-
 		// Set Line Size
 		glLineWidth(3.0f);
@@ -561,7 +566,7 @@ void Renderer::DrawDebugInfo()
 		glDrawArrays(GL_LINES, 0, stats.efb_regions.size() * 2*6);

 		// Restore Line Size
-		glLineWidth(lSize);
+		SetLineWidth();

 		// Clear stored regions
 		stats.efb_regions.clear();

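GetNumMSAASamples now clamps against a limit queried once in the constructor instead of calling glGetIntegerv(GL_MAX_SAMPLES, ...) on every invocation; glGet* queries can synchronize with the driver, so caching immutable limits at init is the usual idiom. The same clamp in isolation, as a hedged sketch (QueryDriverLimits and ClampMSAASamples are illustrative names, not from the commit):

    #include <GL/glew.h>
    #include <cstdio>

    static GLint s_max_samples = 1; // queried once, right after context creation

    void QueryDriverLimits()
    {
        glGetIntegerv(GL_MAX_SAMPLES, &s_max_samples);
    }

    int ClampMSAASamples(int requested)
    {
        if (requested <= s_max_samples)
            return requested;

        std::fprintf(stderr, "%d samples requested, but gpu only supports %d\n",
                     requested, s_max_samples);
        return s_max_samples;
    }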
Render.h

@@ -9,6 +9,21 @@ namespace OGL
 void ClearEFBCache();

+// ogl-only config, so not in VideoConfig.h
+extern struct VideoConfig {
+	bool bSupportsGLSLCache;
+	bool bSupportsGLPinnedMemory;
+	bool bSupportsGLSync;
+	bool bSupportsGLBaseVertex;
+	bool bSupportCoverageMSAA;
+
+	const char *gl_vendor;
+	const char *gl_renderer;
+	const char* gl_version;
+
+	s32 max_samples;
+} g_ogl_config;

 class Renderer : public ::Renderer
 {
 public:

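Note the deliberate name reuse: inside namespace OGL, the unqualified name VideoConfig now resolves to this GL-only struct and shadows VideoCommon's ::VideoConfig, which is why Render.cpp can declare the global as plain "VideoConfig g_ogl_config;". A contrived sketch of the lookup rule (both structs reduced to one-field tags):

    struct VideoConfig { int common; };          // stands in for VideoCommon's type

    namespace OGL
    {
        struct VideoConfig { int gl_only; };     // stands in for the struct above

        VideoConfig a;    // unqualified: picks OGL::VideoConfig
        ::VideoConfig b;  // explicit qualification reaches the shared type
    }

    int main() { OGL::a.gl_only = 1; OGL::b.common = 2; }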
StreamBuffer.cpp

@@ -19,6 +19,7 @@
 #include "GLUtil.h"
 #include "StreamBuffer.h"
 #include "MemoryUtil.h"
+#include "Render.h"

 namespace OGL
 {
@@ -31,19 +32,19 @@ StreamBuffer::StreamBuffer(u32 type, size_t size, StreamType uploadType)
 {
 	glGenBuffers(1, &m_buffer);

-	bool nvidia = !strcmp((const char*)glGetString(GL_VENDOR), "NVIDIA Corporation");
+	bool nvidia = !strcmp(g_ogl_config.gl_vendor, "NVIDIA Corporation");

 	if(m_uploadtype & STREAM_DETECT)
 	{
-		if(!g_Config.backend_info.bSupportsGLBaseVertex && (m_uploadtype & BUFFERSUBDATA))
+		if(!g_ogl_config.bSupportsGLBaseVertex && (m_uploadtype & BUFFERSUBDATA))
 			m_uploadtype = BUFFERSUBDATA;
-		else if(g_Config.backend_info.bSupportsGLSync && g_Config.bHackedBufferUpload && (m_uploadtype & MAP_AND_RISK))
+		else if(g_ogl_config.bSupportsGLSync && g_Config.bHackedBufferUpload && (m_uploadtype & MAP_AND_RISK))
 			m_uploadtype = MAP_AND_RISK;
-		else if(g_Config.backend_info.bSupportsGLSync && g_Config.backend_info.bSupportsGLPinnedMemory && (m_uploadtype & PINNED_MEMORY))
+		else if(g_ogl_config.bSupportsGLSync && g_ogl_config.bSupportsGLPinnedMemory && (m_uploadtype & PINNED_MEMORY))
 			m_uploadtype = PINNED_MEMORY;
 		else if(nvidia && (m_uploadtype & BUFFERSUBDATA))
 			m_uploadtype = BUFFERSUBDATA;
-		else if(g_Config.backend_info.bSupportsGLSync && (m_uploadtype & MAP_AND_SYNC))
+		else if(g_ogl_config.bSupportsGLSync && (m_uploadtype & MAP_AND_SYNC))
 			m_uploadtype = MAP_AND_SYNC;
 		else
 			m_uploadtype = MAP_AND_ORPHAN;
@@ -191,7 +192,7 @@ void StreamBuffer::Init()
 	// on error, switch to another backend. some old catalyst seems to have broken pinned memory support
 	if(glGetError() != GL_NO_ERROR) {
-		ERROR_LOG(VIDEO, "pinned memory detected, but not working. Please report this: %s, %s, %s", glGetString(GL_VENDOR), glGetString(GL_RENDERER), glGetString(GL_VERSION));
+		ERROR_LOG(VIDEO, "pinned memory detected, but not working. Please report this: %s, %s, %s", g_ogl_config.gl_vendor, g_ogl_config.gl_renderer, g_ogl_config.gl_version);
 		Shutdown();
 		m_uploadtype = MAP_AND_SYNC;
 		Init();

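The STREAM_DETECT branch above is a capability-driven fallback chain: callers pass a bitmask of upload strategies they can tolerate, and the constructor settles on the first one the driver actually supports, bottoming out at the universally safe map-and-orphan path. A reduced sketch of the idea (the UploadType values and Caps struct are illustrative, not the Dolphin definitions):

    #include <cstdint>

    enum UploadType : uint32_t
    {
        MAP_AND_ORPHAN = 1 << 0,
        MAP_AND_SYNC   = 1 << 1,
        PINNED_MEMORY  = 1 << 2,
        BUFFERSUBDATA  = 1 << 3,
    };

    struct Caps { bool sync, pinned_memory, base_vertex; };

    // Pick the best strategy among those the caller allows.
    UploadType Detect(uint32_t allowed, const Caps& caps)
    {
        if (!caps.base_vertex && (allowed & BUFFERSUBDATA))
            return BUFFERSUBDATA;  // without base vertex, restarting at offset 0 is simplest
        if (caps.sync && caps.pinned_memory && (allowed & PINNED_MEMORY))
            return PINNED_MEMORY;
        if (caps.sync && (allowed & MAP_AND_SYNC))
            return MAP_AND_SYNC;
        return MAP_AND_ORPHAN;     // works everywhere, no extensions needed
    }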
VertexManager.cpp

@@ -41,6 +41,7 @@
 #include "Debugger.h"
 #include "StreamBuffer.h"
 #include "PerfQueryBase.h"
+#include "Render.h"

 #include "main.h"
@@ -123,7 +124,7 @@ void VertexManager::Draw(u32 stride)
 	u32 triangle_index_size = IndexGenerator::GetTriangleindexLen();
 	u32 line_index_size = IndexGenerator::GetLineindexLen();
 	u32 point_index_size = IndexGenerator::GetPointindexLen();
-	if(g_Config.backend_info.bSupportsGLBaseVertex) {
+	if(g_ogl_config.bSupportsGLBaseVertex) {
 		if (triangle_index_size > 0)
 		{
 			glDrawElementsBaseVertex(GL_TRIANGLES, triangle_index_size, GL_UNSIGNED_SHORT, (u8*)NULL+s_offset[0], s_baseVertex);
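The bSupportsGLBaseVertex flag guards glDrawElementsBaseVertex, which lets every batch share one index buffer whose indices start at zero; the GPU adds the base offset per draw. Without ARB_draw_elements_base_vertex the indices have to be rebased before upload, which is the slower path this flag selects against. A sketch of the two paths, assuming GLEW plus a bound VAO and index buffer (DrawBatch is an illustrative name, not from the commit):

    #include <GL/glew.h>
    #include <cstddef>

    // Draw 'count' 16-bit indices starting at byte 'offset'; the batch's
    // vertices begin at 'base_vertex' within the shared vertex buffer.
    void DrawBatch(GLsizei count, std::size_t offset, GLint base_vertex)
    {
        if (GLEW_ARB_draw_elements_base_vertex)
        {
            // GPU adds base_vertex to every index it fetches.
            glDrawElementsBaseVertex(GL_TRIANGLES, count, GL_UNSIGNED_SHORT,
                                     (GLvoid*)offset, base_vertex);
        }
        else
        {
            // Fallback: indices must already be rebased on the CPU
            // (base_vertex folded in) before this call.
            glDrawElements(GL_TRIANGLES, count, GL_UNSIGNED_SHORT,
                           (GLvoid*)offset);
        }
    }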