* Add some code to support OGL4 debugging. Not enabled in CMakeLists.txt by default.
* LOAD_PS/LOAD_VS were taking their shader argument by value instead of by reference.
* Reuse the OpenGL context creation developed for GSdx. For the moment only an OGL2 context is enabled.
* Add some documentation for the zzShader API.


git-svn-id: http://pcsx2.googlecode.com/svn/trunk@5188 96395faa-99c1-11dd-bbfe-3dabce05a288
gregory.hainaut 2012-04-29 18:50:07 +00:00
parent 7cdb9fc2f2
commit 8396c49eb5
13 changed files with 264 additions and 96 deletions

View File

@ -20,6 +20,7 @@ set(CommonFlags
-fno-strict-aliasing
-Wstrict-aliasing # Allows tracking strict aliasing issues.
-Wunused-variable
#-DOGL4_LOG # Easier for development
)
set(OptimizationFlags

View File

@ -47,6 +47,7 @@ class GLWindow
void GetWindowSize();
void UpdateGrabKey();
void Force43Ratio();
bool CreateContextGL(int, int);
void CreateContextGL();
#endif
bool fullScreen, doubleBuffered;

View File

@ -174,54 +174,74 @@ void GLWindow::GetGLXVersion()
}
bool GLWindow::CreateContextGL(int major, int minor)
{
if (!glDisplay) return false;
if (major <= 2) {
context = glXCreateContext(glDisplay, vi, NULL, GL_TRUE);
return true;
}
// Get visual information
static int attrListDbl[] =
{
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_DOUBLEBUFFER , True,
None
};
PFNGLXCHOOSEFBCONFIGPROC glXChooseFBConfig = (PFNGLXCHOOSEFBCONFIGPROC) glXGetProcAddress((GLubyte *) "glXChooseFBConfig");
int fbcount = 0;
GLXFBConfig *fbc = glXChooseFBConfig(glDisplay, DefaultScreen(glDisplay), attrListDbl, &fbcount);
if (!fbc || fbcount < 1) return false;
PFNGLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribsARB = (PFNGLXCREATECONTEXTATTRIBSARBPROC)glXGetProcAddress((const GLubyte*) "glXCreateContextAttribsARB");
if (!glXCreateContextAttribsARB) return false;
// Create a context
int context_attribs[] =
{
GLX_CONTEXT_MAJOR_VERSION_ARB, major,
GLX_CONTEXT_MINOR_VERSION_ARB, minor,
// Keep compatibility for old cruft
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB,
//GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_DEBUG_BIT_ARB | GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
// FIXME : Request a debug context to ease opengl development
GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_DEBUG_BIT_ARB,
None
};
context = glXCreateContextAttribsARB(glDisplay, fbc[0], 0, true, context_attribs);
if (!context) return false;
XSync( glDisplay, false);
return true;
}
void GLWindow::CreateContextGL()
{
if (!glDisplay) return;
// Create a 2.0 OpenGL context. My understanding is that you need one in order to call the GL function that creates the 3.0 context.
context = glXCreateContext(glDisplay, vi, NULL, GL_TRUE);
// FIXME
// On Geforce 7, creating a 3.0 context crashes with BadAlloc (insufficient resources for operation),
// so until a better solution is found, keep the 2.0 context -- Gregory
return;
PFNGLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribsARB = (PFNGLXCREATECONTEXTATTRIBSARBPROC) glXGetProcAddress((GLubyte *) "glXCreateContextAttribsARB");
PFNGLXCHOOSEFBCONFIGPROC glXChooseFBConfig = (PFNGLXCHOOSEFBCONFIGPROC) glXGetProcAddress((GLubyte *) "glXChooseFBConfig");
if (!glXCreateContextAttribsARB or !glXChooseFBConfig) {
ZZLog::Error_Log("No support of OpenGL 3.0\n");
return;
}
// Note: this part seems Linux-specific
int fbcount = 0;
GLXFBConfig *framebuffer_config = glXChooseFBConfig(glDisplay, DefaultScreen(glDisplay), NULL, &fbcount);
if (!framebuffer_config or !fbcount) return;
#if 1
// At least create a 3.0 context with compatibility profile
int attribs[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 0,
// GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB,
0
};
#ifdef OGL4_LOG
// We need to define a debug context, so we need at least a 3.0 context (if not 3.2)
CreateContextGL(4, 1);
#else
// Create a 3.2 core context without compatibility profile
int attribs[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 2,
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
0
};
// FIXME: there was some issue with the previous context creation on Geforce 7. The code was rewritten
// for GSdx; unfortunately it was not tested on a Geforce 7, so keep the 2.0 context for now.
#if 0
if (! CreateContextGL(3, 0) )
CreateContextGL(2, 0);
#else
CreateContextGL(2, 0);
#endif
#endif
GLXContext context_temp = glXCreateContextAttribsARB(glDisplay, framebuffer_config[0], NULL, true, attribs);
if (context_temp) {
ZZLog::Error_Log("Create a 3.0 opengl context");
glXDestroyContext(glDisplay, context);
context = context_temp;
}
}
#ifdef USE_GSOPEN2
@ -231,8 +251,9 @@ bool GLWindow::DisplayWindow(int _width, int _height)
if (!CreateVisual()) return false;
// connect the glx-context to the window
CreateContextGL();
// connect the glx-context to the window
glXMakeCurrent(glDisplay, glWindow, context);
GetGLXVersion();
@ -289,6 +310,8 @@ bool GLWindow::DisplayWindow(int _width, int _height)
void GLWindow::SwapGLBuffers()
{
if (glGetError() != GL_NO_ERROR) ZZLog::Debug_Log("glError before swap!");
ZZLog::Check_GL_Error();
// FIXME I think we need to flush when there is only one visual buffer
glXSwapBuffers(glDisplay, glWindow);
// glClear(GL_COLOR_BUFFER_BIT);
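// Untested sketch of the fix the FIXME above hints at: with a single-buffered
// visual there is no back buffer to swap, so pending commands would need an
// explicit flush instead. Uses the class's own doubleBuffered flag.
if (doubleBuffered)
    glXSwapBuffers(glDisplay, glWindow);
else
    glFlush(); // no back buffer: just push the queued commands to the server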

View File

@ -333,6 +333,7 @@ EXPORT_C_(s32) GSopen2( void* pDsp, u32 flags )
InitMisc();
ZZLog::GS_Log("GSopen2 finished.");
ZZLog::Check_GL_Error();
return 0;
}

View File

@ -226,7 +226,11 @@ void CDepthTarget::Update(int context, CRenderTarget* prndr)
vdepth.z = vdepth.w = 0;
}
#ifdef GLSL_API
assert(ppsBitBltDepth.sBitBltZ != -1);
#else
assert(ppsBitBltDepth.sBitBltZ != 0);
#endif
ZZshSetParameter4fv(ppsBitBltDepth.prog, ppsBitBltDepth.sBitBltZ, (vdepth*(255.0f / 256.0f)), "g_fBitBltZ");
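// Sketch of why the sentinel differs between the two asserts above (standard
// GL semantics; `program' is a hypothetical handle): glGetUniformLocation()
// returns -1 for a uniform that is absent or optimized out, so a valid GLSL
// location is "!= -1", while the Cg path stored a parameter handle where a
// null (0) handle meant the lookup failed.
GLint loc = glGetUniformLocation(program, "g_fBitBltZ");
assert(loc != -1); // GLSL: -1 flags a missing uniform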

View File

@ -61,6 +61,7 @@ namespace ZZLog
{
std::string s_strLogPath("logs");
FILE *gsLog;
FILE *gsLogGL; // Use a separate file because GL logging can be very verbose
bool IsLogging()
{
@ -72,6 +73,7 @@ bool IsLogging()
void Open()
{
const std::string LogFile(s_strLogPath + "/GSzzogl.log");
const std::string LogFileGL(s_strLogPath + "/GSzzogl_GL.log");
gsLog = fopen(LogFile.c_str(), "w");
if (gsLog != NULL)
@ -79,6 +81,13 @@ void Open()
else
SysMessage("Can't create log file %s\n", LogFile.c_str());
gsLogGL = fopen(LogFileGL.c_str(), "w");
if (gsLogGL != NULL)
setvbuf(gsLogGL, NULL, _IONBF, 0);
else
SysMessage("Can't create log file %s\n", LogFileGL.c_str());
}
void Close()
@ -87,6 +96,10 @@ void Close()
fclose(gsLog);
gsLog = NULL;
}
if (gsLogGL != NULL) {
fclose(gsLogGL);
gsLogGL = NULL;
}
}
void SetDir(const char* dir)
@ -353,4 +366,83 @@ void Error_Log(const char *fmt, ...)
va_end(list);
}
#define LOUD_DEBUGGING
#ifdef OGL4_LOG
void Check_GL_Error()
{
unsigned int count = 64; // max. num. of messages that will be read from the log
int bufsize = 2048;
unsigned int* sources = new unsigned int[count];
unsigned int* types = new unsigned int[count];
unsigned int* ids = new unsigned int[count];
unsigned int* severities = new unsigned int[count];
int* lengths = new int[count];
char* messageLog = new char[bufsize];
unsigned int retVal = glGetDebugMessageLogARB(count, bufsize, sources, types, ids, severities, lengths, messageLog);
if(retVal > 0)
{
unsigned int pos = 0;
for(unsigned int i=0; i<retVal; i++)
{
GL_Error_Log(sources[i], types[i], ids[i], severities[i],
&messageLog[pos]);
pos += lengths[i];
}
}
delete [] sources;
delete [] types;
delete [] ids;
delete [] severities;
delete [] lengths;
delete [] messageLog;
}
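// For reference only (not part of this commit): ARB_debug_output also offers
// a callback that avoids polling glGetDebugMessageLogARB() every frame.
// Sketch, assuming a debug context is current:
static void APIENTRY DebugCallback(GLenum source, GLenum type, GLuint id,
                                   GLenum severity, GLsizei length,
                                   const GLchar* message, GLvoid* userParam)
{
    GL_Error_Log(source, type, id, severity, message);
}
// Registration would look like: glDebugMessageCallbackARB((GLDEBUGPROCARB)DebugCallback, NULL);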
void GL_Error_Log(unsigned int source, unsigned int type, unsigned int id, unsigned int severity, const char* message)
{
char debType[20], debSev[5];
static int sev_counter = 0;
if(type == GL_DEBUG_TYPE_ERROR_ARB)
strcpy(debType, "Error");
else if(type == GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB)
strcpy(debType, "Deprecated behavior");
else if(type == GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_ARB)
strcpy(debType, "Undefined behavior");
else if(type == GL_DEBUG_TYPE_PORTABILITY_ARB)
strcpy(debType, "Portability");
else if(type == GL_DEBUG_TYPE_PERFORMANCE_ARB)
strcpy(debType, "Performance");
else if(type == GL_DEBUG_TYPE_OTHER_ARB)
strcpy(debType, "Other");
else
strcpy(debType, "UNKNOWN");
if(severity == GL_DEBUG_SEVERITY_HIGH_ARB) {
strcpy(debSev, "High");
sev_counter++;
}
else if(severity == GL_DEBUG_SEVERITY_MEDIUM_ARB)
strcpy(debSev, "Med");
else if(severity == GL_DEBUG_SEVERITY_LOW_ARB)
strcpy(debSev, "Low");
else
strcpy(debSev, "?"); // avoid printing an uninitialized buffer
#ifdef LOUD_DEBUGGING
fprintf(stderr,"Type:%s\tSeverity:%s\tMessage:%s\n", debType, debSev,message);
#endif
if(gsLogGL)
{
fprintf(gsLogGL,"Type:%s\tSeverity:%s\tMessage:%s\n", debType, debSev,message);
}
//if (sev_counter > 2) assert(0);
}
#else
void Check_GL_Error() {}
void GL_Error_Log(unsigned int source, unsigned int type, unsigned int id, unsigned int severity, const char* message) {}
#endif
};

View File

@ -95,6 +95,8 @@ static bool SPAM_PASS;
#define B_RETURNX(x, rtype) { if( !(x) ) { ZZLog::Error_Log("%s:%d: %s", __FILE__, (u32)__LINE__, #x); return (##rtype); } }
#define B_G(x, action) { if( !(x) ) { ZZLog::Error_Log("%s:%d: %s", __FILE__, (u32)__LINE__, #x); action; } }
#ifndef OGL4_LOG
#define GL_REPORT_ERROR() \
{ \
GLenum err = glGetError(); \
@ -119,6 +121,11 @@ static bool SPAM_PASS;
# define GL_REPORT_ERRORD()
#endif
#else
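// With OGL4_LOG, driver messages from ARB_debug_output supersede manual
// glGetError() polling, so the report macros compile away to nothing.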
#define GL_REPORT_ERROR()
#define GL_REPORT_ERRORD()
#endif
inline const char *error_name(int err)
{
@ -196,6 +203,10 @@ extern void Debug_Log(const char *fmt, ...);
extern void Dev_Log(const char *fmt, ...);
extern void Warn_Log(const char *fmt, ...);
extern void Error_Log(const char *fmt, ...);
extern void Check_GL_Error();
extern void GL_Error_Log(unsigned int source, unsigned int type, unsigned int id, unsigned int severity, const char* message);
};
#endif // ZZLOG_H_INCLUDED

View File

@ -147,10 +147,12 @@ inline bool CreateImportantCheck()
#ifndef _WIN32
int const glew_ok = glewInit();
if (glew_ok != GLEW_OK)
{
if (glew_ok != GLEW_OK) {
ZZLog::Error_Log("glewInit() is not ok!");
bSuccess = false;
} else {
const GLubyte* gl_version = glGetString(GL_VERSION);
ZZLog::Error_Log("Supported Opengl version : %s\n", gl_version);
}
#endif
@ -773,6 +775,10 @@ bool ZZCreate(int _width, int _height)
g_vsprog = g_psprog = sZero;
#ifdef OGL4_LOG
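// Synchronous output delivers each debug message during the offending GL call
// (easier to breakpoint); it relies on the debug context requested with
// GLX_CONTEXT_DEBUG_BIT_ARB above.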
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
#endif
if (glGetError() == GL_NO_ERROR)
{
return bSuccess;

View File

@ -2123,7 +2123,7 @@ inline void NeedFactor(int w)
template<bool SIGN, bool NEED_FACTOR>
__forceinline int Set_Alpha_Color_Factor(const alphaInfo& a)
{
int usec;
int usec = 0;
switch(a.c)
{
case 0:

View File

@ -292,20 +292,52 @@ inline void ResetShaderCounters() {
// g_vsprog = g_psprog = sZero;
}
/////////////////////////////////////////////////////////////////
// Improvements:
// * store the locations of uniforms, to avoid calling glGetUniformLocation lots of times
// * use the separate shader build pipeline: the current code emulates this behavior, but with
// recent OpenGL 4 it would be much easier to implement.
/////////////////////////////////////////////////////////////////
// GLSL: Stub
extern bool ZZshCheckProfilesSupport();
// Try various shaders to choose a supported configuration
// g_nPixelShaderVer -> SHADER_ACCURATE and SHADER_REDUCED
// Honestly, we could probably stop supporting those cards.
extern bool ZZshStartUsingShaders();
// Open the shader file into a source array
extern bool ZZshCreateOpenShadersFile();
// These two functions are used to stop/start the shaders. The idea is to draw the HUD text.
// Enable is not implemented, and it likely stops everything.
extern void ZZshGLDisableProfile();
extern void ZZshGLEnableProfile();
// Set the uniform parameter on the host (NOT GL).
// Param seems to be an absolute index into a table of uniforms.
extern void ZZshSetParameter4fv(ZZshShaderLink prog, ZZshParameter param, const float* v, const char* name);
extern void ZZshSetParameter4fv(ZZshParameter param, const float* v, const char* name);
extern void ZZshSetParameter4fvWithRetry(ZZshParameter* param, ZZshShaderLink prog, const float* v, const char* name);
// Set the texture parameter on the host (NOT GL).
extern void ZZshGLSetTextureParameter(ZZshShaderLink prog, ZZshParameter param, GLuint texobj, const char* name);
extern void ZZshGLSetTextureParameter(ZZshParameter param, GLuint texobj, const char* name);
// Set a default value for one uniform on the host (NOT GL).
extern void ZZshDefaultOneColor( FRAGMENTSHADER ptr );
// Link, then run with the new vertex/fragment shaders.
extern void ZZshSetVertexShader(ZZshShaderLink prog);
extern void ZZshSetPixelShader(ZZshShaderLink prog);
// Compile a standalone fragment/vertex shader program.
// Note: it also initializes all the uniform parameters on the host (NOT GL).
extern bool ZZshLoadExtraEffects();
// Clean up some stuff on exit.
extern void ZZshExitCleaning();
extern FRAGMENTSHADER* ZZshLoadShadeEffect(int type, int texfilter, int fog, int testaem, int exactcolor, const clampInfo& clamp, int context, bool* pbFailed);
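// Hypothetical usage sketch of the zzShader API documented above. Names come
// from the declarations; call order and the placeholder variables (type,
// texfilter, param, values, vs_prog, ...) are assumptions, and error handling
// is trimmed.
if (!ZZshCheckProfilesSupport() || !ZZshStartUsingShaders())
    return false;                            // no usable shader profile
if (!ZZshCreateOpenShadersFile())            // load the shader sources
    return false;
bool failed = false;
FRAGMENTSHADER* ps = ZZshLoadShadeEffect(type, texfilter, fog, testaem,
                                         exactcolor, clamp, context, &failed);
ZZshSetParameter4fv(ps->prog, param, values, "g_fOneColor"); // host-side only
ZZshSetVertexShader(vs_prog);                // link + run the vertex...
ZZshSetPixelShader(ps->prog);                // ...and fragment program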

View File

@ -456,7 +456,7 @@ inline ZZshProgram madeProgram(ZZshShader shader, ZZshShader shader2, char* name
return prog;
}
void PutParametersInProgam(int start, int finish) {
static void PutParametersInProgam(int start, int finish) {
for (int i = start; i < finish; i++) {
ZZshParamInfo param = UniformsIndex[i];
GLint location = glGetUniformLocation(ZZshMainProgram, param.ShName);
@ -517,7 +517,7 @@ void PutSInProgam(int start, int finish) {
GL_REPORT_ERRORD();
}
bool ValidateProgram(ZZshProgram Prog) {
static bool ValidateProgram(ZZshProgram Prog) {
GLint isValid;
glGetProgramiv(Prog, GL_VALIDATE_STATUS, &isValid);
@ -532,7 +532,7 @@ bool ValidateProgram(ZZshProgram Prog) {
return (isValid != 0);
}
void PutParametersAndRun(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
static void PutParametersAndRun(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
UNIFORM_ERROR_LOG("Run program %s(%d) \t+\t%s(%d)", ShaderNames[vs->Shader], vs->Shader, ShaderNames[ps->Shader], ps->Shader);
glUseProgram(ZZshMainProgram);
@ -554,7 +554,7 @@ void PutParametersAndRun(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
GL_REPORT_ERRORD();
}
void CreateAndRunMain(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
static void CreateNewProgram(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
ZZLog::Error_Log("\n---> New shader program %d, %s(%d) \t+\t%s(%d).", ZZshMainProgram, ShaderNames[vs->Shader], vs->Shader, ShaderNames[ps->Shader], ps->Shader);
if (vs->Shader != 0)
@ -569,9 +569,6 @@ void CreateAndRunMain(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
}
GL_REPORT_ERRORD();
PutParametersAndRun(vs, ps);
GL_REPORT_ERRORD();
}
inline bool ZZshCheckShaderCompatibility(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
@ -582,7 +579,7 @@ inline bool ZZshCheckShaderCompatibility(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
return (vs->ShaderType == ps->ShaderType);
}
void ZZshSetShader(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
static void ZZshSetShader(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
if (!ZZshCheckShaderCompatibility(vs, ps)) // We don't need to link incompatible shaders
return;
@ -592,14 +589,16 @@ void ZZshSetShader(VERTEXSHADER* vs, FRAGMENTSHADER* ps) {
if (vss !=0 && pss != 0) {
if (CompiledPrograms[vss][pss] != 0 && glIsProgram(CompiledPrograms[vss][pss])) {
ZZshMainProgram = CompiledPrograms[vs->Shader][ps->Shader];
PutParametersAndRun(vs, ps);
}
else {
ZZshProgram NewProgram = glCreateProgram();
ZZshMainProgram = NewProgram;
CompiledPrograms[vss][pss] = NewProgram;
CreateAndRunMain(vs, ps) ;
CreateNewProgram(vs, ps) ;
}
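// Uniforms are now uploaded on both the cache-hit and cache-miss paths,
// since PutParametersAndRun() was hoisted out of CreateNewProgram().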
PutParametersAndRun(vs, ps);
GL_REPORT_ERRORD();
}
}
@ -637,8 +636,7 @@ inline int SetUniformParam(ZZshProgram prog, ZZshParameter* param, const char* n
NumActiveUniforms++;
}
else
*param = -1;
else *param = -1;
return p;
}
@ -666,7 +664,7 @@ char* AddContextToName(const char* name, int context) {
return newname;
}
void SetupFragmentProgramParameters(FRAGMENTSHADER* pf, int context, int type)
static void SetupFragmentProgramParameters(FRAGMENTSHADER* pf, int context, int type)
{
// uniform parameters
GLint p;
@ -782,7 +780,7 @@ static __forceinline void GlslHeaderString(char* header_string, const char* name
sprintf(header_string, "#version %d\n#define %s main\n%s\n", GLSL_VERSION, name, depth);
}
static __forceinline bool LOAD_VS(char* DefineString, const char* name, VERTEXSHADER vertex, int shaderver, ZZshProfile context, const char* depth)
static __forceinline bool LOAD_VS(char* DefineString, const char* name, VERTEXSHADER& vertex, int shaderver, ZZshProfile context, const char* depth)
{
bool flag;
char temp[200];
@ -794,7 +792,7 @@ static __forceinline bool LOAD_VS(char* DefineString, const char* name, VERTEXSH
return flag;
}
static __forceinline bool LOAD_PS(char* DefineString, const char* name, FRAGMENTSHADER fragment, int shaderver, ZZshProfile context, const char* depth)
static __forceinline bool LOAD_PS(char* DefineString, const char* name, FRAGMENTSHADER& fragment, int shaderver, ZZshProfile context, const char* depth)
{
bool flag;
char temp[200];
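// Illustration of the by-value bug fixed in the two signatures above
// (self-contained toy; the names are hypothetical):
struct ShaderToy { unsigned program = 0; };
static void load_by_value(ShaderToy s)  { s.program = 42; } // fills a copy; caller sees nothing
static void load_by_ref(ShaderToy& s)   { s.program = 42; } // fills the caller's object
// After load_by_value(a), a.program is still 0; after load_by_ref(b), b.program is 42.
// LOAD_VS/LOAD_PS fill fields of the passed VERTEXSHADER/FRAGMENTSHADER, so the
// compiled result was previously discarded along with the temporary copy.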

View File

@ -55,7 +55,7 @@ static __forceinline int GET_SHADER_INDEX(int type, int texfilter, int texwrap,
extern ZZshContext g_cgcontext;
static __forceinline CGprogram LoadShaderFromType(const char* srcdir, const char* srcfile, int type, int texfilter, int texwrap, int fog, int writedepth, int testaem, int exactcolor, int ps, int context)
static CGprogram LoadShaderFromType(const char* srcdir, const char* srcfile, int type, int texfilter, int texwrap, int fog, int writedepth, int testaem, int exactcolor, int ps, int context)
{
assert( texwrap < NUM_TEXWRAPS);
assert( type < NUM_TYPES );

View File

@ -624,17 +624,17 @@ void BitBltDepthMRTPS() {
gl_FragDepth = (log(g_fc0.y + dot(data, g_fBitBltZ)) * g_fOneColor.w) * g_fZMin.y + dot(data, g_fBitBltZ) * g_fZMin.x ;
}
/*static const float BlurKernel[9] = {
0.027601,
0.066213,
0.123701,
0.179952,
0.205065,
0.179952,
0.123701,
0.066213,
0.027601
};*/
// static const float BlurKernel[9] = {
// 0.027601,
// 0.066213,
// 0.123701,
// 0.179952,
// 0.205065,
// 0.179952,
// 0.123701,
// 0.066213,
// 0.027601
// };
half4 BilinearFloat16(float2 tex0)
{
@ -808,5 +808,4 @@ void BitBltVS() {
gl_TexCoord[1].xy = position.xy * g_fBitBltTrans.xy + g_fBitBltTrans.zw;
}
#endif VERTEX_SHADER
#endif