#include "opengl/opengl.hpp"

#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092

namespace ruby {

struct pVideoGLX : OpenGL {
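  //GLX extension entry points; init() resolves these at runtime via glGetProcAddress(),
  //so they remain nullptr when the driver does not expose the corresponding extension.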
  GLXContext (*glXCreateContextAttribs)(Display*, GLXFBConfig, GLXContext, int, const int*) = nullptr;
  int (*glXSwapInterval)(int) = nullptr;

  Display* display;
  int screen;
  Window xwindow;
  Colormap colormap;
  GLXContext glxcontext;
  GLXWindow glxwindow;

  struct {
    int version_major, version_minor;
    bool double_buffer;
    bool is_direct;
  } glx;
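
  //driver state exposed to the frontend through cap()/get()/set()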
  struct {
    Window handle;
    bool synchronize;
    unsigned depth;
    unsigned filter;
    string shader;
  } settings;

  bool cap(const string& name) {
    if(name == Video::Handle) return true;
    if(name == Video::Synchronize) return true;
    if(name == Video::Depth) return true;
    if(name == Video::Filter) return true;
    if(name == Video::Shader) return true;
    return false;
  }

  any get(const string& name) {
    if(name == Video::Handle) return (uintptr_t)settings.handle;
    if(name == Video::Synchronize) return settings.synchronize;
    if(name == Video::Depth) return settings.depth;
    if(name == Video::Filter) return settings.filter;
    return false;
  }

  bool set(const string& name, const any& value) {
    if(name == Video::Handle) {
      settings.handle = any_cast<uintptr_t>(value);
      return true;
    }

    if(name == Video::Synchronize) {
      if(settings.synchronize != any_cast<bool>(value)) {
        settings.synchronize = any_cast<bool>(value);
        if(glXSwapInterval) glXSwapInterval(settings.synchronize);
        return true;
      }
    }
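
    //Video::Depth: anything above the X display's default depth is rejected, then
    //24-bit selects an 8-bit-per-channel format and 30-bit a 10-bit-per-channel format
    //(format/inputFormat come from the OpenGL base implementation in opengl.hpp).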
    if(name == Video::Depth) {
      unsigned depth = any_cast<unsigned>(value);
      if(depth > DefaultDepth(display, screen)) return false;

      switch(depth) {
      case 24: format = GL_RGBA8; inputFormat = GL_UNSIGNED_INT_8_8_8_8_REV; break;
      case 30: format = GL_RGB10_A2; inputFormat = GL_UNSIGNED_INT_2_10_10_10_REV; break;
      default: return false;
      }

      settings.depth = depth;
      return true;
    }
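
    //the nearest/linear filter is only applied while no shader is active
    //(when a shader is set, OpenGL::filter is left untouched).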
    if(name == Video::Filter) {
      settings.filter = any_cast<unsigned>(value);
      if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
      return true;
    }

    if(name == Video::Shader) {
      settings.shader = any_cast<const char*>(value);
      OpenGL::shader(settings.shader);
      if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
      return true;
    }

    return false;
  }
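
  //lock() resizes the backing buffer and hands the frontend a data pointer and pitch
  //to write one frame into; unlock() is a no-op here (the buffer is presumably
  //consumed later by OpenGL::refresh()).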
  bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
    OpenGL::size(width, height);
    return OpenGL::lock(data, pitch);
  }

  void unlock() {
  }

  void clear() {
    OpenGL::clear();
    if(glx.double_buffer) glXSwapBuffers(display, glxwindow);
  }

  void refresh() {
    //we must ensure that the child window is the same size as the parent window.
    //unfortunately, we cannot hook the parent window resize event notification,
    //as we did not create the parent window, nor have any knowledge of the toolkit used.
    //therefore, inelegant as it may be, we query each window size and resize as needed.
    XWindowAttributes parent, child;
    XGetWindowAttributes(display, settings.handle, &parent);
    XGetWindowAttributes(display, xwindow, &child);
    if(child.width != parent.width || child.height != parent.height) {
      XResizeWindow(display, xwindow, parent.width, parent.height);
    }

    outputWidth = parent.width, outputHeight = parent.height;
    OpenGL::refresh();
    if(glx.double_buffer) glXSwapBuffers(display, glxwindow);
  }

  bool init() {
    term();

    glXQueryVersion(display, &glx.version_major, &glx.version_minor);
    //require GLX 1.2+ API
    if(glx.version_major < 1 || (glx.version_major == 1 && glx.version_minor < 2)) return false;

    XWindowAttributes window_attributes;
    XGetWindowAttributes(display, settings.handle, &window_attributes);

    //let GLX determine the best Visual to use for GL output; provide a few hints
    //note: some video drivers will override the double buffering attribute
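    //with settings.depth == 24 this requests 8 bits per channel; with 30, 10 bits
    //per channel (depth / 3, with any remainder going to green).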
    int attributeList[] = {
      GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
      GLX_RENDER_TYPE, GLX_RGBA_BIT,
      GLX_DOUBLEBUFFER, True,
      GLX_RED_SIZE, (signed)(settings.depth / 3),
      GLX_GREEN_SIZE, (signed)(settings.depth / 3) + (signed)(settings.depth % 3),
      GLX_BLUE_SIZE, (signed)(settings.depth / 3),
      None
    };

    int fbCount = 0;
    GLXFBConfig* fbConfig = glXChooseFBConfig(display, screen, attributeList, &fbCount);
    if(fbCount == 0) return false;

    XVisualInfo* vi = glXGetVisualFromFBConfig(display, fbConfig[0]);

    //Window settings.handle has already been realized, most likely with DefaultVisual.
    //GLX requires that the GL output window has the same Visual as the GLX context.
    //it is not possible to change the Visual of an already realized (created) window.
    //therefore a new child window, using the same GLX Visual, must be created and bound to settings.handle.
    colormap = XCreateColormap(display, RootWindow(display, vi->screen), vi->visual, AllocNone);
    XSetWindowAttributes attributes;
    attributes.colormap = colormap;
    attributes.border_pixel = 0;
    xwindow = XCreateWindow(display, /* parent = */ settings.handle,
      /* x = */ 0, /* y = */ 0, window_attributes.width, window_attributes.height,
      /* border_width = */ 0, vi->depth, InputOutput, vi->visual,
      CWColormap | CWBorderPixel, &attributes);
    XSetWindowBackground(display, xwindow, /* color = */ 0);
    XMapWindow(display, xwindow);
    XFlush(display);

    //window must be realized (appear onscreen) before we make the context current
    while(XPending(display)) {
      XEvent event;
      XNextEvent(display, &event);
    }

    glxcontext = glXCreateContext(display, vi, /* sharelist = */ 0, /* direct = */ GL_TRUE);
    glXMakeCurrent(display, glxwindow = xwindow, glxcontext);

    glXCreateContextAttribs = (GLXContext (*)(Display*, GLXFBConfig, GLXContext, int, const int*))glGetProcAddress("glXCreateContextAttribsARB");
    glXSwapInterval = (int (*)(int))glGetProcAddress("glXSwapIntervalSGI");
    if(!glXSwapInterval) glXSwapInterval = (int (*)(int))glGetProcAddress("glXSwapIntervalMESA");
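
    //if GLX_ARB_create_context is available, recreate the context as OpenGL 3.2,
    //replacing the legacy context created above.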
    if(glXCreateContextAttribs) {
      int attributes[] = {
        GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
        GLX_CONTEXT_MINOR_VERSION_ARB, 2,
        None
      };
      GLXContext context = glXCreateContextAttribs(display, fbConfig[0], nullptr, true, attributes);
      if(context) {
        glXMakeCurrent(display, 0, nullptr);
        glXDestroyContext(display, glxcontext);
        glXMakeCurrent(display, glxwindow, glxcontext = context);
      }
    }

    if(glXSwapInterval) {
      glXSwapInterval(settings.synchronize);
    }

    //read attributes of frame buffer for later use, as requested attributes from above are not always granted
    int value = 0;
    glXGetConfig(display, vi, GLX_DOUBLEBUFFER, &value);
    glx.double_buffer = value;
    glx.is_direct = glXIsDirect(display, glxcontext);

    OpenGL::init();
    return true;
  }

  void term() {
    OpenGL::term();

    if(glxcontext) {
      glXDestroyContext(display, glxcontext);
      glxcontext = nullptr;
    }

    if(xwindow) {
      XUnmapWindow(display, xwindow);
      xwindow = 0;
    }

    if(colormap) {
      XFreeColormap(display, colormap);
      colormap = 0;
    }
  }

  pVideoGLX() {
    display = XOpenDisplay(0);
    screen = DefaultScreen(display);

    settings.handle = 0;
    settings.synchronize = false;
    settings.depth = 24;
    settings.filter = 1; //linear

    xwindow = 0;
    colormap = 0;
    glxcontext = nullptr;
    glxwindow = 0;
  }

  ~pVideoGLX() {
    term();
    XCloseDisplay(display);
  }
};
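
//typical frontend usage (a sketch; the public wrapper comes from ruby's Video interface):
//  set(Video::Handle, (uintptr_t)windowId); init();
//  each frame: lock(data, pitch, width, height) -> write pixels -> unlock() -> refresh();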

DeclareVideo(GLX)

};