#ifndef NALL_PNG_HPP
#define NALL_PNG_HPP

//PNG image decoder
//author: byuu

#include <stdlib.h>  //realloc(), free()
#include <string.h>  //memcpy()
#include <nall/inflate.hpp>
#include <nall/string.hpp>

namespace nall {

struct png {
  //colorType:
  //0 = L       (grayscale)
  //2 = R,G,B   (truecolor)
  //3 = P       (palette index)
  //4 = L,A     (grayscale + alpha)
  //6 = R,G,B,A (truecolor + alpha)

  struct Info {
    unsigned width;
    unsigned height;
    unsigned bitDepth;
    unsigned colorType;
    unsigned compressionMethod;
    unsigned filterType;
    unsigned interlaceMethod;

    unsigned bytesPerPixel;  //derived from colorType and bitDepth
    unsigned pitch;          //derived: width * bytesPerPixel

    uint8_t palette[256][3];
  } info;

  uint8_t *data;  //decoded pixel data; nullptr until decode() succeeds
  unsigned size;  //size of data in bytes

  inline bool decode(const string &filename);
  inline bool decode(const uint8_t *sourceData, unsigned sourceSize);
  inline unsigned readbits(const uint8_t *&data);
  unsigned bitpos;  //bit offset into the current byte, used by readbits()

  inline png();
  inline ~png();

protected:
  //chunk types as big-endian ASCII: "IHDR", "PLTE", "IDAT", "IEND"
  enum class FourCC : unsigned {
    IHDR = 0x49484452,
    PLTE = 0x504c5445,
    IDAT = 0x49444154,
    IEND = 0x49454e44,
  };

  inline unsigned interlace(unsigned pass, unsigned index);
  inline unsigned inflateSize();
  inline bool deinterlace(const uint8_t *&inputData, unsigned pass);
  inline bool filter(uint8_t *outputData, const uint8_t *inputData, unsigned width, unsigned height);
  inline unsigned read(const uint8_t *data, unsigned length);
};
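
//Usage sketch ("picture.png" is a hypothetical path):
//
//  nall::png image;
//  if(image.decode("picture.png")) {
//    //image.info describes the geometry (width, height, bitDepth, pitch,
//    //bytesPerPixel); image.data holds the unfiltered (and deinterlaced,
//    //if Adam7) scanlines. For palette images (colorType 3), data holds
//    //indices to be looked up in info.palette[index][0..2].
//  }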

bool png::decode(const string &filename) {
  if(auto memory = file::read(filename)) {
    return decode(memory.data(), memory.size());
  }
  return false;
}

bool png::decode(const uint8_t *sourceData, unsigned sourceSize) {
  if(sourceSize < 8) return false;
  if(read(sourceData + 0, 4) != 0x89504e47) return false;  //signature: "\x89PNG"
  if(read(sourceData + 4, 4) != 0x0d0a1a0a) return false;  //signature: "\r\n\x1a\n"

  uint8_t *compressedData = nullptr;  //concatenated IDAT payloads
  unsigned compressedSize = 0;

  unsigned offset = 8;
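
  //a well-formed stream is a chunk sequence: IHDR (header), optional PLTE
  //(palette), one or more IDAT (compressed image data), then IEND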
  while(offset < sourceSize) {
    //chunk layout: 4-byte length, 4-byte type, payload, 4-byte CRC
    if(offset + 12 > sourceSize) break;  //not enough room for even an empty chunk
    unsigned length = read(sourceData + offset + 0, 4);
    if(length > sourceSize - offset - 12) break;  //payload would overrun the buffer
    unsigned fourCC = read(sourceData + offset + 4, 4);
    unsigned checksum = read(sourceData + offset + 8 + length, 4);  //read but not verified

    if(fourCC == (unsigned)FourCC::IHDR) {
      info.width = read(sourceData + offset + 8, 4);
      info.height = read(sourceData + offset + 12, 4);
      info.bitDepth = read(sourceData + offset + 16, 1);
      info.colorType = read(sourceData + offset + 17, 1);
      info.compressionMethod = read(sourceData + offset + 18, 1);
      info.filterType = read(sourceData + offset + 19, 1);
      info.interlaceMethod = read(sourceData + offset + 20, 1);

      if(info.bitDepth == 0 || info.bitDepth > 16) return false;
      if(info.bitDepth & (info.bitDepth - 1)) return false;  //bitDepth must be a power of two
      if(info.compressionMethod != 0) return false;  //0 (deflate) is the only defined method
      if(info.filterType != 0) return false;  //0 (adaptive) is the only defined filter scheme
      if(info.interlaceMethod != 0 && info.interlaceMethod != 1) return false;  //none or Adam7

      //bytesPerPixel is in bits at this point; it is converted to whole bytes below
      switch(info.colorType) {
      case 0: info.bytesPerPixel = info.bitDepth * 1; break;  //L
      case 2: info.bytesPerPixel = info.bitDepth * 3; break;  //R,G,B
      case 3: info.bytesPerPixel = info.bitDepth * 1; break;  //P
      case 4: info.bytesPerPixel = info.bitDepth * 2; break;  //L,A
      case 6: info.bytesPerPixel = info.bitDepth * 4; break;  //R,G,B,A
      default: return false;
      }

      if(info.colorType == 2 || info.colorType == 4 || info.colorType == 6)
        if(info.bitDepth != 8 && info.bitDepth != 16) return false;
      if(info.colorType == 3 && info.bitDepth == 16) return false;

      info.bytesPerPixel = (info.bytesPerPixel + 7) / 8;  //round bits up to whole bytes
      info.pitch = (int)info.width * info.bytesPerPixel;
    }
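
    //e.g. 8-bit R,G,B,A: bytesPerPixel = (8 * 4 + 7) / 8 = 4, so a
    //100-pixel-wide image has pitch = 400 bytes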

    if(fourCC == (unsigned)FourCC::PLTE) {
      if(length % 3 || length / 3 > 256) return false;  //R,G,B triplets; at most 256 entries
      for(unsigned n = 0, p = offset + 8; n < length / 3; n++) {
        info.palette[n][0] = sourceData[p++];
        info.palette[n][1] = sourceData[p++];
        info.palette[n][2] = sourceData[p++];
      }
    }

    if(fourCC == (unsigned)FourCC::IDAT) {
      //image data may be split across multiple IDAT chunks; append each payload
      compressedData = (uint8_t*)realloc(compressedData, compressedSize + length);
      memcpy(compressedData + compressedSize, sourceData + offset + 8, length);
      compressedSize += length;
    }

    if(fourCC == (unsigned)FourCC::IEND) {
      break;
    }

    offset += 4 + 4 + length + 4;  //length field + type + payload + CRC
  }

  if(compressedData == nullptr || compressedSize < 6) return false;  //no IDAT data to decompress

  unsigned interlacedSize = inflateSize();
  uint8_t *interlacedData = new uint8_t[interlacedSize];

  //skip the 2-byte zlib header; drop the 4-byte adler32 footer
  bool result = inflate(interlacedData, interlacedSize, compressedData + 2, compressedSize - 6);
  free(compressedData);  //allocated with realloc(), so free() rather than delete[]

  if(result == false) {
    delete[] interlacedData;
    return false;
  }

  if(data) delete[] data;  //release any image from a previous decode
  size = info.width * info.height * info.bytesPerPixel;
  data = new uint8_t[size];

  if(info.interlaceMethod == 0) {
    if(filter(data, interlacedData, info.width, info.height) == false) {
      delete[] interlacedData;
      delete[] data;
      data = nullptr;
      return false;
    }
  } else {
    //Adam7: seven progressively denser passes, each stored as its own sub-image
    const uint8_t *passData = interlacedData;
    for(unsigned pass = 0; pass < 7; pass++) {
      if(deinterlace(passData, pass) == false) {
        delete[] interlacedData;
        delete[] data;
        data = nullptr;
        return false;
      }
    }
  }

  delete[] interlacedData;
  return true;
}

unsigned png::interlace(unsigned pass, unsigned index) {
  static const unsigned data[7][4] = {
    //x-distance, y-distance, x-origin, y-origin
    {8, 8, 0, 0},
    {8, 8, 4, 0},
    {4, 8, 0, 4},
    {4, 4, 2, 0},
    {2, 4, 0, 2},
    {2, 2, 1, 0},
    {1, 2, 0, 1},
  };
  return data[pass][index];
}
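
//e.g. for a 16x16 image the seven passes measure 2x2, 2x2, 4x2, 4x4,
//8x4, 8x8, and 16x8 pixels, per the width/height formulas used in
//inflateSize() and deinterlace() below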

unsigned png::inflateSize() {
  if(info.interlaceMethod == 0) {
    //one filter-type byte precedes each scanline, hence the "+ info.height"
    return info.width * info.height * info.bytesPerPixel + info.height;
  }

  unsigned size = 0;
  for(unsigned pass = 0; pass < 7; pass++) {
    unsigned xd = interlace(pass, 0), yd = interlace(pass, 1);
    unsigned xo = interlace(pass, 2), yo = interlace(pass, 3);
    unsigned width = (info.width + (xd - xo - 1)) / xd;
    unsigned height = (info.height + (yd - yo - 1)) / yd;
    if(width == 0 || height == 0) continue;
    size += width * height * info.bytesPerPixel + height;
  }
  return size;
}
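
//e.g. a non-interlaced 16x16 image at 4 bytesPerPixel inflates to
//16 * 16 * 4 + 16 = 1040 bytes: one filter-type byte per scanline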

bool png::deinterlace(const uint8_t *&inputData, unsigned pass) {
  unsigned xd = interlace(pass, 0), yd = interlace(pass, 1);
  unsigned xo = interlace(pass, 2), yo = interlace(pass, 3);
  unsigned width = (info.width + (xd - xo - 1)) / xd;
  unsigned height = (info.height + (yd - yo - 1)) / yd;
  if(width == 0 || height == 0) return true;  //this pass contributes no pixels

  unsigned outputSize = width * height * info.bytesPerPixel;
  uint8_t *outputData = new uint8_t[outputSize];
  bool result = filter(outputData, inputData, width, height);

  //scatter this pass's pixels to their final positions in the full image
  const uint8_t *rd = outputData;
  for(unsigned y = yo; y < info.height; y += yd) {
    uint8_t *wr = data + y * info.pitch;
    for(unsigned x = xo; x < info.width; x += xd) {
      for(unsigned b = 0; b < info.bytesPerPixel; b++) {
        wr[x * info.bytesPerPixel + b] = *rd++;
      }
    }
  }

  inputData += outputSize + height;  //step past this pass's pixels and its per-scanline filter bytes
  delete[] outputData;
  return result;
}
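
//PNG scanline reconstruction: every scanline begins with a filter-type
//byte. With a = byte one pixel to the left, b = byte above, c = byte
//above-left (each 0 when out of range), the raw byte is recovered as:
//  0 None:    Recon(x) = Filt(x)
//  1 Sub:     Recon(x) = Filt(x) + a
//  2 Up:      Recon(x) = Filt(x) + b
//  3 Average: Recon(x) = Filt(x) + floor((a + b) / 2)
//  4 Paeth:   Recon(x) = Filt(x) + PaethPredictor(a, b, c)
//(sums modulo 256). For Paeth with a=100, b=50, c=40: p = a+b-c = 110,
//pa=10, pb=60, pc=70, so the predictor picks a.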

bool png::filter(uint8_t *outputData, const uint8_t *inputData, unsigned width, unsigned height) {
  uint8_t *wr = outputData;
  const uint8_t *rd = inputData;
  int bpp = info.bytesPerPixel, pitch = width * bpp;
  for(int y = 0; y < (int)height; y++) {
    uint8_t filter = *rd++;

    switch(filter) {
    case 0x00:  //None
      for(int x = 0; x < pitch; x++) {
        wr[x] = rd[x];
      }
      break;

    case 0x01:  //Sub
      for(int x = 0; x < pitch; x++) {
        wr[x] = rd[x] + (x - bpp < 0 ? 0 : wr[x - bpp]);  //add the byte one pixel to the left
      }
      break;

    case 0x02:  //Up
      for(int x = 0; x < pitch; x++) {
        wr[x] = rd[x] + (y - 1 < 0 ? 0 : wr[x - pitch]);  //add the byte directly above
      }
      break;

    case 0x03:  //Average
      for(int x = 0; x < pitch; x++) {
        short a = x - bpp < 0 ? 0 : wr[x - bpp];
        short b = y - 1 < 0 ? 0 : wr[x - pitch];

        wr[x] = rd[x] + (uint8_t)((a + b) / 2);
      }
      break;

    case 0x04:  //Paeth
      for(int x = 0; x < pitch; x++) {
        short a = x - bpp < 0 ? 0 : wr[x - bpp];
        short b = y - 1 < 0 ? 0 : wr[x - pitch];
        short c = x - bpp < 0 || y - 1 < 0 ? 0 : wr[x - pitch - bpp];

        short p = a + b - c;
        short pa = p > a ? p - a : a - p;
        short pb = p > b ? p - b : b - p;
        short pc = p > c ? p - c : c - p;

        //predict from whichever neighbor is closest to p = a + b - c
        uint8_t paeth = (uint8_t)((pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c);

        wr[x] = rd[x] + paeth;
      }
      break;

    default:  //Invalid
      return false;
    }

    rd += pitch;
    wr += pitch;
  }

  return true;
}

unsigned png::read(const uint8_t *data, unsigned length) {
  unsigned result = 0;
  while(length--) result = (result << 8) | (*data++);  //big-endian
  return result;
}
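
//e.g. read() over the bytes {0x49, 0x48, 0x44, 0x52} ("IHDR") yields
//0x49484452, matching FourCC::IHDR above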

unsigned png::readbits(const uint8_t *&data) {
  unsigned result = 0;
  switch(info.bitDepth) {
  case 1:
    result = (*data >> bitpos) & 1;
    bitpos++;
    if(bitpos == 8) { data++; bitpos = 0; }
    break;
  case 2:
    result = (*data >> bitpos) & 3;
    bitpos += 2;
    if(bitpos == 8) { data++; bitpos = 0; }
    break;
  case 4:
    result = (*data >> bitpos) & 15;
    bitpos += 4;
    if(bitpos == 8) { data++; bitpos = 0; }
    break;
  case 8:
    result = *data++;
    break;
  case 16:
    result = (data[0] << 8) | (data[1] << 0);  //16-bit samples are big-endian
    data += 2;
    break;
  }
  return result;
}
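
//note: readbits() serves callers unpacking sub-byte samples from data[];
//since PNG scanlines are byte-aligned, consumers would reset bitpos to 0
//at each scanline boundary (an assumption about intended use; nothing in
//this file calls readbits() itself)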

png::png() : data(nullptr) {
  bitpos = 0;
}

png::~png() {
  if(data) delete[] data;
}

}

#endif