Do all arbitrary mipmap detection in integer space

This no longer converts from sRGB to linear for the reference mip
downsample - even if the original mipmap creation tool used an sRGB
colorspace (which isn't really guaranteed, and may even change per
game), this is a "fast" heuristic that's only an estimate anyway.
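
As a rough sketch of what that trade-off looks like (the sRGB helpers
below assume the standard IEC 61966-2-1 curve purely for illustration,
not Dolphin's exact implementation), the two filters only diverge
noticeably across hard brightness edges:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Assumed sRGB helpers (standard curve), shown only for comparison.
    static float SRGBToLinear(uint8_t v)
    {
      const float c = v / 255.0f;
      return c <= 0.04045f ? c / 12.92f : std::pow((c + 0.055f) / 1.055f, 2.4f);
    }

    static int LinearToSRGB(float c)
    {
      c = c <= 0.0031308f ? c * 12.92f : 1.055f * std::pow(c, 1.0f / 2.4f) - 0.055f;
      return static_cast<int>(c * 255.0f + 0.5f);
    }

    int main()
    {
      // Four neighbouring texels across a hard black/white edge - the
      // worst case for skipping the linearisation.
      const uint8_t s[4] = {0, 0, 255, 255};

      // Old reference downsample: average in linear light, re-encode.
      float linear_sum = 0.0f;
      for (uint8_t v : s)
        linear_sum += SRGBToLinear(v);
      std::printf("linear-light average: %d\n", LinearToSRGB(linear_sum / 4)); // ~188

      // New reference downsample: integer byte average, rounded to nearest.
      const uint32_t sum = s[0] + s[1] + s[2] + s[3];
      std::printf("integer average:      %u\n", (sum + 2) / 4); // 128
    }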

The average diff is also now stored in a u64, avoiding floating point
calculations in the per-pixel hot loop.
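
For the overflow headroom that buys (a quick back-of-the-envelope check
matching the ASSERT added below): each RGBA8 pixel contributes at most
255^2 per channel across 4 channels, just under 2^18, so a u64 sum can
only overflow past roughly 2^46 pixels:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main()
    {
      // Worst case per RGBA8 pixel: all 4 channels differ by the full 255,
      // and each squared difference is added to the sum.
      const uint64_t max_per_pixel = 255ULL * 255ULL * 4ULL; // 260100 < 2^18

      // Pixels a u64 accumulator can absorb before it could overflow:
      // about 2^46, far beyond any real texture.
      const uint64_t limit = std::numeric_limits<uint64_t>::max() / max_per_pixel;
      std::printf("max diff per pixel:     %llu\n",
                  static_cast<unsigned long long>(max_per_pixel));
      std::printf("pixels before overflow: %llu\n",
                  static_cast<unsigned long long>(limit));
    }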

This should speed up the detection significantly, hopefully fixing
jank when loading new textures.
Jonathan Hamilton 2018-06-25 22:01:37 -07:00
parent 4eeef6e5b3
commit ac05d1a593
1 changed file with 35 additions and 16 deletions

@@ -486,6 +486,7 @@ class ArbitraryMipmapDetector
 {
 private:
   using PixelRGBAf = std::array<float, 4>;
+  using PixelRGBAu8 = std::array<u8, 4>;
 
 public:
   explicit ArbitraryMipmapDetector() = default;
@@ -519,6 +520,12 @@ public:
       const auto& level = levels[i];
       const auto& mip = levels[i + 1];
 
+      u64 level_pixel_count = level.shape.width;
+      level_pixel_count *= level.shape.height;
+
+      // AverageDiff stores the difference sum in a u64, so make sure we can't overflow
+      ASSERT(level_pixel_count < (std::numeric_limits<u64>::max() / (255 * 255 * 4)));
+
       // Manually downsample the past downsample with a simple box blur
       // This is not necessarily close to whatever the original artists used, however
       // It should still be closer than a thing that's not a downscale at all
@@ -568,6 +575,12 @@ private:
       return {{SRGBToLinear(p[0]), SRGBToLinear(p[1]), SRGBToLinear(p[2]), SRGBToLinear(p[3])}};
     }
 
+    static PixelRGBAu8 SampleLinear(const u8* src, const Shape& src_shape, u32 x, u32 y)
+    {
+      const auto* p = src + (x + y * src_shape.row_length) * 4;
+      return {{p[0], p[1], p[2], p[3]}};
+    }
+
     // Puts a downsampled image in dst. dst must be at least width*height*4
     static void Downsample(const u8* src, const Shape& src_shape, u8* dst, const Shape& dst_shape)
     {
@@ -577,29 +590,32 @@ private:
         {
           auto x = j * 2;
           auto y = i * 2;
-          const std::array<PixelRGBAf, 4> samples{{
-              Sample(src, src_shape, x, y),
-              Sample(src, src_shape, x + 1, y),
-              Sample(src, src_shape, x, y + 1),
-              Sample(src, src_shape, x + 1, y + 1),
+          const std::array<PixelRGBAu8, 4> samples{{
+              SampleLinear(src, src_shape, x, y),
+              SampleLinear(src, src_shape, x + 1, y),
+              SampleLinear(src, src_shape, x, y + 1),
+              SampleLinear(src, src_shape, x + 1, y + 1),
           }};
 
           auto* dst_pixel = dst + (j + i * dst_shape.row_length) * 4;
-          dst_pixel[0] =
-              LinearToSRGB((samples[0][0] + samples[1][0] + samples[2][0] + samples[3][0]) * 0.25f);
-          dst_pixel[1] =
-              LinearToSRGB((samples[0][1] + samples[1][1] + samples[2][1] + samples[3][1]) * 0.25f);
-          dst_pixel[2] =
-              LinearToSRGB((samples[0][2] + samples[1][2] + samples[2][2] + samples[3][2]) * 0.25f);
-          dst_pixel[3] =
-              LinearToSRGB((samples[0][3] + samples[1][3] + samples[2][3] + samples[3][3]) * 0.25f);
+          for (int channel = 0; channel < 4; channel++)
+          {
+            uint32_t channel_value = samples[0][channel] + samples[1][channel] +
+                                     samples[2][channel] + samples[3][channel];
+            dst_pixel[channel] = (channel_value + 2) / 4;
+          }
         }
       }
     }
 
     float AverageDiff(const u8* other) const
     {
-      float average_diff = 0.f;
+      // As textures are stored in (at most) 8 bit precision, each channel can
+      // have a max diff of (2^8)^2, multiply by 4 channels = 2^18 per pixel.
+      // That means to overflow, we must have a texture with more than 2^46
+      // pixels - which is way beyond anything the original hardware could do,
+      // and likely a sane assumption going forward for some significant time.
+      u64 current_diff_sum = 0;
       const auto* ptr1 = pixels;
       const auto* ptr2 = other;
       for (u32 i = 0; i < shape.height; ++i)
@@ -615,13 +631,16 @@ private:
             const int diff_squared = diff * diff;
             pixel_diff += diff_squared;
           }
-          average_diff += pixel_diff;
+          current_diff_sum += pixel_diff;
         }
         ptr1 += shape.row_length;
         ptr2 += shape.row_length;
       }
-      return average_diff / (shape.width * shape.height * 4) / 2.56f;
+      // calculate the RMSE over all pixels, divide by 2.56 to make it a percent
+      // (IE scale to 0..100 instead of 0..256)
+      return std::sqrt(static_cast<float>(current_diff_sum) / (shape.width * shape.height * 4)) /
+             2.56f;
     }
   };
 
   std::vector<Level> levels;
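
As a usage note on the returned score: it's now the root mean square
channel difference, which lives in 0..255, divided by 2.56 to land in
roughly 0..100. A minimal sketch of how a caller might read it (the 4.5
threshold here is hypothetical, purely for illustration - it is not
Dolphin's actual cutoff):

    #include <cmath>
    #include <cstdio>

    int main()
    {
      // Suppose every channel of every pixel differs by exactly 10 between
      // the stored mip and the reference box-filtered downsample.
      const double mse = 10.0 * 10.0;     // mean of the squared differences
      const double rmse = std::sqrt(mse); // back in 0..255 units
      const double score = rmse / 2.56;   // scaled to roughly 0..100

      // A detector compares the score against some tuned threshold.
      std::printf("diff score: %.2f%% -> %s\n", score,
                  score > 4.5 ? "arbitrary mips" : "plain downscale");
    }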