nv2a: Add ARL-bias to work around OpenGL float behaviour

This commit is contained in:
Jannik Vogel 2018-07-10 19:08:27 +02:00 committed by Matt
parent 2497e2d7c4
commit 4ffe83b696
1 changed file with 10 additions and 1 deletion

View File

@ -627,7 +627,16 @@ static const char* vsh_header =
"#define ARL(dest, src) dest = _ARL(_in(src).x)\n" "#define ARL(dest, src) dest = _ARL(_in(src).x)\n"
"int _ARL(float src)\n" "int _ARL(float src)\n"
"{\n" "{\n"
" return int(floor(src));\n" " /* Xbox GPU does specify rounding, OpenGL doesn't; so we need a bias.\n"
" * Example: We probably want to floor 16.99.. to 17, not 16.\n"
" * Source of error (why we get 16.99.. instead of 17.0) is typically\n"
" * vertex-attributes being normalized from a byte value to float:\n"
" * 17 / 255 = 0.06666.. so is this 0.06667 (ceil) or 0.06666 (floor)?\n"
" * Which value we get depends on the host GPU.\n"
" * If we multiply these rounded values by 255 later, we get:\n"
" * 17.00 (ARL result = 17) or 16.99 (ARL result = 16).\n"
" * We assume the intent was to get 17, so we add our bias to fix it. */\n"
" return int(floor(src + 0.001));\n"
"}\n" "}\n"
"\n" "\n"
"#define SGE(dest, mask, src0, src1) dest.mask = _SGE(_in(src0), _in(src1)).mask\n" "#define SGE(dest, mask, src0, src1) dest.mask = _SGE(_in(src0), _in(src1)).mask\n"