Applied the logic from r6691 to the LLE dec/add/sub functions so they work without ToMask. This should give a modest speedup for these.

Pierre's AR inc was already perfect and I only adjusted its logic a bit for visual consistency between the interpreter and JIT code.

Also applied Pierre's optimization from the LLE inc to the Int inc.

git-svn-id: https://dolphin-emu.googlecode.com/svn/trunk@6707 8ced0084-cf51-0410-be5f-012b33b47a6e
This commit is contained in:
mylek4 2011-01-01 13:19:07 +00:00
parent 60082853ec
commit 2f7da5d5d1
2 changed files with 159 additions and 250 deletions

View File

@ -105,7 +105,7 @@ inline u16 dsp_increment_addr_reg(u16 reg)
u32 nar = ar + 1; u32 nar = ar + 1;
if (((nar ^ ar) & ((wr | 1) << 1)) > wr) if ((nar ^ ar) > ((wr | 1) << 1))
nar -= wr + 1; nar -= wr + 1;
return nar; return nar;
} }

View File

@ -23,140 +23,47 @@
using namespace Gen; using namespace Gen;
// Performs the hashing required by increment/increase/decrease_addr_reg // addr math
// clobbers RCX //
void DSPEmitter::ToMask(X64Reg value_reg) // These functions detect overflow by checking if
{ // the bit past the top of the mask(WR) has changed in AR.
//microbenchmarking results(1 000 000 000 iterations): // They detect values under the minimum for a mask by adding wr + 1
// cpu\variant| 1 | 2 // and checking if the bit past the top of the mask doesn't change.
//intel mobile C2D@2.5GHz | 5.5s | 4.0s // Both are done while ignoring changes due to values/holes in IX
//amd athlon64x2@3GHz | 6.1s | 6.4s // above the mask.
#if 0
MOV(16, R(CX), R(value_reg));
SHR(16, R(CX), Imm8(8));
OR(16, R(value_reg), R(CX));
MOV(16, R(CX), R(value_reg));
SHR(16, R(CX), Imm8(4));
OR(16, R(value_reg), R(CX));
MOV(16, R(CX), R(value_reg));
SHR(16, R(CX), Imm8(2));
OR(16, R(value_reg), R(CX));
MOV(16, R(CX), R(value_reg));
SHR(16, R(CX), Imm8(1));
OR(16, R(value_reg), R(CX));
MOVZX(32, 16, value_reg, R(value_reg));
#else
BSR(16, CX, R(value_reg));
FixupBranch undef = J_CC(CC_Z); //CX is written, but undefined
MOV(32, R(value_reg), Imm32(2));
SHL(32, R(value_reg), R(CL));
SUB(32, R(value_reg), Imm32(1));
//don't waste an instruction on jumping over an effective noop
SetJumpTarget(undef); // EAX = g_dsp.r.ar[reg]
#endif // EDX = g_dsp.r.wr[reg]
OR(16, R(value_reg), Imm16(1)); // EDI = temp
} // ECX = temp
// EAX = g_dsp.r[reg]
// EDX = g_dsp.r[DSP_REG_WR0 + reg]
//clobbers RCX
void DSPEmitter::increment_addr_reg(int reg) void DSPEmitter::increment_addr_reg(int reg)
{ {
/*
u16 ar = g_dsp.r[reg];
u16 wr = g_dsp.r[reg+8];
u16 nar = ar+1;
//this works, because nar^ar will have all the bits from the highest
//changed bit downwards set(true only for +1!)
//based on an idea by Mylek
if((nar^ar)>=((wr<<1)|3))
nar -= wr+1;
*/
// s16 tmp = g_dsp.r[reg];
#ifdef _M_IX86 // All32 #ifdef _M_IX86 // All32
MOVZX(32, 16, EAX, M(&g_dsp.r.ar[reg])); MOVZX(32, 16, EAX, M(&g_dsp.r.ar[reg]));
MOV(16, R(DX), M(&g_dsp.r.wr[reg]));
#else
// MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOVZX(32, 16, EAX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOV(16, R(DX), MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg])));
#endif
MOV(32, R(EDI), R(EAX));
ADD(32, R(EAX), Imm32(1));
XOR(32, R(EDI), R(EAX));
MOVZX(32, 16, ESI, R(DX));
SHL(32, R(ESI), Imm8(1));
OR(32, R(ESI), Imm32(3));
CMP(32, R(EDI), R(ESI));
FixupBranch nowrap = J_CC(CC_B);
SUB(16, R(AX), R(DX));
SUB(16, R(AX), Imm16(1));
SetJumpTarget(nowrap);
// g_dsp.r[reg] = tmp;
#ifdef _M_IX86 // All32
MOV(16, M(&g_dsp.r.ar[reg]), R(AX));
#else
// MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])), R(AX));
#endif
}
// EAX = g_dsp.r[reg]
// EDX = g_dsp.r[DSP_REG_WR0 + reg]
//clobbers RCX
void DSPEmitter::decrement_addr_reg(int reg)
{
/*
u16 ar = g_dsp.r[reg];
u16 wr = g_dsp.r[reg+8];
u16 m = ToMask(wr) | 1;
u16 nar = ar-1;
if((ar&m) - 1 < m-wr)
nar += wr+1;
return nar;
*/
// s16 ar = g_dsp.r[reg];
#ifdef _M_IX86 // All32
MOV(16, R(AX), M(&g_dsp.r.ar[reg]));
MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg])); MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg]));
#else #else
// MOV(64, R(R11), ImmPtr(&g_dsp.r)); // MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, R(AX), MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg]))); MOVZX(32, 16, EAX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg]))); MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg])));
#endif #endif
// ToMask(WR0), calculating it into EDI //u32 nar = ar + 1;
//u16 m = ToMask(wr) | 1; MOV(32, R(EDI), R(EAX));
MOV(32, R(EDI), R(EDX)); ADD(32, R(EAX), Imm8(1));
ToMask(DI);
//u16 nar = ar-1; // if ((nar ^ ar) > ((wr | 1) << 1))
MOV(16, R(CX), R(AX)); // nar -= wr + 1;
SUB(16, R(AX), Imm16(1)); XOR(32, R(EDI), R(EAX));
LEA(32, ECX, MComplex(EDX, EDX, 1, 0));
OR(32, R(ECX), Imm8(2));
CMP(32, R(EDI), R(ECX));
FixupBranch nowrap = J_CC(CC_BE);
SUB(16, R(AX), R(DX));
SUB(16, R(AX), Imm8(1));
SetJumpTarget(nowrap);
// g_dsp.r.ar[reg] = nar;
//(ar&m) - 1
AND(32, R(ECX), R(EDI)); //extension of ECX to 32 bit
SUB(32, R(ECX), Imm32(1));
//m-wr
SUB(32, R(EDI), R(EDX));
CMP(32, R(ECX), R(EDI));
FixupBranch out1 = J_CC(CC_GE);
ADD(16, R(AX), R(DX));
ADD(16, R(AX), Imm16(1));
SetJumpTarget(out1);
// g_dsp.r[reg] = tmp;
#ifdef _M_IX86 // All32 #ifdef _M_IX86 // All32
MOV(16, M(&g_dsp.r.ar[reg]), R(AX)); MOV(16, M(&g_dsp.r.ar[reg]), R(AX));
#else #else
@ -165,174 +72,176 @@ void DSPEmitter::decrement_addr_reg(int reg)
#endif #endif
} }
// EAX = g_dsp.r.ar[reg]
// EDX = g_dsp.r.wr[reg]
// EDI = temp
// ECX = temp
void DSPEmitter::decrement_addr_reg(int reg)
{
#ifdef _M_IX86 // All32
MOVZX(32, 16, EAX, M(&g_dsp.r.ar[reg]));
MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg]));
#else
// MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOVZX(32, 16, EAX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg])));
#endif
// u32 nar = ar + wr;
// edi = nar
LEA(32, EDI, MComplex(EAX, EDX, 1, 0));
// if (((nar ^ ar) & ((wr | 1) << 1)) > wr)
// nar -= wr + 1;
XOR(32, R(EAX), R(EDI));
LEA(32, ECX, MComplex(EDX, EDX, 1, 0));
OR(32, R(ECX), Imm8(2));
AND(32, R(EAX), R(ECX));
CMP(32, R(EAX), R(EDX));
FixupBranch nowrap = J_CC(CC_BE);
SUB(16, R(DI), R(DX));
SUB(16, R(DI), Imm8(1));
SetJumpTarget(nowrap);
// g_dsp.r.ar[reg] = nar;
#ifdef _M_IX86 // All32
MOV(16, M(&g_dsp.r.ar[reg]), R(DI));
#else
// MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])), R(DI));
#endif
}
// Increase addr register according to the corresponding ix register // Increase addr register according to the corresponding ix register
// EAX = g_dsp.r[reg] // EAX = g_dsp.r.ar[reg]
// ECX = g_dsp.r[DSP_REG_IX0 + reg] // EDX = g_dsp.r.wr[reg]
// EDX = g_dsp.r[DSP_REG_WR0 + reg] // ESI = g_dsp.r.ix[reg]
// EDI = tomask(EDX) // ECX = temp
// EDI = temp
void DSPEmitter::increase_addr_reg(int reg) void DSPEmitter::increase_addr_reg(int reg)
{ {
/*
u16 ar = g_dsp.r[reg];
u16 wr = g_dsp.r[reg+8];
u16 ix = g_dsp.r[reg+4];
u16 m = ToMask(wr) | 1;
u16 nar = ar+ix;
if ((s16)ix >= 0) {
if((ar&m) + (ix&m) -(int)m-1 >= 0)
nar -= wr+1;
} else {
if((ar&m) + (ix&m) -(int)m-1 < m-wr)
nar += wr+1;
}
return nar;
*/
#ifdef _M_IX86 // All32 #ifdef _M_IX86 // All32
MOV(16, R(SI), M(&g_dsp.r.ix[reg])); MOVZX(32, 16, EAX, M(&g_dsp.r.ar[reg]));
MOV(16, R(AX), M(&g_dsp.r.ar[reg]));
MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg])); MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg]));
MOVSX(32, 16, ESI, M(&g_dsp.r.ix[reg]));
#else #else
// MOV(64, R(R11), ImmPtr(&g_dsp.r)); // MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, R(SI), MDisp(R11, STRUCT_OFFSET(g_dsp.r, ix[reg]))); MOVZX(32, 16, EAX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOV(16, R(AX), MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg]))); MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg])));
MOVSX(32, 16, ESI, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ix[reg])));
#endif #endif
//u32 nar = ar + ix;
//edi = nar
LEA(32, EDI, MComplex(EAX, ESI, 1, 0));
// ToMask(WR0), calculating it into EDI //u32 dar = (nar ^ ar ^ ix) & ((wr | 1) << 1);
//u16 m = ToMask(wr) | 1; //eax = dar
MOV(32, R(EDI), R(EDX)); XOR(32, R(EAX), R(ESI));
ToMask(DI); XOR(32, R(EAX), R(EDI));
LEA(32, ECX, MComplex(EDX, EDX, 1, 0));
OR(32, R(ECX), Imm8(2));
AND(32, R(EAX), R(ECX));
//u16 nar = ar+ix; //if (ix >= 0)
MOV(16, R(CX), R(AX)); TEST(32, R(ESI), R(ESI));
ADD(16, R(AX), R(SI)); FixupBranch negative = J_CC(CC_S);
//if (dar > wr)
TEST(16, R(SI), Imm16(0x8000)); CMP(32, R(EAX), R(EDX));
FixupBranch negative = J_CC(CC_NZ); FixupBranch done = J_CC(CC_BE);
//nar -= wr + 1;
//(ar&m) + (ix&m) -(int)m-1 SUB(16, R(DI), R(DX));
AND(32, R(ECX), R(EDI)); //extension of ECX to 32 bit SUB(16, R(DI), Imm8(1));
AND(32, R(ESI), R(EDI)); //extension of ESI to 32 bit FixupBranch done2 = J();
ADD(32, R(ECX), R(ESI));
SUB(32, R(ECX), R(EDI));
SUB(32, R(ECX), Imm32(1));
CMP(32, R(ECX), Imm32(0));
FixupBranch out1 = J_CC(CC_L);
SUB(16, R(AX), R(DX));
SUB(16, R(AX), Imm16(1));
FixupBranch out2 = J();
//else
SetJumpTarget(negative); SetJumpTarget(negative);
//if ((((nar + wr + 1) ^ nar) & dar) <= wr)
LEA(32, ECX, MComplex(EDI, EDX, 1, 1));
XOR(32, R(ECX), R(EDI));
AND(32, R(ECX), R(EAX));
CMP(32, R(ECX), R(EDX));
FixupBranch done3 = J_CC(CC_A);
//nar += wr + 1;
LEA(16, DI, MComplex(DI, DX, 1, 1));
//(ar&m) + (ix&m) -(int)m-1 SetJumpTarget(done);
AND(32, R(ECX), R(EDI)); //extension of ECX to 32 bit SetJumpTarget(done2);
AND(32, R(ESI), R(EDI)); //extension of ESI to 32 bit SetJumpTarget(done3);
ADD(32, R(ECX), R(ESI)); // g_dsp.r.ar[reg] = nar;
SUB(32, R(ECX), R(EDI));
SUB(32, R(ECX), Imm32(1));
//m-wr
SUB(32, R(EDI), R(EDX));
CMP(32, R(ECX), R(EDI));
FixupBranch out3 = J_CC(CC_GE);
ADD(16, R(AX), R(DX));
ADD(16, R(AX), Imm16(1));
SetJumpTarget(out1);
SetJumpTarget(out2);
SetJumpTarget(out3);
// g_dsp.r[reg] = tmp;
#ifdef _M_IX86 // All32 #ifdef _M_IX86 // All32
MOV(16, M(&g_dsp.r.ar[reg]), R(EAX)); MOV(16, M(&g_dsp.r.ar[reg]), R(DI));
#else #else
// MOV(64, R(R11), ImmPtr(&g_dsp.r)); // MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])), R(EAX)); MOV(16, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])), R(DI));
#endif #endif
} }
// Decrease addr register according to the corresponding ix register // Decrease addr register according to the corresponding ix register
// EAX = g_dsp.r[reg] // EAX = g_dsp.r.ar[reg]
// ECX = g_dsp.r[DSP_REG_IX0 + reg] // EDX = g_dsp.r.wr[reg]
// EDX = g_dsp.r[DSP_REG_WR0 + reg] // ESI = g_dsp.r.ix[reg]
// EDI = tomask(EDX) // ECX = temp
// EDI = temp
void DSPEmitter::decrease_addr_reg(int reg) void DSPEmitter::decrease_addr_reg(int reg)
{ {
/*
u16 ar = g_dsp.r[reg];
u16 wr = g_dsp.r[reg+8];
u16 ix = g_dsp.r[reg+4];
u16 m = ToMask(wr) | 1;
u16 nar = ar-ix; //!!
if ((u16)ix > 0x8000) { // equiv: ix < 0 && ix != -0x8000 //!!
if((ar&m) - (int)(ix&m) >= 0) //!!
nar -= wr+1;
} else {
if((ar&m) - (int)(ix&m) < m-wr) //!!
nar += wr+1;
}
return nar;
*/
#ifdef _M_IX86 // All32 #ifdef _M_IX86 // All32
MOV(16, R(SI), M(&g_dsp.r.ix[reg])); MOVZX(32, 16, EAX, M(&g_dsp.r.ar[reg]));
MOV(16, R(AX), M(&g_dsp.r.ar[reg]));
MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg])); MOVZX(32, 16, EDX, M(&g_dsp.r.wr[reg]));
MOVSX(32, 16, ESI, M(&g_dsp.r.ix[reg]));
#else #else
// MOV(64, R(R11), ImmPtr(&g_dsp.r)); // MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, R(SI), MDisp(R11, STRUCT_OFFSET(g_dsp.r, ix[reg]))); MOVZX(32, 16, EAX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOV(16, R(AX), MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])));
MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg]))); MOVZX(32, 16, EDX, MDisp(R11, STRUCT_OFFSET(g_dsp.r, wr[reg])));
MOVSX(32, 16, ESI, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ix[reg])));
#endif #endif
// ToMask(WR0), calculating it into EDI NOT(32, R(ESI)); //esi = ~ix
//u16 m = ToMask(wr) | 1;
MOV(32, R(EDI), R(EDX));
ToMask(DI);
//u16 nar = ar-ix; //u32 nar = ar - ix; (ar + ~ix + 1)
MOV(16, R(CX), R(AX)); LEA(32, EDI, MComplex(EAX, ESI, 1, 1));
SUB(16, R(AX), R(SI));
CMP(16, R(SI), Imm16(0x8000)); //u32 dar = (nar ^ ar ^ ~ix) & ((wr | 1) << 1);
FixupBranch negative = J_CC(CC_BE); //eax = dar
XOR(32, R(EAX), R(ESI));
XOR(32, R(EAX), R(EDI));
LEA(32, ECX, MComplex(EDX, EDX, 1, 0));
OR(32, R(ECX), Imm8(2));
AND(32, R(EAX), R(ECX));
//(ar&m) + (ix&m) //if ((u32)ix > 0xFFFF8000) ==> (~ix < 0x00007FFF)
AND(32, R(ECX), R(EDI)); CMP(32, R(ESI), Imm32(0x00007FFF));
AND(32, R(ESI), R(EDI)); FixupBranch positive = J_CC(CC_AE);
SUB(32, R(ECX), R(ESI)); //if (dar > wr)
CMP(32, R(EAX), R(EDX));
FixupBranch done = J_CC(CC_BE);
//nar -= wr + 1;
SUB(16, R(DI), R(DX));
SUB(16, R(DI), Imm8(1));
FixupBranch done2 = J();
CMP(32, R(ECX), Imm32(0)); //else
FixupBranch out1 = J_CC(CC_L); SetJumpTarget(positive);
SUB(16, R(AX), R(DX)); //if ((((nar + wr + 1) ^ nar) & dar) <= wr)
SUB(16, R(AX), Imm16(1)); LEA(32, ECX, MComplex(EDI, EDX, 1, 1));
FixupBranch out2 = J(); XOR(32, R(ECX), R(EDI));
AND(32, R(ECX), R(EAX));
CMP(32, R(ECX), R(EDX));
FixupBranch done3 = J_CC(CC_A);
//nar += wr + 1;
LEA(16, DI, MComplex(DI, DX, 1, 1));
SetJumpTarget(negative); SetJumpTarget(done);
SetJumpTarget(done2);
SetJumpTarget(done3);
//return nar
//(ar&m) + (ix&m)
AND(32, R(ECX), R(EDI));
AND(32, R(ESI), R(EDI));
SUB(32, R(ECX), R(ESI));
//m-wr
SUB(32, R(EDI), R(EDX));
CMP(32, R(ECX), R(EDI));
FixupBranch out3 = J_CC(CC_GE);
ADD(16, R(AX), R(DX));
ADD(16, R(AX), Imm16(1));
SetJumpTarget(out1);
SetJumpTarget(out2);
SetJumpTarget(out3);
// g_dsp.r[reg] = tmp;
#ifdef _M_IX86 // All32 #ifdef _M_IX86 // All32
MOV(16, M(&g_dsp.r.ar[reg]), R(EAX)); MOV(16, M(&g_dsp.r.ar[reg]), R(DI));
#else #else
// MOV(64, R(R11), ImmPtr(&g_dsp.r)); // MOV(64, R(R11), ImmPtr(&g_dsp.r));
MOV(16, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])), R(EAX)); MOV(16, MDisp(R11, STRUCT_OFFSET(g_dsp.r, ar[reg])), R(DI));
#endif #endif
} }