- It took a while, but I managed to convert hundreds of template functions into normal functions, cutting release link times by roughly another 50%.
PCSX2 now takes about 2 minutes to link a release build, which is a lot more tolerable than the ~10 minutes it took a few days ago...


git-svn-id: http://pcsx2.googlecode.com/svn/trunk@1291 96395faa-99c1-11dd-bbfe-3dabce05a288
cottonvibes 2009-05-30 04:16:45 +00:00
parent 1600dc1764
commit 51ab9a6888
13 changed files with 924 additions and 1098 deletions
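For readers skimming the diffs below: the conversion replaces functions templated on the VU index (which fetched their context via "microVU* mVU = mVUx;" and were called as foo<vuIndex>(...)) with ordinary functions that take the microVU context as an explicit parameter (the mV macro) and are called as foo(mVU, ...). Since the compiler no longer instantiates two copies of each body, there is far less code for the linker to process. The snippet that follows is only a minimal, self-contained sketch of that pattern; the MicroVU struct, function names, and globals in it are stand-ins for illustration, not the actual PCSX2 types or macros.

#include <cstdio>

struct MicroVU { int index; int progTotal; };        // stand-in for the real microVU struct

static MicroVU microVU0 = {0, 0};
static MicroVU microVU1 = {1, 0};

// Old style: one template instantiation per VU index. Every such function body
// gets compiled (and its symbols linked) twice, once for <0> and once for <1>.
template<int vuIndex>
void cacheProgTemplate() {
    MicroVU* mVU = vuIndex ? &microVU1 : &microVU0;  // analogous to "microVU* mVU = mVUx;"
    mVU->progTotal++;
    std::printf("microVU%d: cached programs = %d\n", mVU->index, mVU->progTotal);
}

// New style: a single ordinary function that receives the context as a parameter,
// so only one copy of the body exists no matter how many call sites or VUs use it.
void cacheProg(MicroVU* mVU) {
    mVU->progTotal++;
    std::printf("microVU%d: cached programs = %d\n", mVU->index, mVU->progTotal);
}

int main() {
    cacheProgTemplate<0>();   // old call style: foo<vuIndex>()
    cacheProgTemplate<1>();
    cacheProg(&microVU0);     // new call style: foo(mVU)
    cacheProg(&microVU1);
    return 0;
}

Halving the number of instantiated function bodies in this way is where the link-time savings described in the commit message come from.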

View File

@ -35,7 +35,7 @@ declareAllVariables // Declares All Global Variables :D
//------------------------------------------------------------------ //------------------------------------------------------------------
// Only run this once per VU! ;) // Only run this once per VU! ;)
microVUt(void) mVUinit(VURegs* vuRegsPtr) { microVUf(void) mVUinit(VURegs* vuRegsPtr) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
mVU->regs = vuRegsPtr; mVU->regs = vuRegsPtr;
@ -71,8 +71,8 @@ microVUx(void) mVUreset() {
// Setup Entrance/Exit Points // Setup Entrance/Exit Points
x86SetPtr(mVU->cache); x86SetPtr(mVU->cache);
mVUdispatcherA<vuIndex>(); mVUdispatcherA(mVU);
mVUdispatcherB<vuIndex>(); mVUdispatcherB(mVU);
// Clear All Program Data // Clear All Program Data
memset(&mVU->prog, 0, sizeof(mVU->prog)); memset(&mVU->prog, 0, sizeof(mVU->prog));
@ -98,7 +98,7 @@ microVUx(void) mVUreset() {
} }
// Free Allocated Resources // Free Allocated Resources
microVUt(void) mVUclose() { microVUf(void) mVUclose() {
microVU* mVU = mVUx; microVU* mVU = mVUx;
mVUprint((vuIndex) ? "microVU1: close" : "microVU0: close"); mVUprint((vuIndex) ? "microVU1: close" : "microVU0: close");
@ -116,7 +116,7 @@ microVUt(void) mVUclose() {
} }
// Clears Block Data in specified range // Clears Block Data in specified range
microVUt(void) mVUclear(u32 addr, u32 size) { microVUf(void) mVUclear(u32 addr, u32 size) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
if (!mVU->prog.cleared) { if (!mVU->prog.cleared) {
memset(&mVU->prog.lpState, 0, sizeof(mVU->prog.lpState)); memset(&mVU->prog.lpState, 0, sizeof(mVU->prog.lpState));
@ -129,7 +129,7 @@ microVUt(void) mVUclear(u32 addr, u32 size) {
//------------------------------------------------------------------ //------------------------------------------------------------------
// Clears program data (Sets used to 1 because calling this function implies the program will be used at least once) // Clears program data (Sets used to 1 because calling this function implies the program will be used at least once)
microVUt(void) mVUclearProg(int progIndex) { microVUf(void) mVUclearProg(int progIndex) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
mVU->prog.prog[progIndex].used = 1; mVU->prog.prog[progIndex].used = 1;
mVU->prog.prog[progIndex].last_used = 3; mVU->prog.prog[progIndex].last_used = 3;
@ -144,22 +144,22 @@ microVUt(void) mVUclearProg(int progIndex) {
} }
// Caches Micro Program // Caches Micro Program
microVUt(void) mVUcacheProg(int progIndex) { microVUf(void) mVUcacheProg(int progIndex) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
memcpy_fast(mVU->prog.prog[progIndex].data, mVU->regs->Micro, mVU->microSize); memcpy_fast(mVU->prog.prog[progIndex].data, mVU->regs->Micro, mVU->microSize);
mVUdumpProg(progIndex); mVUdumpProg(progIndex);
mVUcheckSflag<vuIndex>(progIndex); mVUcheckSflag(mVU, progIndex);
} }
// Finds the least used program, (if program list full clears and returns an old program; if not-full, returns free program) // Finds the least used program, (if program list full clears and returns an old program; if not-full, returns free program)
microVUt(int) mVUfindLeastUsedProg() { microVUf(int) mVUfindLeastUsedProg() {
microVU* mVU = mVUx; microVU* mVU = mVUx;
if (mVU->prog.total < mVU->prog.max) { if (mVU->prog.total < mVU->prog.max) {
mVU->prog.total++; mVU->prog.total++;
mVUcacheProg<vuIndex>(mVU->prog.total); // Cache Micro Program mVUcacheProg<vuIndex>(mVU->prog.total); // Cache Micro Program
mVU->prog.prog[mVU->prog.total].used = 1; mVU->prog.prog[mVU->prog.total].used = 1;
mVU->prog.prog[mVU->prog.total].last_used = 3; mVU->prog.prog[mVU->prog.total].last_used = 3;
DevCon::Notice("microVU%d: Cached MicroPrograms = %d", params vuIndex, mVU->prog.total+1); Console::Notice("microVU%d: Cached MicroPrograms = %d", params vuIndex, mVU->prog.total+1);
return mVU->prog.total; return mVU->prog.total;
} }
else { else {
@ -192,7 +192,7 @@ microVUt(int) mVUfindLeastUsedProg() {
// frame-based decrementing system in combination with a program-execution-based incrementing // frame-based decrementing system in combination with a program-execution-based incrementing
// system. In english: if last_used >= 2 it means the program has been used for the current // system. In english: if last_used >= 2 it means the program has been used for the current
// or prev frame. if it's 0, the program hasn't been used for a while. // or prev frame. if it's 0, the program hasn't been used for a while.
microVUt(void) mVUvsyncUpdate() { microVUf(void) mVUvsyncUpdate() {
microVU* mVU = mVUx; microVU* mVU = mVUx;
if (mVU->prog.total < mVU->prog.max) return; if (mVU->prog.total < mVU->prog.max) return;
@ -211,7 +211,7 @@ microVUt(void) mVUvsyncUpdate() {
} }
// Compare Cached microProgram to mVU->regs->Micro // Compare Cached microProgram to mVU->regs->Micro
microVUt(int) mVUcmpProg(int progIndex, bool progUsed, bool needOverflowCheck, bool cmpWholeProg) { microVUf(int) mVUcmpProg(int progIndex, bool progUsed, bool needOverflowCheck, bool cmpWholeProg) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
if (progUsed) { if (progUsed) {
@ -231,7 +231,7 @@ microVUt(int) mVUcmpProg(int progIndex, bool progUsed, bool needOverflowCheck, b
} }
// Searches for Cached Micro Program and sets prog.cur to it (returns 1 if program found, else returns 0) // Searches for Cached Micro Program and sets prog.cur to it (returns 1 if program found, else returns 0)
microVUt(int) mVUsearchProg() { microVUf(int) mVUsearchProg() {
microVU* mVU = mVUx; microVU* mVU = mVUx;
if (mVU->prog.cleared) { // If cleared, we need to search for new program if (mVU->prog.cleared) { // If cleared, we need to search for new program

View File

@ -137,24 +137,24 @@ extern void (*mVU_LOWER_OPCODE[128])( VURegs* VU, s32 info );
extern int mVUdebugNow; extern int mVUdebugNow;
// Main Functions // Main Functions
microVUt(void) mVUinit(VURegs*); microVUf(void) mVUinit(VURegs*);
microVUx(void) mVUreset(); microVUx(void) mVUreset();
microVUt(void) mVUclose(); microVUf(void) mVUclose();
microVUt(void) mVUclear(u32, u32); microVUf(void) mVUclear(u32, u32);
// Prototypes for Linux // Prototypes for Linux
void __fastcall mVUcleanUpVU0(); void __fastcall mVUcleanUpVU0();
void __fastcall mVUcleanUpVU1(); void __fastcall mVUcleanUpVU1();
void* __fastcall mVUcompileVU0(u32 startPC, uptr pState); void* __fastcall mVUcompileVU0(u32 startPC, uptr pState);
void* __fastcall mVUcompileVU1(u32 startPC, uptr pState); void* __fastcall mVUcompileVU1(u32 startPC, uptr pState);
microVUf(void) mVUopU(mF); mVUop(mVUopU);
microVUf(void) mVUopL(mF); mVUop(mVUopL);
// Private Functions // Private Functions
microVUt(void) mVUclearProg(microVU* mVU, int progIndex); microVUf(void) mVUclearProg(microVU* mVU, int progIndex);
microVUt(int) mVUfindLeastUsedProg(microVU* mVU); microVUf(int) mVUfindLeastUsedProg(microVU* mVU);
microVUt(int) mVUsearchProg(); microVUf(int) mVUsearchProg();
microVUt(void) mVUcacheProg(int progIndex); microVUf(void) mVUcacheProg(int progIndex);
void* __fastcall mVUexecuteVU0(u32 startPC, u32 cycles); void* __fastcall mVUexecuteVU0(u32 startPC, u32 cycles);
void* __fastcall mVUexecuteVU1(u32 startPC, u32 cycles); void* __fastcall mVUexecuteVU1(u32 startPC, u32 cycles);

View File

@ -27,12 +27,12 @@
//------------------------------------------------------------------ //------------------------------------------------------------------
#define getReg(reg, _reg_) { \ #define getReg(reg, _reg_) { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], _X_Y_Z_W); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], _X_Y_Z_W); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, _X_Y_Z_W); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, _X_Y_Z_W); \
} }
#define getZero(reg) { \ #define getZero(reg) { \
if (_W) { mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[0].UL[0], _X_Y_Z_W); } \ if (_W) { mVUloadReg(reg, (uptr)&mVU->regs->VF[0].UL[0], _X_Y_Z_W); } \
else { SSE_XORPS_XMM_to_XMM(reg, reg); } \ else { SSE_XORPS_XMM_to_XMM(reg, reg); } \
} }
@ -41,8 +41,7 @@
else { getReg(reg, _reg_); } \ else { getReg(reg, _reg_); } \
} }
microVUt(void) mVUallocFMAC1a(int& Fd, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC1a(mV, int& Fd, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -51,29 +50,26 @@ microVUt(void) mVUallocFMAC1a(int& Fd, int& Fs, int& Ft) {
else { getReg6(Ft, _Ft_); } else { getReg6(Ft, _Ft_); }
} }
microVUt(void) mVUallocFMAC1b(int& Fd) { microVUt(void) mVUallocFMAC1b(mV, int& Fd) {
microVU* mVU = mVUx;
if (!_Fd_) return; if (!_Fd_) return;
if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(Fd, xmmT1, _X_Y_Z_W); if (CHECK_VU_OVERFLOW) mVUclamp1(Fd, xmmT1, _X_Y_Z_W);
mVUsaveReg<vuIndex>(Fd, (uptr)&mVU->regs->VF[_Fd_].UL[0], _X_Y_Z_W, 1); mVUsaveReg(Fd, (uptr)&mVU->regs->VF[_Fd_].UL[0], _X_Y_Z_W, 1);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC2 - ABS/FTOI/ITOF Opcodes // FMAC2 - ABS/FTOI/ITOF Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC2a(int& Fs, int& Ft) { microVUt(void) mVUallocFMAC2a(mV, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFs; Ft = xmmFs;
getReg6(Fs, _Fs_); getReg6(Fs, _Fs_);
} }
microVUt(void) mVUallocFMAC2b(int& Ft) { microVUt(void) mVUallocFMAC2b(mV, int& Ft) {
microVU* mVU = mVUx;
if (!_Ft_) return; if (!_Ft_) return;
//if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(Ft, xmmT1, _X_Y_Z_W); //if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(Ft, xmmT1, _X_Y_Z_W);
mVUsaveReg<vuIndex>(Ft, (uptr)&mVU->regs->VF[_Ft_].UL[0], _X_Y_Z_W, 1); mVUsaveReg(Ft, (uptr)&mVU->regs->VF[_Ft_].UL[0], _X_Y_Z_W, 1);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -81,31 +77,30 @@ microVUt(void) mVUallocFMAC2b(int& Ft) {
//------------------------------------------------------------------ //------------------------------------------------------------------
#define getReg3SS(reg, _reg_) { \ #define getReg3SS(reg, _reg_) { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], (1 << (3 - _bc_))); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], (1 << (3 - _bc_))); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, (1 << (3 - _bc_))); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, (1 << (3 - _bc_))); \
} }
#define getReg3(reg, _reg_) { \ #define getReg3(reg, _reg_) { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], (1 << (3 - _bc_))); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], (1 << (3 - _bc_))); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, (1 << (3 - _bc_))); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, (1 << (3 - _bc_))); \
mVUunpack_xyzw<vuIndex>(reg, reg, 0); \ mVUunpack_xyzw(reg, reg, 0); \
} }
#define getZero3SS(reg) { \ #define getZero3SS(reg) { \
if (_bc_w) { mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[0].UL[0], 1); } \ if (_bc_w) { mVUloadReg(reg, (uptr)&mVU->regs->VF[0].UL[0], 1); } \
else { SSE_XORPS_XMM_to_XMM(reg, reg); } \ else { SSE_XORPS_XMM_to_XMM(reg, reg); } \
} }
#define getZero3(reg) { \ #define getZero3(reg) { \
if (_bc_w) { \ if (_bc_w) { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[0].UL[0], 1); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[0].UL[0], 1); \
mVUunpack_xyzw<vuIndex>(reg, reg, 0); \ mVUunpack_xyzw(reg, reg, 0); \
} \ } \
else { SSE_XORPS_XMM_to_XMM(reg, reg); } \ else { SSE_XORPS_XMM_to_XMM(reg, reg); } \
} }
microVUt(void) mVUallocFMAC3a(int& Fd, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC3a(mV, int& Fd, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -124,8 +119,8 @@ microVUt(void) mVUallocFMAC3a(int& Fd, int& Fs, int& Ft) {
} }
} }
microVUt(void) mVUallocFMAC3b(int& Fd) { microVUt(void) mVUallocFMAC3b(mV, int& Fd) {
mVUallocFMAC1b<vuIndex>(Fd); mVUallocFMAC1b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -133,17 +128,16 @@ microVUt(void) mVUallocFMAC3b(int& Fd) {
//------------------------------------------------------------------ //------------------------------------------------------------------
#define getReg4(reg, _reg_) { \ #define getReg4(reg, _reg_) { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], _xyzw_ACC); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], _xyzw_ACC); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, _xyzw_ACC); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, _xyzw_ACC); \
} }
#define getZero4(reg) { \ #define getZero4(reg) { \
if (_W) { mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[0].UL[0], _xyzw_ACC); } \ if (_W) { mVUloadReg(reg, (uptr)&mVU->regs->VF[0].UL[0], _xyzw_ACC); } \
else { SSE_XORPS_XMM_to_XMM(reg, reg); } \ else { SSE_XORPS_XMM_to_XMM(reg, reg); } \
} }
microVUt(void) mVUallocFMAC4a(int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC4a(mV, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
ACC = xmmACC; ACC = xmmACC;
Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs; Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs;
Ft = xmmFt; Ft = xmmFt;
@ -162,9 +156,8 @@ microVUt(void) mVUallocFMAC4a(int& ACC, int& Fs, int& Ft) {
} }
} }
microVUt(void) mVUallocFMAC4b(int& ACC, int& Fs) { microVUt(void) mVUallocFMAC4b(mV, int& ACC, int& Fs) {
microVU* mVU = mVUx; if (CHECK_VU_OVERFLOW) mVUclamp1(Fs, xmmT1, _xyzw_ACC);
if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(Fs, xmmT1, _xyzw_ACC);
mVUmergeRegs(ACC, Fs, _X_Y_Z_W); mVUmergeRegs(ACC, Fs, _X_Y_Z_W);
} }
@ -172,8 +165,7 @@ microVUt(void) mVUallocFMAC4b(int& ACC, int& Fs) {
// FMAC5 - FMAC BC(xyzw) Opcodes Storing Result to ACC // FMAC5 - FMAC BC(xyzw) Opcodes Storing Result to ACC
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC5a(int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC5a(mV, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
ACC = xmmACC; ACC = xmmACC;
Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs; Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs;
Ft = xmmFt; Ft = xmmFt;
@ -192,8 +184,8 @@ microVUt(void) mVUallocFMAC5a(int& ACC, int& Fs, int& Ft) {
} }
} }
microVUt(void) mVUallocFMAC5b(int& ACC, int& Fs) { microVUt(void) mVUallocFMAC5b(mV, int& ACC, int& Fs) {
mVUallocFMAC4b<vuIndex>(ACC, Fs); mVUallocFMAC4b(mVU, ACC, Fs);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -203,12 +195,11 @@ microVUt(void) mVUallocFMAC5b(int& ACC, int& Fs) {
#define getIreg(reg, modXYZW) { \ #define getIreg(reg, modXYZW) { \
MOV32MtoR(gprT1, (uptr)&mVU->regs->VI[REG_I].UL); \ MOV32MtoR(gprT1, (uptr)&mVU->regs->VI[REG_I].UL); \
SSE2_MOVD_R_to_XMM(reg, gprT1); \ SSE2_MOVD_R_to_XMM(reg, gprT1); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, 8); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, 8); \
if (!((_XYZW_SS && modXYZW) || (_X_Y_Z_W == 8))) { mVUunpack_xyzw<vuIndex>(reg, reg, 0); } \ if (!((_XYZW_SS && modXYZW) || (_X_Y_Z_W == 8))) { mVUunpack_xyzw(reg, reg, 0); } \
} }
microVUt(void) mVUallocFMAC6a(int& Fd, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC6a(mV, int& Fd, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -216,16 +207,15 @@ microVUt(void) mVUallocFMAC6a(int& Fd, int& Fs, int& Ft) {
getReg6(Fs, _Fs_); getReg6(Fs, _Fs_);
} }
microVUt(void) mVUallocFMAC6b(int& Fd) { microVUt(void) mVUallocFMAC6b(mV, int& Fd) {
mVUallocFMAC1b<vuIndex>(Fd); mVUallocFMAC1b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC7 - FMAC Opcodes Storing Result to ACC (I Reg) // FMAC7 - FMAC Opcodes Storing Result to ACC (I Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC7a(int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC7a(mV, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
ACC = xmmACC; ACC = xmmACC;
Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs; Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs;
Ft = xmmFt; Ft = xmmFt;
@ -235,16 +225,15 @@ microVUt(void) mVUallocFMAC7a(int& ACC, int& Fs, int& Ft) {
else { getReg4(Fs, _Fs_); } else { getReg4(Fs, _Fs_); }
} }
microVUt(void) mVUallocFMAC7b(int& ACC, int& Fs) { microVUt(void) mVUallocFMAC7b(mV, int& ACC, int& Fs) {
mVUallocFMAC4b<vuIndex>(ACC, Fs); mVUallocFMAC4b(mVU, ACC, Fs);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC8 - MADD FMAC Opcode Storing Result to Fd // FMAC8 - MADD FMAC Opcode Storing Result to Fd
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC8a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC8a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -264,19 +253,17 @@ microVUt(void) mVUallocFMAC8a(int& Fd, int& ACC, int& Fs, int& Ft) {
} }
} }
microVUt(void) mVUallocFMAC8b(int& Fd) { microVUt(void) mVUallocFMAC8b(mV, int& Fd) {
microVU* mVU = mVUx;
if (!_Fd_) return; if (!_Fd_) return;
if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(Fd, xmmT1, _xyzw_ACC); if (CHECK_VU_OVERFLOW) mVUclamp1(Fd, xmmT1, _xyzw_ACC);
mVUsaveReg<vuIndex>(Fd, (uptr)&mVU->regs->VF[_Fd_].UL[0], _X_Y_Z_W, 0); mVUsaveReg(Fd, (uptr)&mVU->regs->VF[_Fd_].UL[0], _X_Y_Z_W, 0);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC9 - MSUB FMAC Opcode Storing Result to Fd // FMAC9 - MSUB FMAC Opcode Storing Result to Fd
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC9a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC9a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmT1; Fd = xmmT1;
@ -297,19 +284,17 @@ microVUt(void) mVUallocFMAC9a(int& Fd, int& ACC, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC);
} }
microVUt(void) mVUallocFMAC9b(int& Fd) { microVUt(void) mVUallocFMAC9b(mV, int& Fd) {
microVU* mVU = mVUx;
if (!_Fd_) return; if (!_Fd_) return;
if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(Fd, xmmFt, _xyzw_ACC); if (CHECK_VU_OVERFLOW) mVUclamp1(Fd, xmmFt, _xyzw_ACC);
mVUsaveReg<vuIndex>(Fd, (uptr)&mVU->regs->VF[_Fd_].UL[0], _X_Y_Z_W, 0); mVUsaveReg(Fd, (uptr)&mVU->regs->VF[_Fd_].UL[0], _X_Y_Z_W, 0);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC10 - MADD FMAC BC(xyzw) Opcode Storing Result to Fd // FMAC10 - MADD FMAC BC(xyzw) Opcode Storing Result to Fd
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC10a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC10a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -329,16 +314,15 @@ microVUt(void) mVUallocFMAC10a(int& Fd, int& ACC, int& Fs, int& Ft) {
} }
} }
microVUt(void) mVUallocFMAC10b(int& Fd) { microVUt(void) mVUallocFMAC10b(mV, int& Fd) {
mVUallocFMAC8b<vuIndex>(Fd); mVUallocFMAC8b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC11 - MSUB FMAC BC(xyzw) Opcode Storing Result to Fd // FMAC11 - MSUB FMAC BC(xyzw) Opcode Storing Result to Fd
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC11a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC11a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmT1; Fd = xmmT1;
@ -359,16 +343,15 @@ microVUt(void) mVUallocFMAC11a(int& Fd, int& ACC, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC);
} }
microVUt(void) mVUallocFMAC11b(int& Fd) { microVUt(void) mVUallocFMAC11b(mV, int& Fd) {
mVUallocFMAC9b<vuIndex>(Fd); mVUallocFMAC9b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC12 - MADD FMAC Opcode Storing Result to Fd (I Reg) // FMAC12 - MADD FMAC Opcode Storing Result to Fd (I Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC12a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC12a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -379,16 +362,15 @@ microVUt(void) mVUallocFMAC12a(int& Fd, int& ACC, int& Fs, int& Ft) {
else { getReg4(Fs, _Fs_); } else { getReg4(Fs, _Fs_); }
} }
microVUt(void) mVUallocFMAC12b(int& Fd) { microVUt(void) mVUallocFMAC12b(mV, int& Fd) {
mVUallocFMAC8b<vuIndex>(Fd); mVUallocFMAC8b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC13 - MSUB FMAC Opcode Storing Result to Fd (I Reg) // FMAC13 - MSUB FMAC Opcode Storing Result to Fd (I Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC13a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC13a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmT1; Fd = xmmT1;
@ -400,16 +382,15 @@ microVUt(void) mVUallocFMAC13a(int& Fd, int& ACC, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC);
} }
microVUt(void) mVUallocFMAC13b(int& Fd) { microVUt(void) mVUallocFMAC13b(mV, int& Fd) {
mVUallocFMAC9b<vuIndex>(Fd); mVUallocFMAC9b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC14 - MADDA/MSUBA FMAC Opcodes // FMAC14 - MADDA/MSUBA FMAC Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC14a(int& ACCw, int& ACCr, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC14a(mV, int& ACCw, int& ACCr, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
ACCw = xmmACC; ACCw = xmmACC;
@ -431,9 +412,8 @@ microVUt(void) mVUallocFMAC14a(int& ACCw, int& ACCr, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC);
} }
microVUt(void) mVUallocFMAC14b(int& ACCw, int& ACCr) { microVUt(void) mVUallocFMAC14b(mV, int& ACCw, int& ACCr) {
microVU* mVU = mVUx; if (CHECK_VU_OVERFLOW) mVUclamp1(ACCr, xmmFt, _xyzw_ACC);
if (CHECK_VU_OVERFLOW) mVUclamp1<vuIndex>(ACCr, xmmFt, _xyzw_ACC);
mVUmergeRegs(ACCw, ACCr, _X_Y_Z_W); mVUmergeRegs(ACCw, ACCr, _X_Y_Z_W);
} }
@ -441,8 +421,7 @@ microVUt(void) mVUallocFMAC14b(int& ACCw, int& ACCr) {
// FMAC15 - MADDA/MSUBA BC(xyzw) FMAC Opcode // FMAC15 - MADDA/MSUBA BC(xyzw) FMAC Opcode
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC15a(int& ACCw, int& ACCr, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC15a(mV, int& ACCw, int& ACCr, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
ACCw = xmmACC; ACCw = xmmACC;
@ -464,16 +443,15 @@ microVUt(void) mVUallocFMAC15a(int& ACCw, int& ACCr, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC);
} }
microVUt(void) mVUallocFMAC15b(int& ACCw, int& ACCr) { microVUt(void) mVUallocFMAC15b(mV, int& ACCw, int& ACCr) {
mVUallocFMAC14b<vuIndex>(ACCw, ACCr); mVUallocFMAC14b(mVU, ACCw, ACCr);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC16 - MADDA/MSUBA FMAC Opcode (I Reg) // FMAC16 - MADDA/MSUBA FMAC Opcode (I Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC16a(int& ACCw, int& ACCr, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC16a(mV, int& ACCw, int& ACCr, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
ACCw = xmmACC; ACCw = xmmACC;
@ -485,8 +463,8 @@ microVUt(void) mVUallocFMAC16a(int& ACCw, int& ACCr, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC);
} }
microVUt(void) mVUallocFMAC16b(int& ACCw, int& ACCr) { microVUt(void) mVUallocFMAC16b(mV, int& ACCw, int& ACCr) {
mVUallocFMAC14b<vuIndex>(ACCw, ACCr); mVUallocFMAC14b(mVU, ACCw, ACCr);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -494,13 +472,12 @@ microVUt(void) mVUallocFMAC16b(int& ACCw, int& ACCr) {
//------------------------------------------------------------------ //------------------------------------------------------------------
#define getReg9(reg, _reg_) { \ #define getReg9(reg, _reg_) { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], 1); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], 1); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, 1); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, 1); \
mVUunpack_xyzw<vuIndex>(reg, reg, 0); \ mVUunpack_xyzw(reg, reg, 0); \
} }
microVUt(void) mVUallocFMAC17a(int& Fs, int& Ft) { microVUt(void) mVUallocFMAC17a(mV, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
getReg6(Fs, _Fs_); getReg6(Fs, _Fs_);
@ -511,8 +488,7 @@ microVUt(void) mVUallocFMAC17a(int& Fs, int& Ft) {
// FMAC18 - OPMULA FMAC Opcode // FMAC18 - OPMULA FMAC Opcode
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC18a(int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC18a(mV, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
ACC = xmmACC; ACC = xmmACC;
@ -527,16 +503,15 @@ microVUt(void) mVUallocFMAC18a(int& ACC, int& Fs, int& Ft) {
SSE2_PSHUFD_XMM_to_XMM(Ft, Ft, 0xD2); // WYXZ SSE2_PSHUFD_XMM_to_XMM(Ft, Ft, 0xD2); // WYXZ
} }
microVUt(void) mVUallocFMAC18b(int& ACC, int& Fs) { microVUt(void) mVUallocFMAC18b(mV, int& ACC, int& Fs) {
mVUallocFMAC4b<vuIndex>(ACC, Fs); mVUallocFMAC4b(mVU, ACC, Fs);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC19 - OPMSUB FMAC Opcode // FMAC19 - OPMSUB FMAC Opcode
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC19a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC19a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmT1; Fd = xmmT1;
@ -553,8 +528,8 @@ microVUt(void) mVUallocFMAC19a(int& Fd, int& ACC, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC);
} }
microVUt(void) mVUallocFMAC19b(int& Fd) { microVUt(void) mVUallocFMAC19b(mV, int& Fd) {
mVUallocFMAC9b<vuIndex>(Fd); mVUallocFMAC9b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -562,12 +537,11 @@ microVUt(void) mVUallocFMAC19b(int& Fd) {
//------------------------------------------------------------------ //------------------------------------------------------------------
#define getQreg(reg) { \ #define getQreg(reg) { \
mVUunpack_xyzw<vuIndex>(reg, xmmPQ, readQ); \ mVUunpack_xyzw(reg, xmmPQ, readQ); \
/*if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT1, 15);*/ \ /*if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT1, 15);*/ \
} }
microVUt(void) mVUallocFMAC22a(int& Fd, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC22a(mV, int& Fd, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -575,16 +549,15 @@ microVUt(void) mVUallocFMAC22a(int& Fd, int& Fs, int& Ft) {
getReg6(Fs, _Fs_); getReg6(Fs, _Fs_);
} }
microVUt(void) mVUallocFMAC22b(int& Fd) { microVUt(void) mVUallocFMAC22b(mV, int& Fd) {
mVUallocFMAC1b<vuIndex>(Fd); mVUallocFMAC1b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC23 - FMAC Opcodes Storing Result to ACC (Q Reg) // FMAC23 - FMAC Opcodes Storing Result to ACC (Q Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC23a(int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC23a(mV, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
ACC = xmmACC; ACC = xmmACC;
Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs; Fs = (_X_Y_Z_W == 15) ? xmmACC : xmmFs;
Ft = xmmFt; Ft = xmmFt;
@ -594,16 +567,15 @@ microVUt(void) mVUallocFMAC23a(int& ACC, int& Fs, int& Ft) {
else { getReg4(Fs, _Fs_); } else { getReg4(Fs, _Fs_); }
} }
microVUt(void) mVUallocFMAC23b(int& ACC, int& Fs) { microVUt(void) mVUallocFMAC23b(mV, int& ACC, int& Fs) {
mVUallocFMAC4b<vuIndex>(ACC, Fs); mVUallocFMAC4b(mVU, ACC, Fs);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC24 - MADD FMAC Opcode Storing Result to Fd (Q Reg) // FMAC24 - MADD FMAC Opcode Storing Result to Fd (Q Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC24a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC24a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmFs; Fd = xmmFs;
@ -614,16 +586,15 @@ microVUt(void) mVUallocFMAC24a(int& Fd, int& ACC, int& Fs, int& Ft) {
else { getReg4(Fs, _Fs_); } else { getReg4(Fs, _Fs_); }
} }
microVUt(void) mVUallocFMAC24b(int& Fd) { microVUt(void) mVUallocFMAC24b(mV, int& Fd) {
mVUallocFMAC8b<vuIndex>(Fd); mVUallocFMAC8b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC25 - MSUB FMAC Opcode Storing Result to Fd (Q Reg) // FMAC25 - MSUB FMAC Opcode Storing Result to Fd (Q Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC25a(int& Fd, int& ACC, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC25a(mV, int& Fd, int& ACC, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
Fd = xmmT1; Fd = xmmT1;
@ -635,16 +606,15 @@ microVUt(void) mVUallocFMAC25a(int& Fd, int& ACC, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACC, xmmACC);
} }
microVUt(void) mVUallocFMAC25b(int& Fd) { microVUt(void) mVUallocFMAC25b(mV, int& Fd) {
mVUallocFMAC9b<vuIndex>(Fd); mVUallocFMAC9b(mVU, Fd);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// FMAC26 - MADDA/MSUBA FMAC Opcode (Q Reg) // FMAC26 - MADDA/MSUBA FMAC Opcode (Q Reg)
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocFMAC26a(int& ACCw, int& ACCr, int& Fs, int& Ft) { microVUt(void) mVUallocFMAC26a(mV, int& ACCw, int& ACCr, int& Fs, int& Ft) {
microVU* mVU = mVUx;
Fs = xmmFs; Fs = xmmFs;
Ft = xmmFt; Ft = xmmFt;
ACCw = xmmACC; ACCw = xmmACC;
@ -656,8 +626,8 @@ microVUt(void) mVUallocFMAC26a(int& ACCw, int& ACCr, int& Fs, int& Ft) {
SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC); SSE_MOVAPS_XMM_to_XMM(ACCr, xmmACC);
} }
microVUt(void) mVUallocFMAC26b(int& ACCw, int& ACCr) { microVUt(void) mVUallocFMAC26b(mV, int& ACCw, int& ACCr) {
mVUallocFMAC14b<vuIndex>(ACCw, ACCr); mVUallocFMAC14b(mVU, ACCw, ACCr);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -688,24 +658,20 @@ microVUt(void) mVUallocSFLAGb(int reg, int fInstance) {
MOV32RtoR(fInstance, reg); MOV32RtoR(fInstance, reg);
} }
microVUt(void) mVUallocMFLAGa(int reg, int fInstance) { microVUt(void) mVUallocMFLAGa(mV, int reg, int fInstance) {
microVU* mVU = mVUx;
MOVZX32M16toR(reg, (uptr)&mVU->macFlag[fInstance]); MOVZX32M16toR(reg, (uptr)&mVU->macFlag[fInstance]);
} }
microVUt(void) mVUallocMFLAGb(int reg, int fInstance) { microVUt(void) mVUallocMFLAGb(mV, int reg, int fInstance) {
microVU* mVU = mVUx;
//AND32ItoR(reg, 0xffff); //AND32ItoR(reg, 0xffff);
MOV32RtoM((uptr)&mVU->macFlag[fInstance], reg); MOV32RtoM((uptr)&mVU->macFlag[fInstance], reg);
} }
microVUt(void) mVUallocCFLAGa(int reg, int fInstance) { microVUt(void) mVUallocCFLAGa(mV, int reg, int fInstance) {
microVU* mVU = mVUx;
MOV32MtoR(reg, (uptr)&mVU->clipFlag[fInstance]); MOV32MtoR(reg, (uptr)&mVU->clipFlag[fInstance]);
} }
microVUt(void) mVUallocCFLAGb(int reg, int fInstance) { microVUt(void) mVUallocCFLAGb(mV, int reg, int fInstance) {
microVU* mVU = mVUx;
MOV32RtoM((uptr)&mVU->clipFlag[fInstance], reg); MOV32RtoM((uptr)&mVU->clipFlag[fInstance], reg);
} }
@ -713,18 +679,16 @@ microVUt(void) mVUallocCFLAGb(int reg, int fInstance) {
// VI Reg Allocators // VI Reg Allocators
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUallocVIa(int GPRreg, int _reg_) { microVUt(void) mVUallocVIa(mV, int GPRreg, int _reg_) {
microVU* mVU = mVUx;
if (!_reg_) { XOR32RtoR(GPRreg, GPRreg); } if (!_reg_) { XOR32RtoR(GPRreg, GPRreg); }
else if (isMMX(_reg_)) { MOVD32MMXtoR(GPRreg, mmVI(_reg_)); } else if (isMMX(_reg_)) { MOVD32MMXtoR(GPRreg, mmVI(_reg_)); }
else { MOVZX32Rm16toR(GPRreg, gprR, (_reg_ - 9) * 16); } else { MOVZX32Rm16toR(GPRreg, gprR, (_reg_ - 9) * 16); }
} }
microVUt(void) mVUallocVIb(int GPRreg, int _reg_) { microVUt(void) mVUallocVIb(mV, int GPRreg, int _reg_) {
microVU* mVU = mVUx;
if (backupVI) { // Backs up reg to memory (used when VI is modified b4 a branch) if (backupVI) { // Backs up reg to memory (used when VI is modified b4 a branch)
MOV32RtoM((uptr)&mVU->VIbackup[1], GPRreg); MOV32RtoM((uptr)&mVU->VIbackup[1], GPRreg);
mVUallocVIa<vuIndex>(GPRreg, _reg_); mVUallocVIa(mVU, GPRreg, _reg_);
MOV32RtoM((uptr)&mVU->VIbackup[0], GPRreg); MOV32RtoM((uptr)&mVU->VIbackup[0], GPRreg);
MOV32MtoR(GPRreg, (uptr)&mVU->VIbackup[1]); MOV32MtoR(GPRreg, (uptr)&mVU->VIbackup[1]);
} }
@ -738,8 +702,8 @@ microVUt(void) mVUallocVIb(int GPRreg, int _reg_) {
//------------------------------------------------------------------ //------------------------------------------------------------------
#define getPreg(reg) { \ #define getPreg(reg) { \
mVUunpack_xyzw<vuIndex>(reg, xmmPQ, (2 + readP)); \ mVUunpack_xyzw(reg, xmmPQ, (2 + readP)); \
/*if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT1, 15);*/ \ /*if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT1, 15);*/ \
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -749,18 +713,18 @@ microVUt(void) mVUallocVIb(int GPRreg, int _reg_) {
#define getReg5(reg, _reg_, _fxf_) { \ #define getReg5(reg, _reg_, _fxf_) { \
if (!_reg_) { \ if (!_reg_) { \
if (_fxf_ < 3) { SSE_XORPS_XMM_to_XMM(reg, reg); } \ if (_fxf_ < 3) { SSE_XORPS_XMM_to_XMM(reg, reg); } \
else { mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], 1); } \ else { mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], 1); } \
} \ } \
else { \ else { \
mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], (1 << (3 - _fxf_))); \ mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], (1 << (3 - _fxf_))); \
if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2<vuIndex>(reg, xmmT2, (1 << (3 - _fxf_))); \ if (CHECK_VU_EXTRA_OVERFLOW) mVUclamp2(reg, xmmT2, (1 << (3 - _fxf_))); \
} \ } \
} }
// Doesn't Clamp // Doesn't Clamp
#define getReg7(reg, _reg_) { \ #define getReg7(reg, _reg_) { \
if (!_reg_) { getZero(reg); } \ if (!_reg_) { getZero(reg); } \
else { mVUloadReg<vuIndex>(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], _X_Y_Z_W); } \ else { mVUloadReg(reg, (uptr)&mVU->regs->VF[_reg_].UL[0], _X_Y_Z_W); } \
} }
// VF to GPR // VF to GPR

View File

@ -61,8 +61,7 @@
} \ } \
} }
microVUt(void) mVUanalyzeFMAC1(int Fd, int Fs, int Ft) { microVUt(void) mVUanalyzeFMAC1(mV, int Fd, int Fs, int Ft) {
microVU* mVU = mVUx;
mVUinfo |= _doStatus; mVUinfo |= _doStatus;
analyzeReg1(Fs); analyzeReg1(Fs);
analyzeReg1(Ft); analyzeReg1(Ft);
@ -73,8 +72,7 @@ microVUt(void) mVUanalyzeFMAC1(int Fd, int Fs, int Ft) {
// FMAC2 - ABS/FTOI/ITOF Opcodes // FMAC2 - ABS/FTOI/ITOF Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeFMAC2(int Fs, int Ft) { microVUt(void) mVUanalyzeFMAC2(mV, int Fs, int Ft) {
microVU* mVU = mVUx;
analyzeReg1(Fs); analyzeReg1(Fs);
analyzeReg2(Ft, 0); analyzeReg2(Ft, 0);
} }
@ -92,8 +90,7 @@ microVUt(void) mVUanalyzeFMAC2(int Fs, int Ft) {
} \ } \
} }
microVUt(void) mVUanalyzeFMAC3(int Fd, int Fs, int Ft) { microVUt(void) mVUanalyzeFMAC3(mV, int Fd, int Fs, int Ft) {
microVU* mVU = mVUx;
mVUinfo |= _doStatus; mVUinfo |= _doStatus;
analyzeReg1(Fs); analyzeReg1(Fs);
analyzeReg3(Ft); analyzeReg3(Ft);
@ -108,8 +105,7 @@ microVUt(void) mVUanalyzeFMAC3(int Fd, int Fs, int Ft) {
if (reg) { mVUstall = aMax(mVUstall, aReg(reg).w); } \ if (reg) { mVUstall = aMax(mVUstall, aReg(reg).w); } \
} }
microVUt(void) mVUanalyzeFMAC4(int Fs, int Ft) { microVUt(void) mVUanalyzeFMAC4(mV, int Fs, int Ft) {
microVU* mVU = mVUx;
mVUinfo |= _doClip; mVUinfo |= _doClip;
analyzeReg1(Fs); analyzeReg1(Fs);
analyzeReg4(Ft); analyzeReg4(Ft);
@ -123,16 +119,14 @@ microVUt(void) mVUanalyzeFMAC4(int Fs, int Ft) {
#define analyzeVIreg2(reg, aCycles) { if (reg) { mVUregsTemp.VIreg = reg; mVUregsTemp.VI = aCycles; mVUinfo |= _writesVI; mVU->VIbackup[0] = reg; } } #define analyzeVIreg2(reg, aCycles) { if (reg) { mVUregsTemp.VIreg = reg; mVUregsTemp.VI = aCycles; mVUinfo |= _writesVI; mVU->VIbackup[0] = reg; } }
#define analyzeVIreg3(reg, aCycles) { if (reg) { mVUregsTemp.VIreg = reg; mVUregsTemp.VI = aCycles; } } #define analyzeVIreg3(reg, aCycles) { if (reg) { mVUregsTemp.VIreg = reg; mVUregsTemp.VI = aCycles; } }
microVUt(void) mVUanalyzeIALU1(int Id, int Is, int It) { microVUt(void) mVUanalyzeIALU1(mV, int Id, int Is, int It) {
microVU* mVU = mVUx;
if (!Id) { mVUinfo |= _isNOP; } if (!Id) { mVUinfo |= _isNOP; }
analyzeVIreg1(Is); analyzeVIreg1(Is);
analyzeVIreg1(It); analyzeVIreg1(It);
analyzeVIreg2(Id, 1); analyzeVIreg2(Id, 1);
} }
microVUt(void) mVUanalyzeIALU2(int Is, int It) { microVUt(void) mVUanalyzeIALU2(mV, int Is, int It) {
microVU* mVU = mVUx;
if (!It) { mVUinfo |= _isNOP; } if (!It) { mVUinfo |= _isNOP; }
analyzeVIreg1(Is); analyzeVIreg1(Is);
analyzeVIreg2(It, 1); analyzeVIreg2(It, 1);
@ -159,8 +153,7 @@ microVUt(void) mVUanalyzeIALU2(int Is, int It) {
} \ } \
} }
microVUt(void) mVUanalyzeMR32(int Fs, int Ft) { microVUt(void) mVUanalyzeMR32(mV, int Fs, int Ft) {
microVU* mVU = mVUx;
if (!Ft) { mVUinfo |= _isNOP; } if (!Ft) { mVUinfo |= _isNOP; }
analyzeReg6(Fs); analyzeReg6(Fs);
analyzeReg2(Ft, 1); analyzeReg2(Ft, 1);
@ -191,8 +184,7 @@ microVUt(void) mVUanalyzeMR32(int Fs, int Ft) {
#define analyzeQreg(x) { mVUregsTemp.q = x; mVUstall = aMax(mVUstall, mVUregs.q); } #define analyzeQreg(x) { mVUregsTemp.q = x; mVUstall = aMax(mVUstall, mVUregs.q); }
#define analyzePreg(x) { mVUregsTemp.p = x; mVUstall = aMax(mVUstall, ((mVUregs.p) ? (mVUregs.p - 1) : 0)); } #define analyzePreg(x) { mVUregsTemp.p = x; mVUstall = aMax(mVUstall, ((mVUregs.p) ? (mVUregs.p - 1) : 0)); }
microVUt(void) mVUanalyzeFDIV(int Fs, int Fsf, int Ft, int Ftf, u8 xCycles) { microVUt(void) mVUanalyzeFDIV(mV, int Fs, int Fsf, int Ft, int Ftf, u8 xCycles) {
microVU* mVU = mVUx;
mVUprint("microVU: DIV Opcode"); mVUprint("microVU: DIV Opcode");
analyzeReg5(Fs, Fsf); analyzeReg5(Fs, Fsf);
analyzeReg5(Ft, Ftf); analyzeReg5(Ft, Ftf);
@ -203,15 +195,13 @@ microVUt(void) mVUanalyzeFDIV(int Fs, int Fsf, int Ft, int Ftf, u8 xCycles) {
// EFU - EFU Opcodes // EFU - EFU Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeEFU1(int Fs, int Fsf, u8 xCycles) { microVUt(void) mVUanalyzeEFU1(mV, int Fs, int Fsf, u8 xCycles) {
microVU* mVU = mVUx;
mVUprint("microVU: EFU Opcode"); mVUprint("microVU: EFU Opcode");
analyzeReg5(Fs, Fsf); analyzeReg5(Fs, Fsf);
analyzePreg(xCycles); analyzePreg(xCycles);
} }
microVUt(void) mVUanalyzeEFU2(int Fs, u8 xCycles) { microVUt(void) mVUanalyzeEFU2(mV, int Fs, u8 xCycles) {
microVU* mVU = mVUx;
mVUprint("microVU: EFU Opcode"); mVUprint("microVU: EFU Opcode");
analyzeReg1b(Fs); analyzeReg1b(Fs);
analyzePreg(xCycles); analyzePreg(xCycles);
@ -221,8 +211,7 @@ microVUt(void) mVUanalyzeEFU2(int Fs, u8 xCycles) {
// MFP - MFP Opcode // MFP - MFP Opcode
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeMFP(int Ft) { microVUt(void) mVUanalyzeMFP(mV, int Ft) {
microVU* mVU = mVUx;
if (!Ft) { mVUinfo |= _isNOP; } if (!Ft) { mVUinfo |= _isNOP; }
analyzeReg2(Ft, 1); analyzeReg2(Ft, 1);
} }
@ -231,8 +220,7 @@ microVUt(void) mVUanalyzeMFP(int Ft) {
// MOVE - MOVE Opcode // MOVE - MOVE Opcode
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeMOVE(int Fs, int Ft) { microVUt(void) mVUanalyzeMOVE(mV, int Fs, int Ft) {
microVU* mVU = mVUx;
if (!Ft || (Ft == Fs)) { mVUinfo |= _isNOP; } if (!Ft || (Ft == Fs)) { mVUinfo |= _isNOP; }
analyzeReg1b(Fs); analyzeReg1b(Fs);
analyzeReg2(Ft, 1); analyzeReg2(Ft, 1);
@ -243,8 +231,7 @@ microVUt(void) mVUanalyzeMOVE(int Fs, int Ft) {
// LQx - LQ/LQD/LQI Opcodes // LQx - LQ/LQD/LQI Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeLQ(int Ft, int Is, bool writeIs) { microVUt(void) mVUanalyzeLQ(mV, int Ft, int Is, bool writeIs) {
microVU* mVU = mVUx;
analyzeVIreg1(Is); analyzeVIreg1(Is);
analyzeReg2(Ft, 1); analyzeReg2(Ft, 1);
if (!Ft) { mVUinfo |= (writeIs && Is) ? _noWriteVF : _isNOP; } if (!Ft) { mVUinfo |= (writeIs && Is) ? _noWriteVF : _isNOP; }
@ -255,8 +242,7 @@ microVUt(void) mVUanalyzeLQ(int Ft, int Is, bool writeIs) {
// SQx - SQ/SQD/SQI Opcodes // SQx - SQ/SQD/SQI Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeSQ(int Fs, int It, bool writeIt) { microVUt(void) mVUanalyzeSQ(mV, int Fs, int It, bool writeIt) {
microVU* mVU = mVUx;
analyzeReg1b(Fs); analyzeReg1b(Fs);
analyzeVIreg1(It); analyzeVIreg1(It);
if (writeIt) { analyzeVIreg2(It, 1); } if (writeIt) { analyzeVIreg2(It, 1); }
@ -268,14 +254,12 @@ microVUt(void) mVUanalyzeSQ(int Fs, int It, bool writeIt) {
#define analyzeRreg() { mVUregsTemp.r = 1; } #define analyzeRreg() { mVUregsTemp.r = 1; }
microVUt(void) mVUanalyzeR1(int Fs, int Fsf) { microVUt(void) mVUanalyzeR1(mV, int Fs, int Fsf) {
microVU* mVU = mVUx;
analyzeReg5(Fs, Fsf); analyzeReg5(Fs, Fsf);
analyzeRreg(); analyzeRreg();
} }
microVUt(void) mVUanalyzeR2(int Ft, bool canBeNOP) { microVUt(void) mVUanalyzeR2(mV, int Ft, bool canBeNOP) {
microVU* mVU = mVUx;
if (!Ft) { mVUinfo |= ((canBeNOP) ? _isNOP : _noWriteVF); } if (!Ft) { mVUinfo |= ((canBeNOP) ? _isNOP : _noWriteVF); }
analyzeReg2(Ft, 1); analyzeReg2(Ft, 1);
analyzeRreg(); analyzeRreg();
@ -285,8 +269,7 @@ microVUt(void) mVUanalyzeR2(int Ft, bool canBeNOP) {
// Sflag - Status Flag Opcodes // Sflag - Status Flag Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeSflag(int It) { microVUt(void) mVUanalyzeSflag(mV, int It) {
microVU* mVU = mVUx;
if (!It) { mVUinfo |= _isNOP; } if (!It) { mVUinfo |= _isNOP; }
else { else {
mVUinfo |= _swapOps; mVUinfo |= _swapOps;
@ -300,8 +283,7 @@ microVUt(void) mVUanalyzeSflag(int It) {
analyzeVIreg3(It, 1); analyzeVIreg3(It, 1);
} }
microVUt(void) mVUanalyzeFSSET() { microVUt(void) mVUanalyzeFSSET(mV) {
microVU* mVU = mVUx;
mVUinfo |= _isFSSET; mVUinfo |= _isFSSET;
// mVUinfo &= ~_doStatus; // mVUinfo &= ~_doStatus;
// Note: I'm not entirely sure if the non-sticky flags // Note: I'm not entirely sure if the non-sticky flags
@ -314,8 +296,7 @@ microVUt(void) mVUanalyzeFSSET() {
// Mflag - Mac Flag Opcodes // Mflag - Mac Flag Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeMflag(int Is, int It) { microVUt(void) mVUanalyzeMflag(mV, int Is, int It) {
microVU* mVU = mVUx;
if (!It) { mVUinfo |= _isNOP; } if (!It) { mVUinfo |= _isNOP; }
else { // Need set _doMac for 4 previous Ops (need to do all 4 because stalls could change the result needed) else { // Need set _doMac for 4 previous Ops (need to do all 4 because stalls could change the result needed)
mVUinfo |= _swapOps; mVUinfo |= _swapOps;
@ -335,8 +316,7 @@ microVUt(void) mVUanalyzeMflag(int Is, int It) {
// Cflag - Clip Flag Opcodes // Cflag - Clip Flag Opcodes
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUanalyzeCflag(int It) { microVUt(void) mVUanalyzeCflag(mV, int It) {
microVU* mVU = mVUx;
mVUinfo |= _swapOps; mVUinfo |= _swapOps;
if (mVUcount < 4) { mVUpBlock->pState.needExactMatch |= 0xf << (/*mVUcount +*/ 8); } if (mVUcount < 4) { mVUpBlock->pState.needExactMatch |= 0xf << (/*mVUcount +*/ 8); }
analyzeVIreg3(It, 1); analyzeVIreg3(It, 1);
@ -349,8 +329,7 @@ microVUt(void) mVUanalyzeCflag(int It) {
#define analyzeXGkick1() { mVUstall = aMax(mVUstall, mVUregs.xgkick); } #define analyzeXGkick1() { mVUstall = aMax(mVUstall, mVUregs.xgkick); }
#define analyzeXGkick2(x) { mVUregsTemp.xgkick = x; } #define analyzeXGkick2(x) { mVUregsTemp.xgkick = x; }
microVUt(void) mVUanalyzeXGkick(int Fs, int xCycles) { microVUt(void) mVUanalyzeXGkick(mV, int Fs, int xCycles) {
microVU* mVU = mVUx;
analyzeVIreg1(Fs); analyzeVIreg1(Fs);
analyzeXGkick1(); analyzeXGkick1();
analyzeXGkick2(xCycles); analyzeXGkick2(xCycles);
@ -378,14 +357,12 @@ microVUt(void) mVUanalyzeXGkick(int Fs, int xCycles) {
} \ } \
} }
microVUt(void) mVUanalyzeBranch1(int Is) { microVUt(void) mVUanalyzeBranch1(mV, int Is) {
microVU* mVU = mVUx;
if (mVUregs.VI[Is] || mVUstall) { analyzeVIreg1(Is); } if (mVUregs.VI[Is] || mVUstall) { analyzeVIreg1(Is); }
else { analyzeBranchVI(Is, _memReadIs); } else { analyzeBranchVI(Is, _memReadIs); }
} }
microVUt(void) mVUanalyzeBranch2(int Is, int It) { microVUt(void) mVUanalyzeBranch2(mV, int Is, int It) {
microVU* mVU = mVUx;
if (mVUregs.VI[Is] || mVUregs.VI[It] || mVUstall) { analyzeVIreg1(Is); analyzeVIreg1(It); } if (mVUregs.VI[Is] || mVUregs.VI[It] || mVUstall) { analyzeVIreg1(Is); analyzeVIreg1(It); }
else { analyzeBranchVI(Is, _memReadIs); analyzeBranchVI(It, _memReadIt);} else { analyzeBranchVI(Is, _memReadIs); analyzeBranchVI(It, _memReadIt);}
} }

View File

@ -23,7 +23,7 @@
//------------------------------------------------------------------ //------------------------------------------------------------------
#define branchCase(JMPcc, nJMPcc) \ #define branchCase(JMPcc, nJMPcc) \
mVUsetupBranch<vuIndex>(xStatus, xMac, xClip, xCycles); \ mVUsetupBranch(mVU, xStatus, xMac, xClip, xCycles); \
CMP16ItoM((uptr)&mVU->branch, 0); \ CMP16ItoM((uptr)&mVU->branch, 0); \
incPC2(1); \ incPC2(1); \
if (!mVUblocks[iPC/2]) { mVUblocks[iPC/2] = new microBlockManager(); } \ if (!mVUblocks[iPC/2]) { mVUblocks[iPC/2] = new microBlockManager(); } \
@ -54,8 +54,8 @@
#define tCycles(dest, src) { dest = aMax(dest, src); } #define tCycles(dest, src) { dest = aMax(dest, src); }
#define incP() { mVU->p = (mVU->p+1) & 1; } #define incP() { mVU->p = (mVU->p+1) & 1; }
#define incQ() { mVU->q = (mVU->q+1) & 1; } #define incQ() { mVU->q = (mVU->q+1) & 1; }
#define doUpperOp() { mVUopU<vuIndex>(1); mVUdivSet<vuIndex>(); } #define doUpperOp() { mVUopU(mVU, 1); mVUdivSet(mVU); }
#define doLowerOp() { incPC(-1); mVUopL<vuIndex>(1); incPC(1); } #define doLowerOp() { incPC(-1); mVUopL(mVU, 1); incPC(1); }
#define doIbit() { if (curI & _Ibit_) { incPC(-1); MOV32ItoM((uptr)&mVU->regs->VI[REG_I].UL, curI); incPC(1); } } #define doIbit() { if (curI & _Ibit_) { incPC(-1); MOV32ItoM((uptr)&mVU->regs->VI[REG_I].UL, curI); incPC(1); } }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -63,20 +63,19 @@
//------------------------------------------------------------------ //------------------------------------------------------------------
// Used by mVUsetupRange // Used by mVUsetupRange
microVUt(void) mVUcheckIsSame() { microVUt(void) mVUcheckIsSame(mV) {
microVU* mVU = mVUx;
if (mVU->prog.isSame == -1) { if (mVU->prog.isSame == -1) {
mVU->prog.isSame = !memcmp_mmx(mVU->prog.prog[mVU->prog.cur].data, mVU->regs->Micro, mVU->microSize); mVU->prog.isSame = !memcmp_mmx(mVU->prog.prog[mVU->prog.cur].data, mVU->regs->Micro, mVU->microSize);
} }
if (mVU->prog.isSame == 0) { if (mVU->prog.isSame == 0) {
mVUcacheProg<vuIndex>(mVU->prog.cur); if (!isVU1) mVUcacheProg<0>(mVU->prog.cur);
else mVUcacheProg<1>(mVU->prog.cur);
} }
} }
// Sets up microProgram PC ranges based on whats been recompiled // Sets up microProgram PC ranges based on whats been recompiled
microVUt(void) mVUsetupRange(u32 pc) { microVUt(void) mVUsetupRange(mV, u32 pc) {
microVU* mVU = mVUx;
if (mVUcurProg.range[0] == -1) { if (mVUcurProg.range[0] == -1) {
mVUcurProg.range[0] = (s32)pc; mVUcurProg.range[0] = (s32)pc;
@ -84,28 +83,26 @@ microVUt(void) mVUsetupRange(u32 pc) {
} }
else if (mVUcurProg.range[0] > (s32)pc) { else if (mVUcurProg.range[0] > (s32)pc) {
mVUcurProg.range[0] = (s32)pc; mVUcurProg.range[0] = (s32)pc;
mVUcheckIsSame<vuIndex>(); mVUcheckIsSame(mVU);
} }
else if (mVUcurProg.range[1] < (s32)pc) { else if (mVUcurProg.range[1] < (s32)pc) {
mVUcurProg.range[1] = (s32)pc; mVUcurProg.range[1] = (s32)pc;
mVUcheckIsSame<vuIndex>(); mVUcheckIsSame(mVU);
} }
} }
// Recompiles Code for Proper Flags and Q/P regs on Block Linkings // Recompiles Code for Proper Flags and Q/P regs on Block Linkings
microVUt(void) mVUsetupBranch(int* xStatus, int* xMac, int* xClip, int xCycles) { microVUt(void) mVUsetupBranch(mV, int* xStatus, int* xMac, int* xClip, int xCycles) {
microVU* mVU = mVUx;
mVUprint("mVUsetupBranch"); mVUprint("mVUsetupBranch");
// Shuffle Flag Instances // Shuffle Flag Instances
mVUsetupFlags<vuIndex>(xStatus, xMac, xClip, xCycles); mVUsetupFlags(mVU, xStatus, xMac, xClip, xCycles);
// Shuffle P/Q regs since every block starts at instance #0 // Shuffle P/Q regs since every block starts at instance #0
if (mVU->p || mVU->q) { SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, shufflePQ); } if (mVU->p || mVU->q) { SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, shufflePQ); }
} }
microVUt(void) mVUincCycles(int x) { microVUt(void) mVUincCycles(mV, int x) {
microVU* mVU = mVUx;
mVUcycles += x; mVUcycles += x;
for (int z = 31; z > 0; z--) { for (int z = 31; z > 0; z--) {
calcCycles(mVUregs.VF[z].x, x); calcCycles(mVUregs.VF[z].x, x);
@ -132,8 +129,7 @@ microVUt(void) mVUincCycles(int x) {
calcCycles(mVUregs.r, x); calcCycles(mVUregs.r, x);
} }
microVUt(void) mVUsetCycles() { microVUt(void) mVUsetCycles(mV) {
microVU* mVU = mVUx;
incCycles(mVUstall); incCycles(mVUstall);
if (mVUregsTemp.VFreg[0] == mVUregsTemp.VFreg[1] && mVUregsTemp.VFreg[0]) { // If upper Op && lower Op write to same VF reg if (mVUregsTemp.VFreg[0] == mVUregsTemp.VFreg[1] && mVUregsTemp.VFreg[0]) { // If upper Op && lower Op write to same VF reg
mVUinfo |= (mVUregsTemp.r || mVUregsTemp.VI) ? _noWriteVF : _isNOP; // If lower Op doesn't modify anything else, then make it a NOP mVUinfo |= (mVUregsTemp.r || mVUregsTemp.VI) ? _noWriteVF : _isNOP; // If lower Op doesn't modify anything else, then make it a NOP
@ -155,13 +151,12 @@ microVUt(void) mVUsetCycles() {
tCycles(mVUregs.xgkick, mVUregsTemp.xgkick); tCycles(mVUregs.xgkick, mVUregsTemp.xgkick);
} }
microVUt(void) mVUendProgram(int qInst, int pInst, int fStatus, int fMac, int fClip) { microVUt(void) mVUendProgram(mV, int qInst, int pInst, int fStatus, int fMac, int fClip) {
microVU* mVU = mVUx;
// Save P/Q Regs // Save P/Q Regs
if (qInst) { SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, 0xe5); } if (qInst) { SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, 0xe5); }
SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_Q].UL, xmmPQ); SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_Q].UL, xmmPQ);
if (vuIndex) { if (isVU1) {
SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, pInst ? 3 : 2); SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, pInst ? 3 : 2);
SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_P].UL, xmmPQ); SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_P].UL, xmmPQ);
} }
@ -171,13 +166,13 @@ microVUt(void) mVUendProgram(int qInst, int pInst, int fStatus, int fMac, int fC
getFlagReg(fStatus, fStatus); getFlagReg(fStatus, fStatus);
MOV32RtoM((uptr)&mVU->regs->VI[REG_STATUS_FLAG].UL, fStatus); MOV32RtoM((uptr)&mVU->regs->VI[REG_STATUS_FLAG].UL, fStatus);
} }
mVUallocMFLAGa<vuIndex>(gprT1, fMac); mVUallocMFLAGa(mVU, gprT1, fMac);
mVUallocCFLAGa<vuIndex>(gprT2, fClip); mVUallocCFLAGa(mVU, gprT2, fClip);
MOV32RtoM((uptr)&mVU->regs->VI[REG_MAC_FLAG].UL, gprT1); MOV32RtoM((uptr)&mVU->regs->VI[REG_MAC_FLAG].UL, gprT1);
MOV32RtoM((uptr)&mVU->regs->VI[REG_CLIP_FLAG].UL, gprT2); MOV32RtoM((uptr)&mVU->regs->VI[REG_CLIP_FLAG].UL, gprT2);
// Clear 'is busy' Flags, Save PC, and Jump to Exit Point // Clear 'is busy' Flags, Save PC, and Jump to Exit Point
AND32ItoM((uptr)&VU0.VI[REG_VPU_STAT].UL, (vuIndex ? ~0x100 : ~0x001)); // VBS0/VBS1 flag AND32ItoM((uptr)&VU0.VI[REG_VPU_STAT].UL, (isVU1 ? ~0x100 : ~0x001)); // VBS0/VBS1 flag
AND32ItoM((uptr)&mVU->regs->vifRegs->stat, ~0x4); // Clear VU 'is busy' signal for vif AND32ItoM((uptr)&mVU->regs->vifRegs->stat, ~0x4); // Clear VU 'is busy' signal for vif
MOV32ItoM((uptr)&mVU->regs->VI[REG_TPC].UL, xPC); MOV32ItoM((uptr)&mVU->regs->VI[REG_TPC].UL, xPC);
JMP32((uptr)mVU->exitFunct - ((uptr)x86Ptr + 5)); JMP32((uptr)mVU->exitFunct - ((uptr)x86Ptr + 5));
@ -191,17 +186,16 @@ void __fastcall mVUwarning1(u32 PC) { Console::Error("microVU1 Warning: Exiting
void __fastcall mVUprintPC1(u32 PC) { Console::Write("Block PC [%04x] ", params PC); } void __fastcall mVUprintPC1(u32 PC) { Console::Write("Block PC [%04x] ", params PC); }
void __fastcall mVUprintPC2(u32 PC) { Console::Write("[%04x]\n", params PC); } void __fastcall mVUprintPC2(u32 PC) { Console::Write("[%04x]\n", params PC); }
microVUt(void) mVUtestCycles() { microVUt(void) mVUtestCycles(mV) {
microVU* mVU = mVUx;
iPC = mVUstartPC; iPC = mVUstartPC;
mVUdebugNOW(0); mVUdebugNOW(0);
SUB32ItoM((uptr)&mVU->cycles, mVUcycles); SUB32ItoM((uptr)&mVU->cycles, mVUcycles);
u8* jmp8 = JG8(0); u8* jmp8 = JG8(0);
MOV32ItoR(gprT2, xPC); MOV32ItoR(gprT2, xPC);
if (!vuIndex) CALLFunc((uptr)mVUwarning0); if (!isVU1) CALLFunc((uptr)mVUwarning0);
else CALLFunc((uptr)mVUwarning1); else CALLFunc((uptr)mVUwarning1);
MOV32ItoR(gprR, Roffset); // Restore gprR MOV32ItoR(gprR, Roffset); // Restore gprR
mVUendProgram<vuIndex>(0, 0, sI, 0, cI); mVUendProgram(mVU, 0, 0, sI, 0, cI);
x86SetJ8(jmp8); x86SetJ8(jmp8);
} }
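For orientation, the control flow mVUtestCycles emits looks roughly like this (pseudo-assembly; register and label names follow the emitter calls above, and ".ok" stands for the target patched by x86SetJ8):

//   sub  dword [mVU->cycles], mVUcycles   ; charge the block's estimated cycle count
//   jg   .ok                              ; cycles left -> run the block normally
//   mov  gprT2, xPC                       ; otherwise report the overflow...
//   call mVUwarning0 / mVUwarning1        ; (VU0 or VU1 variant)
//   mov  gprR, Roffset                    ; restore gprR (per the comment above)
//   ...  mVUendProgram(mVU, 0, 0, sI, 0, cI) inlined here ...
// .ok:                                    ; jump target patched by x86SetJ8(jmp8)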
@ -209,7 +203,7 @@ microVUt(void) mVUtestCycles() {
// Recompiler // Recompiler
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void*) __fastcall mVUcompile(u32 startPC, uptr pState) { microVUf(void*) __fastcall mVUcompile(u32 startPC, uptr pState) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
u8* thisPtr = x86Ptr; u8* thisPtr = x86Ptr;
@ -225,7 +219,7 @@ microVUt(void*) __fastcall mVUcompile(u32 startPC, uptr pState) {
if (pBlock) { return pBlock->x86ptrStart; } if (pBlock) { return pBlock->x86ptrStart; }
// Setup Program Bounds/Range // Setup Program Bounds/Range
mVUsetupRange<vuIndex>(startPC); mVUsetupRange(mVU, startPC);
// First Pass // First Pass
iPC = startPC / 4; iPC = startPC / 4;
@ -250,25 +244,25 @@ microVUt(void*) __fastcall mVUcompile(u32 startPC, uptr pState) {
mVUinfo = 0; mVUinfo = 0;
startLoop(); startLoop();
incCycles(1); incCycles(1);
mVUopU<vuIndex>(0); mVUopU(mVU, 0);
if (curI & _Ebit_) { branch = 1; } if (curI & _Ebit_) { branch = 1; }
if (curI & _MDTbit_) { branch = 4; } if (curI & _MDTbit_) { branch = 4; }
if (curI & _Ibit_) { mVUinfo |= _isNOP; } if (curI & _Ibit_) { mVUinfo |= _isNOP; }
else { incPC(-1); mVUopL<vuIndex>(0); incPC(1); } else { incPC(-1); mVUopL(mVU, 0); incPC(1); }
mVUsetCycles<vuIndex>(); mVUsetCycles(mVU);
if (mVU->p) { mVUinfo |= _readP; } if (mVU->p) { mVUinfo |= _readP; }
if (mVU->q) { mVUinfo |= _readQ; } if (mVU->q) { mVUinfo |= _readQ; }
if (branch >= 2) { mVUinfo |= _isEOB | ((branch == 3) ? _isBdelay : 0); mVUcount++; branchWarning(); break; } if (branch >= 2) { mVUinfo |= _isEOB | ((branch == 3) ? _isBdelay : 0); mVUcount++; branchWarning(); break; }
else if (branch == 1) { branch = 2; } else if (branch == 1) { branch = 2; }
if (mVUbranch) { mVUsetFlagInfo<vuIndex>(); branchEbit(); branch = 3; mVUbranch = 0; mVUinfo |= _isBranch; } if (mVUbranch) { mVUsetFlagInfo(mVU); branchEbit(); branch = 3; mVUbranch = 0; mVUinfo |= _isBranch; }
incPC(1); incPC(1);
mVUcount++; mVUcount++;
} }
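The compile routine is two-pass, and the two loops look similar, so a rough summary:

// First pass  (the loop above):  mVUopU(mVU, 0) / mVUopL(mVU, 0)  -> only the handlers'
//                                "pass1" blocks run, filling mVUinfo, stall/cycle counts
//                                and flag-usage info for the block.
// Second pass (the loop below):  doUpperOp() / doLowerOp() / mVUopL(mVU, 1) -> the handlers'
//                                "pass2" blocks run and emit the actual x86 code.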
// Sets Up Flag instances // Sets Up Flag instances
int xStatus[4], xMac[4], xClip[4]; int xStatus[4], xMac[4], xClip[4];
int xCycles = mVUsetFlags<vuIndex>(xStatus, xMac, xClip); int xCycles = mVUsetFlags(mVU, xStatus, xMac, xClip);
mVUtestCycles<vuIndex>(); mVUtestCycles(mVU);
// Second Pass // Second Pass
iPC = mVUstartPC; iPC = mVUstartPC;
@ -279,14 +273,14 @@ microVUt(void*) __fastcall mVUcompile(u32 startPC, uptr pState) {
if (isEOB) { x = 0xffff; } if (isEOB) { x = 0xffff; }
if (isNOP) { incPC(1); doUpperOp(); doIbit(); } if (isNOP) { incPC(1); doUpperOp(); doIbit(); }
else if (!swapOps) { incPC(1); doUpperOp(); doLowerOp(); } else if (!swapOps) { incPC(1); doUpperOp(); doLowerOp(); }
else { mVUopL<vuIndex>(1); incPC(1); doUpperOp(); } else { mVUopL(mVU, 1); incPC(1); doUpperOp(); }
if (doXGKICK) { mVU_XGKICK_DELAY<vuIndex>(); } if (doXGKICK) { mVU_XGKICK_DELAY(mVU); }
if (!isBdelay) { incPC(1); } if (!isBdelay) { incPC(1); }
else { else {
microBlock* bBlock = NULL; microBlock* bBlock = NULL;
u32* ajmp = 0; u32* ajmp = 0;
mVUsetupRange<vuIndex>(xPC); mVUsetupRange(mVU, xPC);
mVUdebugNOW(1); mVUdebugNOW(1);
switch (mVUbranch) { switch (mVUbranch) {
@ -302,7 +296,7 @@ microVUt(void*) __fastcall mVUcompile(u32 startPC, uptr pState) {
incPC(-3); // Go back to branch opcode (to get branch imm addr) incPC(-3); // Go back to branch opcode (to get branch imm addr)
if (eBitBranch) { iPC = branchAddr/4; goto eBitTemination; } // E-bit Was Set on Branch if (eBitBranch) { iPC = branchAddr/4; goto eBitTemination; } // E-bit Was Set on Branch
mVUsetupBranch<vuIndex>(xStatus, xMac, xClip, xCycles); mVUsetupBranch(mVU, xStatus, xMac, xClip, xCycles);
if (mVUblocks[branchAddr/8] == NULL) if (mVUblocks[branchAddr/8] == NULL)
mVUblocks[branchAddr/8] = new microBlockManager(); mVUblocks[branchAddr/8] = new microBlockManager();
@ -317,15 +311,15 @@ microVUt(void*) __fastcall mVUcompile(u32 startPC, uptr pState) {
mVUprint("mVUcompile JR/JALR"); mVUprint("mVUcompile JR/JALR");
memcpy_fast(&pBlock->pStateEnd, &mVUregs, sizeof(microRegInfo)); memcpy_fast(&pBlock->pStateEnd, &mVUregs, sizeof(microRegInfo));
mVUsetupBranch<vuIndex>(xStatus, xMac, xClip, xCycles); mVUsetupBranch(mVU, xStatus, xMac, xClip, xCycles);
mVUbackupRegs<vuIndex>(); mVUbackupRegs(mVU);
MOV32MtoR(gprT2, (uptr)&mVU->branch); // Get startPC (ECX first argument for __fastcall) MOV32MtoR(gprT2, (uptr)&mVU->branch); // Get startPC (ECX first argument for __fastcall)
MOV32ItoR(gprR, (u32)&pBlock->pStateEnd); // Get pState (EDX second argument for __fastcall) MOV32ItoR(gprR, (u32)&pBlock->pStateEnd); // Get pState (EDX second argument for __fastcall)
if (!vuIndex) CALLFunc((uptr)mVUcompileVU0); //(u32 startPC, uptr pState) if (!isVU1) CALLFunc((uptr)mVUcompileVU0); //(u32 startPC, uptr pState)
else CALLFunc((uptr)mVUcompileVU1); else CALLFunc((uptr)mVUcompileVU1);
mVUrestoreRegs<vuIndex>(); mVUrestoreRegs(mVU);
JMPR(gprT1); // Jump to rec-code address JMPR(gprT1); // Jump to rec-code address
return thisPtr; return thisPtr;
} }
@ -380,11 +374,11 @@ eBitTemination:
AND32ItoR (flagReg, 0x0fcf); AND32ItoR (flagReg, 0x0fcf);
OR32MtoR (flagReg, (uptr)&mVU->divFlag); OR32MtoR (flagReg, (uptr)&mVU->divFlag);
} }
if (doXGKICK) { mVU_XGKICK_DELAY<vuIndex>(); } if (doXGKICK) { mVU_XGKICK_DELAY(mVU); }
// Do E-bit end stuff here // Do E-bit end stuff here
mVUsetupRange<vuIndex>(xPC - 8); mVUsetupRange(mVU, xPC - 8);
mVUendProgram<vuIndex>(mVU->q, mVU->p, lStatus, lMac, lClip); mVUendProgram(mVU, mVU->q, mVU->p, lStatus, lMac, lClip);
return thisPtr; return thisPtr;
} }
View File
@ -23,12 +23,11 @@
//------------------------------------------------------------------ //------------------------------------------------------------------
// Generates the code for entering recompiled blocks // Generates the code for entering recompiled blocks
microVUt(void) mVUdispatcherA() { microVUt(void) mVUdispatcherA(mV) {
microVU* mVU = mVUx;
mVU->startFunct = x86Ptr; mVU->startFunct = x86Ptr;
// __fastcall = The first two DWORD or smaller arguments are passed in ECX and EDX registers; all other arguments are passed right to left. // __fastcall = The first two DWORD or smaller arguments are passed in ECX and EDX registers; all other arguments are passed right to left.
if (!vuIndex) { CALLFunc((uptr)mVUexecuteVU0); } if (!isVU1) { CALLFunc((uptr)mVUexecuteVU0); }
else { CALLFunc((uptr)mVUexecuteVU1); } else { CALLFunc((uptr)mVUexecuteVU1); }
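Concretely, for the __fastcall note above: both arguments are DWORD-sized, so they travel entirely in registers (signatures assumed to match mVUexecute further down in this diff):

// void* __fastcall mVUexecuteVU0(u32 startPC, u32 cycles);   // startPC -> ECX, cycles -> EDX
// void* __fastcall mVUexecuteVU1(u32 startPC, u32 cycles);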
// Backup cpu state // Backup cpu state
@ -72,8 +71,7 @@ microVUt(void) mVUdispatcherA() {
} }
// Generates the code to exit from recompiled blocks // Generates the code to exit from recompiled blocks
microVUt(void) mVUdispatcherB() { microVUt(void) mVUdispatcherB(mV) {
microVU* mVU = mVUx;
mVU->exitFunct = x86Ptr; mVU->exitFunct = x86Ptr;
// Load EE's MXCSR state // Load EE's MXCSR state
@ -87,7 +85,7 @@ microVUt(void) mVUdispatcherB() {
} }
// __fastcall = The first two DWORD or smaller arguments are passed in ECX and EDX registers; all other arguments are passed right to left. // __fastcall = The first two DWORD or smaller arguments are passed in ECX and EDX registers; all other arguments are passed right to left.
if (!vuIndex) { CALLFunc((uptr)mVUcleanUpVU0); } if (!isVU1) { CALLFunc((uptr)mVUcleanUpVU0); }
else { CALLFunc((uptr)mVUcleanUpVU1); } else { CALLFunc((uptr)mVUcleanUpVU1); }
// Restore cpu state // Restore cpu state
@ -107,7 +105,7 @@ microVUt(void) mVUdispatcherB() {
//------------------------------------------------------------------ //------------------------------------------------------------------
// Executes for number of cycles // Executes for number of cycles
microVUt(void*) __fastcall mVUexecute(u32 startPC, u32 cycles) { microVUx(void*) __fastcall mVUexecute(u32 startPC, u32 cycles) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
//mVUprint("microVU%x: startPC = 0x%x, cycles = 0x%x", params vuIndex, startPC, cycles); //mVUprint("microVU%x: startPC = 0x%x, cycles = 0x%x", params vuIndex, startPC, cycles);
@ -125,7 +123,7 @@ microVUt(void*) __fastcall mVUexecute(u32 startPC, u32 cycles) {
// Cleanup Functions // Cleanup Functions
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUt(void) mVUcleanUp() { microVUx(void) mVUcleanUp() {
microVU* mVU = mVUx; microVU* mVU = mVUx;
//mVUprint("microVU: Program exited successfully!"); //mVUprint("microVU: Program exited successfully!");
//mVUprint("microVU: VF0 = {%x,%x,%x,%x}", params mVU->regs->VF[0].UL[0], mVU->regs->VF[0].UL[1], mVU->regs->VF[0].UL[2], mVU->regs->VF[0].UL[3]); //mVUprint("microVU: VF0 = {%x,%x,%x,%x}", params mVU->regs->VF[0].UL[0], mVU->regs->VF[0].UL[1], mVU->regs->VF[0].UL[2], mVU->regs->VF[0].UL[3]);
View File
@ -19,8 +19,7 @@
#pragma once #pragma once
// Sets FDIV Flags at the proper time // Sets FDIV Flags at the proper time
microVUt(void) mVUdivSet() { microVUt(void) mVUdivSet(mV) {
microVU* mVU = mVUx;
int flagReg1, flagReg2; int flagReg1, flagReg2;
if (doDivFlag) { if (doDivFlag) {
getFlagReg(flagReg1, fsInstance); getFlagReg(flagReg1, fsInstance);
@ -31,8 +30,7 @@ microVUt(void) mVUdivSet() {
} }
// Optimizes out unneeded status flag updates // Optimizes out unneeded status flag updates
microVUt(void) mVUstatusFlagOp() { microVUt(void) mVUstatusFlagOp(mV) {
microVU* mVU = mVUx;
int curPC = iPC; int curPC = iPC;
int i = mVUcount; int i = mVUcount;
bool runLoop = 1; bool runLoop = 1;
@ -52,7 +50,7 @@ microVUt(void) mVUstatusFlagOp() {
} }
} }
iPC = curPC; iPC = curPC;
DevCon::Status("microVU%d: FSSET Optimization", params vuIndex); DevCon::Status("microVU%d: FSSET Optimization", params getIndex);
} }
int findFlagInst(int* fFlag, int cycles) { int findFlagInst(int* fFlag, int cycles) {
@ -74,8 +72,7 @@ void sortFlag(int* fFlag, int* bFlag, int cycles) {
#define sFlagCond ((doStatus && !mVUsFlagHack) || isFSSET || doDivFlag) #define sFlagCond ((doStatus && !mVUsFlagHack) || isFSSET || doDivFlag)
// Note: Flag handling is 'very' complex, it requires full knowledge of how microVU recs work, so don't touch! // Note: Flag handling is 'very' complex, it requires full knowledge of how microVU recs work, so don't touch!
microVUt(int) mVUsetFlags(int* xStatus, int* xMac, int* xClip) { microVUt(int) mVUsetFlags(mV, int* xStatus, int* xMac, int* xClip) {
microVU* mVU = mVUx;
int endPC = iPC; int endPC = iPC;
u32 aCount = 1; // Amount of instructions needed to get valid mac flag instances for block linking u32 aCount = 1; // Amount of instructions needed to get valid mac flag instances for block linking
@ -120,9 +117,9 @@ microVUt(int) mVUsetFlags(int* xStatus, int* xMac, int* xClip) {
for (mVUcount = 0; mVUcount < xCount; mVUcount++) { for (mVUcount = 0; mVUcount < xCount; mVUcount++) {
if (isFSSET) { if (isFSSET) {
if (__Status) { // Don't Optimize out on the last ~4+ instructions if (__Status) { // Don't Optimize out on the last ~4+ instructions
if ((xCount - mVUcount) > aCount) { mVUstatusFlagOp<vuIndex>(); } if ((xCount - mVUcount) > aCount) { mVUstatusFlagOp(mVU); }
} }
else mVUstatusFlagOp<vuIndex>(); else mVUstatusFlagOp(mVU);
} }
cycles += mVUstall; cycles += mVUstall;
@ -151,8 +148,7 @@ microVUt(int) mVUsetFlags(int* xStatus, int* xMac, int* xClip) {
#define shuffleClip ((bClip[3]<<6)|(bClip[2]<<4)|(bClip[1]<<2)|bClip[0]) #define shuffleClip ((bClip[3]<<6)|(bClip[2]<<4)|(bClip[1]<<2)|bClip[0])
// Recompiles Code for Proper Flags on Block Linkings // Recompiles Code for Proper Flags on Block Linkings
microVUt(void) mVUsetupFlags(int* xStatus, int* xMac, int* xClip, int cycles) { microVUt(void) mVUsetupFlags(mV, int* xStatus, int* xMac, int* xClip, int cycles) {
microVU* mVU = mVUx;
if (__Status && !mVUflagHack) { if (__Status && !mVUflagHack) {
int bStatus[4]; int bStatus[4];
@ -186,15 +182,14 @@ microVUt(void) mVUsetupFlags(int* xStatus, int* xMac, int* xClip, int cycles) {
#define shortBranch() { \ #define shortBranch() { \
if (branch == 3) { \ if (branch == 3) { \
mVUflagPass<vuIndex>(aBranchAddr, (xCount - (mVUcount+1))); \ mVUflagPass(mVU, aBranchAddr, (xCount - (mVUcount+1))); \
mVUcount = 4; \ mVUcount = 4; \
} \ } \
} }
// Scan through instructions and check if flags are read (FSxxx, FMxxx, FCxxx opcodes) // Scan through instructions and check if flags are read (FSxxx, FMxxx, FCxxx opcodes)
microVUx(void) mVUflagPass(u32 startPC, u32 xCount) { void mVUflagPass(mV, u32 startPC, u32 xCount) {
microVU* mVU = mVUx;
int oldPC = iPC; int oldPC = iPC;
int oldCount = mVUcount; int oldCount = mVUcount;
int oldBranch = mVUbranch; int oldBranch = mVUbranch;
@ -206,7 +201,7 @@ microVUx(void) mVUflagPass(u32 startPC, u32 xCount) {
incPC(1); incPC(1);
if ( curI & _Ebit_ ) { branch = 1; } if ( curI & _Ebit_ ) { branch = 1; }
if ( curI & _MDTbit_ ) { branch = 4; } if ( curI & _MDTbit_ ) { branch = 4; }
if (!(curI & _Ibit_) ) { incPC(-1); mVUopL<vuIndex>(3); incPC(1); } if (!(curI & _Ibit_) ) { incPC(-1); mVUopL(mVU, 3); incPC(1); }
if (branch >= 2) { shortBranch(); break; } if (branch >= 2) { shortBranch(); break; }
else if (branch == 1) { branch = 2; } else if (branch == 1) { branch = 2; }
if (mVUbranch) { branch = (mVUbranch >= 9) ? 5 : 3; aBranchAddr = branchAddr; mVUbranch = 0; } if (mVUbranch) { branch = (mVUbranch >= 9) ? 5 : 3; aBranchAddr = branchAddr; mVUbranch = 0; }
@ -224,17 +219,16 @@ microVUx(void) mVUflagPass(u32 startPC, u32 xCount) {
#define branchType3 else // Conditional Branch #define branchType3 else // Conditional Branch
// Checks if the first 4 instructions of a block will read flags // Checks if the first 4 instructions of a block will read flags
microVUt(void) mVUsetFlagInfo() { microVUt(void) mVUsetFlagInfo(mV) {
microVU* mVU = mVUx; branchType1 { incPC(-1); mVUflagPass(mVU, branchAddr, 4); incPC(1); }
branchType1 { incPC(-1); mVUflagPass<vuIndex>(branchAddr, 4); incPC(1); }
branchType2 { mVUflagInfo |= 0xfff; } branchType2 { mVUflagInfo |= 0xfff; }
branchType3 { branchType3 {
incPC(-1); incPC(-1);
mVUflagPass<vuIndex>(branchAddr, 4); mVUflagPass(mVU, branchAddr, 4);
int backupFlagInfo = mVUflagInfo; int backupFlagInfo = mVUflagInfo;
mVUflagInfo = 0; mVUflagInfo = 0;
incPC(4); // Branch Not Taken incPC(4); // Branch Not Taken
mVUflagPass<vuIndex>(xPC, 4); mVUflagPass(mVU, xPC, 4);
incPC(-3); incPC(-3);
mVUflagInfo |= backupFlagInfo; mVUflagInfo |= backupFlagInfo;
} }
View File
@ -38,7 +38,7 @@ microVUx(void) __mVULog(const char* fmt, ...) {
#define commaIf() { if (bitX[6]) { mVUlog(","); bitX[6] = 0; } } #define commaIf() { if (bitX[6]) { mVUlog(","); bitX[6] = 0; } }
microVUt(void) __mVUdumpProgram(int progIndex) { microVUx(void) __mVUdumpProgram(int progIndex) {
microVU* mVU = mVUx; microVU* mVU = mVUx;
bool bitX[7]; bool bitX[7];
char str[30]; char str[30];
@ -46,6 +46,7 @@ microVUt(void) __mVUdumpProgram(int progIndex) {
int bBranch = mVUbranch; int bBranch = mVUbranch;
int bCode = mVU->code; int bCode = mVU->code;
int bPC = iPC; int bPC = iPC;
int vuIndex = (mVU == &microVU1) ? 1 : 0;
mVUbranch = 0; mVUbranch = 0;
sprintf(str, "%s\\microVU%d prog - %02d.html", LOGS_DIR, vuIndex, progIndex); sprintf(str, "%s\\microVU%d prog - %02d.html", LOGS_DIR, vuIndex, progIndex);
@ -88,7 +89,7 @@ microVUt(void) __mVUdumpProgram(int progIndex) {
iPC = (i+1); iPC = (i+1);
mVUlog("<a name=\"addr%04x\">", i*4); mVUlog("<a name=\"addr%04x\">", i*4);
mVUlog("[%04x] (%08x)</a> ", i*4, mVU->code); mVUlog("[%04x] (%08x)</a> ", i*4, mVU->code);
mVUopU<vuIndex, 2>(); mVUopU(mVU, 2);
if (bitX[5]) { if (bitX[5]) {
mVUlog(" ("); mVUlog(" (");
@ -110,7 +111,7 @@ microVUt(void) __mVUdumpProgram(int progIndex) {
} }
else { else {
mVUlog("<br>\n[%04x] (%08x) ", i*4, mVU->code); mVUlog("<br>\n[%04x] (%08x) ", i*4, mVU->code);
mVUopL<vuIndex, 2>(); mVUopL(mVU, 2);
mVUlog("\n\n<br><br>"); mVUlog("\n\n<br><br>");
} }
} }
File diff suppressed because it is too large
View File
@ -105,7 +105,9 @@ declareAllVariables
#define _Tbit_ (1<<27) #define _Tbit_ (1<<27)
#define _MDTbit_ 0 //( _Mbit_ | _Dbit_ | _Tbit_ ) // ToDo: Implement this stuff... #define _MDTbit_ 0 //( _Mbit_ | _Dbit_ | _Tbit_ ) // ToDo: Implement this stuff...
#define getVUmem(x) (((vuIndex == 1) ? (x & 0x3ff) : ((x >= 0x400) ? (x & 0x43f) : (x & 0xff))) * 16) #define isVU1 (mVU == &microVU1)
#define getIndex (isVU1 ? 1 : 0)
#define getVUmem(x) (((isVU1) ? (x & 0x3ff) : ((x >= 0x400) ? (x & 0x43f) : (x & 0xff))) * 16)
#define offsetSS ((_X) ? (0) : ((_Y) ? (4) : ((_Z) ? 8: 12))) #define offsetSS ((_X) ? (0) : ((_Y) ? (4) : ((_Z) ? 8: 12)))
#define offsetReg ((_X) ? (0) : ((_Y) ? (1) : ((_Z) ? 2: 3))) #define offsetReg ((_X) ? (0) : ((_Y) ? (1) : ((_Z) ? 2: 3)))
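A couple of worked values for the getVUmem macro above (inputs are quadword indices, results are byte offsets; values picked for illustration):

//   VU1:  getVUmem(0x7FF) = (0x7FF & 0x3ff) * 16 = 0x3FF * 16 = 0x3FF0   (wraps to VU1's 16KB data memory)
//   VU0:  getVUmem(0x123) = (0x123 & 0xff)  * 16 = 0x023 * 16 = 0x0230   (wraps to VU0's 4KB data memory)
//   VU0:  getVUmem(0x405) = (0x405 & 0x43f) * 16 = 0x405 * 16 = 0x4050   (the >= 0x400 window, VU0's view of VU1's registers)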
@ -136,15 +138,19 @@ declareAllVariables
#define gprF2 6 // Status Flag 2 #define gprF2 6 // Status Flag 2
#define gprF3 7 // Status Flag 3 #define gprF3 7 // Status Flag 3
// Template Stuff
#define mVUx (vuIndex ? &microVU1 : &microVU0)
#define microVUt(aType) template<int vuIndex> __forceinline aType
#define microVUx(aType) template<int vuIndex> aType
#define microVUf(aType) template<int vuIndex> aType
#define microVUq(aType) template<int vuIndex> __forceinline aType
// Function Params // Function Params
#define mP microVU* mVU, int recPass
#define mV microVU* mVU
#define mF int recPass #define mF int recPass
#define mX mVU, recPass
// Function/Template Stuff
#define mVUx (vuIndex ? &microVU1 : &microVU0)
#define mVUop(opName) void opName (mP)
#define microVUt(aType) __forceinline aType
#define microVUx(aType) template<int vuIndex> aType
#define microVUf(aType) template<int vuIndex> __forceinline aType
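To make the new calling convention concrete, a hypothetical opcode and caller written against these macros (mVU_FOO and mVUcaller are made-up names, shown only for the expansions noted in the comments):

mVUop(mVU_FOO) {                     // expands to:  void mVU_FOO (microVU* mVU, int recPass)
	pass1 { /* analyze operands */ }           // runs when recPass == 0
	pass2 { /* emit x86 through mVU-> */ }     // runs on the emit pass
}
microVUt(void) mVUcaller(mP) {       // expands to:  __forceinline void mVUcaller(microVU* mVU, int recPass)
	mVU_FOO(mX);                     // mX expands to:  mVU, recPass
}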
// Define Passes // Define Passes
#define pass1 if (recPass == 0) #define pass1 if (recPass == 0)
@ -176,9 +182,9 @@ declareAllVariables
#define setCode() { mVU->code = curI; } #define setCode() { mVU->code = curI; }
#define incPC(x) { iPC = ((iPC + x) & (mVU->progSize-1)); setCode(); } #define incPC(x) { iPC = ((iPC + x) & (mVU->progSize-1)); setCode(); }
#define incPC2(x) { iPC = ((iPC + x) & (mVU->progSize-1)); } #define incPC2(x) { iPC = ((iPC + x) & (mVU->progSize-1)); }
#define incCycles(x) { mVUincCycles<vuIndex>(x); } #define incCycles(x) { mVUincCycles(mVU, x); }
#define bSaveAddr (((xPC + (2 * 8)) & ((vuIndex) ? 0x3ff8:0xff8)) / 8) #define bSaveAddr (((xPC + (2 * 8)) & ((isVU1) ? 0x3ff8:0xff8)) / 8)
#define branchAddr ((xPC + 8 + (_Imm11_ * 8)) & ((vuIndex) ? 0x3ff8:0xff8)) #define branchAddr ((xPC + 8 + (_Imm11_ * 8)) & ((isVU1) ? 0x3ff8 : 0xff8))
#define shufflePQ (((mVU->p) ? 0xb0 : 0xe0) | ((mVU->q) ? 0x01 : 0x04)) #define shufflePQ (((mVU->p) ? 0xb0 : 0xe0) | ((mVU->q) ? 0x01 : 0x04))
#define Rmem (uptr)&mVU->regs->VI[REG_R].UL #define Rmem (uptr)&mVU->regs->VI[REG_R].UL
#define Roffset (uptr)&mVU->regs->VI[9].UL #define Roffset (uptr)&mVU->regs->VI[9].UL
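A quick worked example for the branchAddr macro a few lines up (made-up values):

//   VU1 (16KB program memory, wrap mask 0x3ff8), with xPC = 0x100 and _Imm11_ = -4:
//     branchAddr = (0x100 + 8 + (-4 * 8)) & 0x3ff8 = (0x108 - 0x20) & 0x3ff8 = 0x0E8
//   VU0 uses the 0xff8 mask instead, keeping the target inside its 4KB program space.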
@ -288,7 +294,7 @@ declareAllVariables
// Program Logging... // Program Logging...
#ifdef mVUlogProg #ifdef mVUlogProg
#define mVUlog __mVULog<vuIndex> #define mVUlog ((isVU1) ? __mVULog<1> : __mVULog<0>)
#define mVUdumpProg __mVUdumpProgram<vuIndex> #define mVUdumpProg __mVUdumpProgram<vuIndex>
#else #else
#define mVUlog 0&& #define mVUlog 0&&
@ -309,7 +315,8 @@ declareAllVariables
uptr diff = ptr - start; \ uptr diff = ptr - start; \
if (diff >= limit) { \ if (diff >= limit) { \
Console::Error("microVU Error: Program went over its cache limit. Size = 0x%x", params diff); \ Console::Error("microVU Error: Program went over its cache limit. Size = 0x%x", params diff); \
mVUreset<vuIndex>(); \ if (!isVU1) mVUreset<0>(); \
else mVUreset<1>(); \
} \ } \
} }
View File
@ -23,7 +23,7 @@
//------------------------------------------------------------------ //------------------------------------------------------------------
// Used for Result Clamping // Used for Result Clamping
microVUx(void) mVUclamp1(int reg, int regT1, int xyzw) { void mVUclamp1(int reg, int regT1, int xyzw) {
switch (xyzw) { switch (xyzw) {
case 1: case 2: case 4: case 8: case 1: case 2: case 4: case 8:
SSE_MINSS_XMM_to_XMM(reg, xmmMax); SSE_MINSS_XMM_to_XMM(reg, xmmMax);
@ -37,7 +37,7 @@ microVUx(void) mVUclamp1(int reg, int regT1, int xyzw) {
} }
// Used for Operand Clamping // Used for Operand Clamping
microVUx(void) mVUclamp2(int reg, int regT1, int xyzw) { void mVUclamp2(int reg, int regT1, int xyzw) {
if (CHECK_VU_SIGN_OVERFLOW) { if (CHECK_VU_SIGN_OVERFLOW) {
switch (xyzw) { switch (xyzw) {
case 1: case 2: case 4: case 8: case 1: case 2: case 4: case 8:
@ -56,14 +56,14 @@ microVUx(void) mVUclamp2(int reg, int regT1, int xyzw) {
break; break;
} }
} }
else mVUclamp1<vuIndex>(reg, regT1, xyzw); else mVUclamp1(reg, regT1, xyzw);
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
// Micro VU - Misc Functions // Micro VU - Misc Functions
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUx(void) mVUunpack_xyzw(int dstreg, int srcreg, int xyzw) { void mVUunpack_xyzw(int dstreg, int srcreg, int xyzw) {
switch ( xyzw ) { switch ( xyzw ) {
case 0: SSE2_PSHUFD_XMM_to_XMM(dstreg, srcreg, 0x00); break; case 0: SSE2_PSHUFD_XMM_to_XMM(dstreg, srcreg, 0x00); break;
case 1: SSE2_PSHUFD_XMM_to_XMM(dstreg, srcreg, 0x55); break; case 1: SSE2_PSHUFD_XMM_to_XMM(dstreg, srcreg, 0x55); break;
@ -72,7 +72,7 @@ microVUx(void) mVUunpack_xyzw(int dstreg, int srcreg, int xyzw) {
} }
} }
microVUx(void) mVUloadReg(int reg, uptr offset, int xyzw) { void mVUloadReg(int reg, uptr offset, int xyzw) {
switch( xyzw ) { switch( xyzw ) {
case 8: SSE_MOVSS_M32_to_XMM(reg, offset); break; // X case 8: SSE_MOVSS_M32_to_XMM(reg, offset); break; // X
case 4: SSE_MOVSS_M32_to_XMM(reg, offset+4); break; // Y case 4: SSE_MOVSS_M32_to_XMM(reg, offset+4); break; // Y
@ -82,7 +82,7 @@ microVUx(void) mVUloadReg(int reg, uptr offset, int xyzw) {
} }
} }
microVUx(void) mVUloadReg2(int reg, int gprReg, uptr offset, int xyzw) { void mVUloadReg2(int reg, int gprReg, uptr offset, int xyzw) {
switch( xyzw ) { switch( xyzw ) {
case 8: SSE_MOVSS_Rm_to_XMM(reg, gprReg, offset); break; // X case 8: SSE_MOVSS_Rm_to_XMM(reg, gprReg, offset); break; // X
case 4: SSE_MOVSS_Rm_to_XMM(reg, gprReg, offset+4); break; // Y case 4: SSE_MOVSS_Rm_to_XMM(reg, gprReg, offset+4); break; // Y
@ -93,7 +93,7 @@ microVUx(void) mVUloadReg2(int reg, int gprReg, uptr offset, int xyzw) {
} }
// Modifies the Source Reg! // Modifies the Source Reg!
microVUx(void) mVUsaveReg(int reg, uptr offset, int xyzw, bool modXYZW) { void mVUsaveReg(int reg, uptr offset, int xyzw, bool modXYZW) {
/*SSE_MOVAPS_M128_to_XMM(xmmT2, offset); /*SSE_MOVAPS_M128_to_XMM(xmmT2, offset);
if (modXYZW && (xyzw == 8 || xyzw == 4 || xyzw == 2 || xyzw == 1)) { if (modXYZW && (xyzw == 8 || xyzw == 4 || xyzw == 2 || xyzw == 1)) {
mVUunpack_xyzw<vuIndex>(reg, reg, 0); mVUunpack_xyzw<vuIndex>(reg, reg, 0);
@ -135,13 +135,13 @@ microVUx(void) mVUsaveReg(int reg, uptr offset, int xyzw, bool modXYZW) {
SSE_MOVHLPS_XMM_to_XMM(reg, reg); SSE_MOVHLPS_XMM_to_XMM(reg, reg);
SSE_MOVSS_XMM_to_M32(offset+8, reg); SSE_MOVSS_XMM_to_M32(offset+8, reg);
break; // XYZ break; // XYZ
case 4: if (!modXYZW) mVUunpack_xyzw<vuIndex>(reg, reg, 1); case 4: if (!modXYZW) mVUunpack_xyzw(reg, reg, 1);
SSE_MOVSS_XMM_to_M32(offset+4, reg); SSE_MOVSS_XMM_to_M32(offset+4, reg);
break; // Y break; // Y
case 2: if (!modXYZW) mVUunpack_xyzw<vuIndex>(reg, reg, 2); case 2: if (!modXYZW) mVUunpack_xyzw(reg, reg, 2);
SSE_MOVSS_XMM_to_M32(offset+8, reg); SSE_MOVSS_XMM_to_M32(offset+8, reg);
break; // Z break; // Z
case 1: if (!modXYZW) mVUunpack_xyzw<vuIndex>(reg, reg, 3); case 1: if (!modXYZW) mVUunpack_xyzw(reg, reg, 3);
SSE_MOVSS_XMM_to_M32(offset+12, reg); SSE_MOVSS_XMM_to_M32(offset+12, reg);
break; // W break; // W
case 8: SSE_MOVSS_XMM_to_M32(offset, reg); break; // X case 8: SSE_MOVSS_XMM_to_M32(offset, reg); break; // X
@ -152,7 +152,7 @@ microVUx(void) mVUsaveReg(int reg, uptr offset, int xyzw, bool modXYZW) {
} }
// Modifies the Source Reg! // Modifies the Source Reg!
microVUx(void) mVUsaveReg2(int reg, int gprReg, u32 offset, int xyzw) { void mVUsaveReg2(int reg, int gprReg, u32 offset, int xyzw) {
/*SSE_MOVAPSRmtoR(xmmT2, gprReg, offset); /*SSE_MOVAPSRmtoR(xmmT2, gprReg, offset);
if (xyzw == 8 || xyzw == 4 || xyzw == 2 || xyzw == 1) { if (xyzw == 8 || xyzw == 4 || xyzw == 2 || xyzw == 1) {
mVUunpack_xyzw<vuIndex>(reg, reg, 0); mVUunpack_xyzw<vuIndex>(reg, reg, 0);
@ -262,8 +262,8 @@ void mVUmergeRegs(int dest, int src, int xyzw) {
} }
// Transforms the Address in gprReg to valid VU0/VU1 Address // Transforms the Address in gprReg to valid VU0/VU1 Address
microVUt(void) mVUaddrFix(int gprReg) { microVUt(void) mVUaddrFix(mV, int gprReg) {
if (vuIndex) { if (mVU == &microVU1) {
AND32ItoR(gprReg, 0x3ff); // wrap around AND32ItoR(gprReg, 0x3ff); // wrap around
SHL32ItoR(gprReg, 4); SHL32ItoR(gprReg, 4);
} }
@ -281,15 +281,13 @@ microVUt(void) mVUaddrFix(int gprReg) {
} }
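As a concrete example of the VU1 path of mVUaddrFix above (made-up input value):

//   gprReg = 0x0C05  --AND 0x3ff-->  0x005  --SHL 4-->  0x0050
//   i.e. the quadword index is wrapped to VU1's 1024-QW data memory and scaled to a byte
//   offset -- the run-time counterpart of what getVUmem does for compile-time addresses.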
// Backup Volatile Regs (EAX, ECX, EDX, MM0~7, XMM0~7, are all volatile according to 32bit Win/Linux ABI) // Backup Volatile Regs (EAX, ECX, EDX, MM0~7, XMM0~7, are all volatile according to 32bit Win/Linux ABI)
microVUt(void) mVUbackupRegs() { microVUt(void) mVUbackupRegs(mV) {
microVU* mVU = mVUx;
SSE_MOVAPS_XMM_to_M128((uptr)&mVU->regs->ACC.UL[0], xmmACC); SSE_MOVAPS_XMM_to_M128((uptr)&mVU->regs->ACC.UL[0], xmmACC);
SSE_MOVAPS_XMM_to_M128((uptr)&mVU->xmmPQb[0], xmmPQ); SSE_MOVAPS_XMM_to_M128((uptr)&mVU->xmmPQb[0], xmmPQ);
} }
// Restore Volatile Regs // Restore Volatile Regs
microVUt(void) mVUrestoreRegs() { microVUt(void) mVUrestoreRegs(mV) {
microVU* mVU = mVUx;
SSE_MOVAPS_M128_to_XMM(xmmACC, (uptr)&mVU->regs->ACC.UL[0]); SSE_MOVAPS_M128_to_XMM(xmmACC, (uptr)&mVU->regs->ACC.UL[0]);
SSE_MOVAPS_M128_to_XMM(xmmPQ, (uptr)&mVU->xmmPQb[0]); SSE_MOVAPS_M128_to_XMM(xmmPQ, (uptr)&mVU->xmmPQb[0]);
SSE_MOVAPS_M128_to_XMM(xmmMax, (uptr)mVU_maxvals); SSE_MOVAPS_M128_to_XMM(xmmMax, (uptr)mVU_maxvals);
@ -298,15 +296,14 @@ microVUt(void) mVUrestoreRegs() {
} }
// Reads entire microProgram and finds out if Status Flag is Used // Reads entire microProgram and finds out if Status Flag is Used
microVUt(void) mVUcheckSflag(int progIndex) { microVUt(void) mVUcheckSflag(mV, int progIndex) {
if (CHECK_VU_FLAGHACK1) { if (CHECK_VU_FLAGHACK1) {
microVU* mVU = mVUx;
int bFlagInfo = mVUflagInfo; int bFlagInfo = mVUflagInfo;
int bCode = mVU->code; int bCode = mVU->code;
mVUsFlagHack = 1; mVUsFlagHack = 1;
for (u32 i = 0; i < mVU->progSize; i+=2) { for (u32 i = 0; i < mVU->progSize; i+=2) {
mVU->code = mVU->prog.prog[progIndex].data[i]; mVU->code = mVU->prog.prog[progIndex].data[i];
mVUopL<vuIndex>(3); mVUopL(mVU, 3);
} }
mVUflagInfo = bFlagInfo; mVUflagInfo = bFlagInfo;
mVU->code = bCode; mVU->code = bCode;
View File
@ -21,182 +21,182 @@
//------------------------------------------------------------------ //------------------------------------------------------------------
// Declarations // Declarations
//------------------------------------------------------------------ //------------------------------------------------------------------
#define mVUgetCode (vuIndex ? microVU1.code : microVU0.code) #define mVUgetCode (mVU->code)
microVUf(void) mVU_UPPER_FD_00(mF); mVUop(mVU_UPPER_FD_00);
microVUf(void) mVU_UPPER_FD_01(mF); mVUop(mVU_UPPER_FD_01);
microVUf(void) mVU_UPPER_FD_10(mF); mVUop(mVU_UPPER_FD_10);
microVUf(void) mVU_UPPER_FD_11(mF); mVUop(mVU_UPPER_FD_11);
microVUf(void) mVULowerOP(mF); mVUop(mVULowerOP);
microVUf(void) mVULowerOP_T3_00(mF); mVUop(mVULowerOP_T3_00);
microVUf(void) mVULowerOP_T3_01(mF); mVUop(mVULowerOP_T3_01);
microVUf(void) mVULowerOP_T3_10(mF); mVUop(mVULowerOP_T3_10);
microVUf(void) mVULowerOP_T3_11(mF); mVUop(mVULowerOP_T3_11);
microVUf(void) mVUunknown(mF); mVUop(mVUunknown);
//------------------------------------------------------------------ //------------------------------------------------------------------
//------------------------------------------------------------------ //------------------------------------------------------------------
// Opcode Tables // Opcode Tables
//------------------------------------------------------------------ //------------------------------------------------------------------
#define microVU_LOWER_OPCODE(x) void (*mVULOWER_OPCODE##x [128])(mF) = { \ #define microVU_LOWER_OPCODE(x) void (*mVULOWER_OPCODE##x [128])(mP) = { \
mVU_LQ<x> , mVU_SQ<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_LQ , mVU_SQ , mVUunknown , mVUunknown, \
mVU_ILW<x> , mVU_ISW<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_ILW , mVU_ISW , mVUunknown , mVUunknown, \
mVU_IADDIU<x> , mVU_ISUBIU<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_IADDIU , mVU_ISUBIU , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_FCEQ<x> , mVU_FCSET<x> , mVU_FCAND<x> , mVU_FCOR<x>, /* 0x10 */ \ mVU_FCEQ , mVU_FCSET , mVU_FCAND , mVU_FCOR, /* 0x10 */ \
mVU_FSEQ<x> , mVU_FSSET<x> , mVU_FSAND<x> , mVU_FSOR<x>, \ mVU_FSEQ , mVU_FSSET , mVU_FSAND , mVU_FSOR, \
mVU_FMEQ<x> , mVUunknown<x> , mVU_FMAND<x> , mVU_FMOR<x>, \ mVU_FMEQ , mVUunknown , mVU_FMAND , mVU_FMOR, \
mVU_FCGET<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_FCGET , mVUunknown , mVUunknown , mVUunknown, \
mVU_B<x> , mVU_BAL<x> , mVUunknown<x> , mVUunknown<x>, /* 0x20 */ \ mVU_B , mVU_BAL , mVUunknown , mVUunknown, /* 0x20 */ \
mVU_JR<x> , mVU_JALR<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_JR , mVU_JALR , mVUunknown , mVUunknown, \
mVU_IBEQ<x> , mVU_IBNE<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_IBEQ , mVU_IBNE , mVUunknown , mVUunknown, \
mVU_IBLTZ<x> , mVU_IBGTZ<x> , mVU_IBLEZ<x> , mVU_IBGEZ<x>, \ mVU_IBLTZ , mVU_IBGTZ , mVU_IBLEZ , mVU_IBGEZ, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x30 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x30 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVULowerOP<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x40*/ \ mVULowerOP , mVUunknown , mVUunknown , mVUunknown, /* 0x40*/ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x50 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x50 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x60 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x60 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x70 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x70 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
}; };
#define microVU_LowerOP_T3_00_OPCODE(x) void (*mVULowerOP_T3_00_OPCODE##x [32])(mF) = { \ #define microVU_LowerOP_T3_00_OPCODE(x) void (*mVULowerOP_T3_00_OPCODE##x [32])(mP) = { \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_MOVE<x> , mVU_LQI<x> , mVU_DIV<x> , mVU_MTIR<x>, \ mVU_MOVE , mVU_LQI , mVU_DIV , mVU_MTIR, \
mVU_RNEXT<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x10 */ \ mVU_RNEXT , mVUunknown , mVUunknown , mVUunknown, /* 0x10 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVU_MFP<x> , mVU_XTOP<x> , mVU_XGKICK<x>, \ mVUunknown , mVU_MFP , mVU_XTOP , mVU_XGKICK, \
mVU_ESADD<x> , mVU_EATANxy<x>, mVU_ESQRT<x> , mVU_ESIN<x>, \ mVU_ESADD , mVU_EATANxy , mVU_ESQRT , mVU_ESIN, \
}; };
#define microVU_LowerOP_T3_01_OPCODE(x) void (*mVULowerOP_T3_01_OPCODE##x [32])(mF) = { \ #define microVU_LowerOP_T3_01_OPCODE(x) void (*mVULowerOP_T3_01_OPCODE##x [32])(mP) = { \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_MR32<x> , mVU_SQI<x> , mVU_SQRT<x> , mVU_MFIR<x>, \ mVU_MR32 , mVU_SQI , mVU_SQRT , mVU_MFIR, \
mVU_RGET<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x10 */ \ mVU_RGET , mVUunknown , mVUunknown , mVUunknown, /* 0x10 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVU_XITOP<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVU_XITOP , mVUunknown, \
mVU_ERSADD<x> , mVU_EATANxz<x>, mVU_ERSQRT<x> , mVU_EATAN<x>, \ mVU_ERSADD , mVU_EATANxz , mVU_ERSQRT , mVU_EATAN, \
}; };
#define microVU_LowerOP_T3_10_OPCODE(x) void (*mVULowerOP_T3_10_OPCODE##x [32])(mF) = { \ #define microVU_LowerOP_T3_10_OPCODE(x) void (*mVULowerOP_T3_10_OPCODE##x [32])(mP) = { \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVU_LQD<x> , mVU_RSQRT<x> , mVU_ILWR<x>, \ mVUunknown , mVU_LQD , mVU_RSQRT , mVU_ILWR, \
mVU_RINIT<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x10 */ \ mVU_RINIT , mVUunknown , mVUunknown , mVUunknown, /* 0x10 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_ELENG<x> , mVU_ESUM<x> , mVU_ERCPR<x> , mVU_EEXP<x>, \ mVU_ELENG , mVU_ESUM , mVU_ERCPR , mVU_EEXP, \
}; };
#define microVU_LowerOP_T3_11_OPCODE(x) void (*mVULowerOP_T3_11_OPCODE##x [32])(mF) = { \ #define microVU_LowerOP_T3_11_OPCODE(x) void (*mVULowerOP_T3_11_OPCODE##x [32])(mP) = { \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVU_SQD<x> , mVU_WAITQ<x> , mVU_ISWR<x>, \ mVUunknown , mVU_SQD , mVU_WAITQ , mVU_ISWR, \
mVU_RXOR<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x10 */ \ mVU_RXOR , mVUunknown , mVUunknown , mVUunknown, /* 0x10 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_ERLENG<x> , mVUunknown<x> , mVU_WAITP<x> , mVUunknown<x>, \ mVU_ERLENG , mVUunknown , mVU_WAITP , mVUunknown, \
}; };
#define microVU_LowerOP_OPCODE(x) void (*mVULowerOP_OPCODE##x [64])(mF) = { \ #define microVU_LowerOP_OPCODE(x) void (*mVULowerOP_OPCODE##x [64])(mP) = { \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x10 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x10 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x20 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x20 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_IADD<x> , mVU_ISUB<x> , mVU_IADDI<x> , mVUunknown<x>, /* 0x30 */ \ mVU_IADD , mVU_ISUB , mVU_IADDI , mVUunknown, /* 0x30 */ \
mVU_IAND<x> , mVU_IOR<x> , mVUunknown<x> , mVUunknown<x>, \ mVU_IAND , mVU_IOR , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVULowerOP_T3_00<x>, mVULowerOP_T3_01<x>, mVULowerOP_T3_10<x>, mVULowerOP_T3_11<x>, \ mVULowerOP_T3_00, mVULowerOP_T3_01, mVULowerOP_T3_10, mVULowerOP_T3_11, \
}; };
#define microVU_UPPER_OPCODE(x) void (*mVU_UPPER_OPCODE##x [64])(mF) = { \ #define microVU_UPPER_OPCODE(x) void (*mVU_UPPER_OPCODE##x [64])(mP) = { \
mVU_ADDx<x> , mVU_ADDy<x> , mVU_ADDz<x> , mVU_ADDw<x>, \ mVU_ADDx , mVU_ADDy , mVU_ADDz , mVU_ADDw, \
mVU_SUBx<x> , mVU_SUBy<x> , mVU_SUBz<x> , mVU_SUBw<x>, \ mVU_SUBx , mVU_SUBy , mVU_SUBz , mVU_SUBw, \
mVU_MADDx<x> , mVU_MADDy<x> , mVU_MADDz<x> , mVU_MADDw<x>, \ mVU_MADDx , mVU_MADDy , mVU_MADDz , mVU_MADDw, \
mVU_MSUBx<x> , mVU_MSUBy<x> , mVU_MSUBz<x> , mVU_MSUBw<x>, \ mVU_MSUBx , mVU_MSUBy , mVU_MSUBz , mVU_MSUBw, \
mVU_MAXx<x> , mVU_MAXy<x> , mVU_MAXz<x> , mVU_MAXw<x>, /* 0x10 */ \ mVU_MAXx , mVU_MAXy , mVU_MAXz , mVU_MAXw, /* 0x10 */ \
mVU_MINIx<x> , mVU_MINIy<x> , mVU_MINIz<x> , mVU_MINIw<x>, \ mVU_MINIx , mVU_MINIy , mVU_MINIz , mVU_MINIw, \
mVU_MULx<x> , mVU_MULy<x> , mVU_MULz<x> , mVU_MULw<x>, \ mVU_MULx , mVU_MULy , mVU_MULz , mVU_MULw, \
mVU_MULq<x> , mVU_MAXi<x> , mVU_MULi<x> , mVU_MINIi<x>, \ mVU_MULq , mVU_MAXi , mVU_MULi , mVU_MINIi, \
mVU_ADDq<x> , mVU_MADDq<x> , mVU_ADDi<x> , mVU_MADDi<x>, /* 0x20 */ \ mVU_ADDq , mVU_MADDq , mVU_ADDi , mVU_MADDi, /* 0x20 */ \
mVU_SUBq<x> , mVU_MSUBq<x> , mVU_SUBi<x> , mVU_MSUBi<x>, \ mVU_SUBq , mVU_MSUBq , mVU_SUBi , mVU_MSUBi, \
mVU_ADD<x> , mVU_MADD<x> , mVU_MUL<x> , mVU_MAX<x>, \ mVU_ADD , mVU_MADD , mVU_MUL , mVU_MAX, \
mVU_SUB<x> , mVU_MSUB<x> , mVU_OPMSUB<x> , mVU_MINI<x>, \ mVU_SUB , mVU_MSUB , mVU_OPMSUB , mVU_MINI, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, /* 0x30 */ \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, /* 0x30 */ \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVU_UPPER_FD_00<x>, mVU_UPPER_FD_01<x>, mVU_UPPER_FD_10<x>, mVU_UPPER_FD_11<x>, \ mVU_UPPER_FD_00, mVU_UPPER_FD_01, mVU_UPPER_FD_10, mVU_UPPER_FD_11, \
}; };
#define microVU_UPPER_FD_00_TABLE(x) void (*mVU_UPPER_FD_00_TABLE##x [32])(mF) = { \ #define microVU_UPPER_FD_00_TABLE(x) void (*mVU_UPPER_FD_00_TABLE##x [32])(mP) = { \
mVU_ADDAx<x> , mVU_SUBAx<x> , mVU_MADDAx<x> , mVU_MSUBAx<x>, \ mVU_ADDAx , mVU_SUBAx , mVU_MADDAx , mVU_MSUBAx, \
mVU_ITOF0<x> , mVU_FTOI0<x> , mVU_MULAx<x> , mVU_MULAq<x>, \ mVU_ITOF0 , mVU_FTOI0 , mVU_MULAx , mVU_MULAq, \
mVU_ADDAq<x> , mVU_SUBAq<x> , mVU_ADDA<x> , mVU_SUBA<x>, \ mVU_ADDAq , mVU_SUBAq , mVU_ADDA , mVU_SUBA, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
}; };
#define microVU_UPPER_FD_01_TABLE(x) void (* mVU_UPPER_FD_01_TABLE##x [32])(mF) = { \ #define microVU_UPPER_FD_01_TABLE(x) void (* mVU_UPPER_FD_01_TABLE##x [32])(mP) = { \
mVU_ADDAy<x> , mVU_SUBAy<x> , mVU_MADDAy<x> , mVU_MSUBAy<x>, \ mVU_ADDAy , mVU_SUBAy , mVU_MADDAy , mVU_MSUBAy, \
mVU_ITOF4<x> , mVU_FTOI4<x> , mVU_MULAy<x> , mVU_ABS<x>, \ mVU_ITOF4 , mVU_FTOI4 , mVU_MULAy , mVU_ABS, \
mVU_MADDAq<x> , mVU_MSUBAq<x> , mVU_MADDA<x> , mVU_MSUBA<x>, \ mVU_MADDAq , mVU_MSUBAq , mVU_MADDA , mVU_MSUBA, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
}; };
#define microVU_UPPER_FD_10_TABLE(x) void (* mVU_UPPER_FD_10_TABLE##x [32])(mF) = { \ #define microVU_UPPER_FD_10_TABLE(x) void (* mVU_UPPER_FD_10_TABLE##x [32])(mP) = { \
mVU_ADDAz<x> , mVU_SUBAz<x> , mVU_MADDAz<x> , mVU_MSUBAz<x>, \ mVU_ADDAz , mVU_SUBAz , mVU_MADDAz , mVU_MSUBAz, \
mVU_ITOF12<x> , mVU_FTOI12<x> , mVU_MULAz<x> , mVU_MULAi<x>, \ mVU_ITOF12 , mVU_FTOI12 , mVU_MULAz , mVU_MULAi, \
mVU_ADDAi<x> , mVU_SUBAi<x> , mVU_MULA<x> , mVU_OPMULA<x>, \ mVU_ADDAi , mVU_SUBAi , mVU_MULA , mVU_OPMULA, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
}; };
#define microVU_UPPER_FD_11_TABLE(x) void (* mVU_UPPER_FD_11_TABLE##x [32])(mF) = { \ #define microVU_UPPER_FD_11_TABLE(x) void (* mVU_UPPER_FD_11_TABLE##x [32])(mP) = { \
mVU_ADDAw<x> , mVU_SUBAw<x> , mVU_MADDAw<x> , mVU_MSUBAw<x>, \ mVU_ADDAw , mVU_SUBAw , mVU_MADDAw , mVU_MSUBAw, \
mVU_ITOF15<x> , mVU_FTOI15<x> , mVU_MULAw<x> , mVU_CLIP<x>, \ mVU_ITOF15 , mVU_FTOI15 , mVU_MULAw , mVU_CLIP, \
mVU_MADDAi<x> , mVU_MSUBAi<x> , mVUunknown<x> , mVU_NOP<x>, \ mVU_MADDAi , mVU_MSUBAi , mVUunknown , mVU_NOP, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
mVUunknown<x> , mVUunknown<x> , mVUunknown<x> , mVUunknown<x>, \ mVUunknown , mVUunknown , mVUunknown , mVUunknown, \
}; };
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -218,29 +218,27 @@ microVU_UPPER_FD_10_TABLE(x) \
microVU_UPPER_FD_11_TABLE(x) microVU_UPPER_FD_11_TABLE(x)
mVUcreateTable(0) mVUcreateTable(0)
mVUcreateTable(1)
//------------------------------------------------------------------ //------------------------------------------------------------------
// Table Functions // Table Functions
//------------------------------------------------------------------ //------------------------------------------------------------------
#define doTableStuff(tableName, args) { \ #define doTableStuff(tableName, args) { \
if (vuIndex) tableName##1[ args ](recPass); \ tableName##0[ args ](mX); \
else tableName##0[ args ](recPass); \
} }
microVUf(void) mVU_UPPER_FD_00(mF) { doTableStuff(mVU_UPPER_FD_00_TABLE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVU_UPPER_FD_00) { doTableStuff(mVU_UPPER_FD_00_TABLE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVU_UPPER_FD_01(mF) { doTableStuff(mVU_UPPER_FD_01_TABLE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVU_UPPER_FD_01) { doTableStuff(mVU_UPPER_FD_01_TABLE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVU_UPPER_FD_10(mF) { doTableStuff(mVU_UPPER_FD_10_TABLE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVU_UPPER_FD_10) { doTableStuff(mVU_UPPER_FD_10_TABLE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVU_UPPER_FD_11(mF) { doTableStuff(mVU_UPPER_FD_11_TABLE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVU_UPPER_FD_11) { doTableStuff(mVU_UPPER_FD_11_TABLE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVULowerOP(mF) { doTableStuff(mVULowerOP_OPCODE, (mVUgetCode & 0x3f)); } mVUop(mVULowerOP) { doTableStuff(mVULowerOP_OPCODE, (mVUgetCode & 0x3f)); }
microVUf(void) mVULowerOP_T3_00(mF) { doTableStuff(mVULowerOP_T3_00_OPCODE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVULowerOP_T3_00) { doTableStuff(mVULowerOP_T3_00_OPCODE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVULowerOP_T3_01(mF) { doTableStuff(mVULowerOP_T3_01_OPCODE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVULowerOP_T3_01) { doTableStuff(mVULowerOP_T3_01_OPCODE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVULowerOP_T3_10(mF) { doTableStuff(mVULowerOP_T3_10_OPCODE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVULowerOP_T3_10) { doTableStuff(mVULowerOP_T3_10_OPCODE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVULowerOP_T3_11(mF) { doTableStuff(mVULowerOP_T3_11_OPCODE, ((mVUgetCode >> 6) & 0x1f)); } mVUop(mVULowerOP_T3_11) { doTableStuff(mVULowerOP_T3_11_OPCODE, ((mVUgetCode >> 6) & 0x1f)); }
microVUf(void) mVUopU(mF) { doTableStuff(mVU_UPPER_OPCODE, (mVUgetCode & 0x3f)); } // Gets Upper Opcode mVUop(mVUopU) { doTableStuff(mVU_UPPER_OPCODE, (mVUgetCode & 0x3f)); } // Gets Upper Opcode
microVUf(void) mVUopL(mF) { doTableStuff(mVULOWER_OPCODE, (mVUgetCode >> 25)); } // Gets Lower Opcode mVUop(mVUopL) { doTableStuff(mVULOWER_OPCODE, (mVUgetCode >> 25)); } // Gets Lower Opcode
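Putting the table machinery together, dispatch now looks roughly like this (illustrative expansion):

//   mVUopU(mVU, 0);                                          // e.g. from the recompiler's first pass
//     -> doTableStuff(mVU_UPPER_OPCODE, (mVU->code & 0x3f))
//     -> mVU_UPPER_OPCODE0[mVU->code & 0x3f](mVU, recPass);  // mX == "mVU, recPass", here recPass == 0
// Only the "...0" tables are built now (mVUcreateTable(1) is gone); VU0/VU1 behavior is
// decided inside each handler from mVU itself via isVU1/getIndex.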
microVUf(void) mVUunknown(mF) { mVUop(mVUunknown) {
pass2 { SysPrintf("microVU%d: Unknown Micro VU opcode called (%x)\n", vuIndex, mVUgetCode); } pass2 { SysPrintf("microVU%d: Unknown Micro VU opcode called (%x)\n", getIndex, mVUgetCode); }
pass3 { mVUlog("Unknown", mVUgetCode); } pass3 { mVUlog("Unknown", mVUgetCode); }
} }
View File
@ -27,8 +27,7 @@
#define SHIFT_XYZW(gprReg) { if (_XYZW_SS && modXYZW && !_W) { SHL32ItoR(gprReg, ADD_XYZW); } } #define SHIFT_XYZW(gprReg) { if (_XYZW_SS && modXYZW && !_W) { SHL32ItoR(gprReg, ADD_XYZW); } }
// Note: If modXYZW is true, then it adjusts XYZW for Single Scalar operations // Note: If modXYZW is true, then it adjusts XYZW for Single Scalar operations
microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modXYZW) { microVUt(void) mVUupdateFlags(mV, int reg, int regT1, int regT2, int xyzw, bool modXYZW) {
microVU* mVU = mVUx;
int sReg, mReg = gprT1; int sReg, mReg = gprT1;
static u8 *pjmp, *pjmp2; static u8 *pjmp, *pjmp2;
static const u16 flipMask[16] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}; static const u16 flipMask[16] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15};
@ -40,7 +39,7 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
else { SSE2_PSHUFD_XMM_to_XMM(regT1, reg, 0x1B); } // Flip wzyx to xyzw else { SSE2_PSHUFD_XMM_to_XMM(regT1, reg, 0x1B); } // Flip wzyx to xyzw
if (doStatus) { if (doStatus) {
getFlagReg(sReg, fsInstance); // Set sReg to valid GPR by Cur Flag Instance getFlagReg(sReg, fsInstance); // Set sReg to valid GPR by Cur Flag Instance
mVUallocSFLAGa<vuIndex>(sReg, fpsInstance); // Get Prev Status Flag mVUallocSFLAGa(sReg, fpsInstance); // Get Prev Status Flag
AND32ItoR(sReg, 0xff0); // Keep Sticky and D/I flags AND32ItoR(sReg, 0xff0); // Keep Sticky and D/I flags
} }
@ -73,7 +72,7 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
if (_XYZW_SS && doStatus) x86SetJ8(pjmp2); // If we skipped the Zero Flag Checking, return here if (_XYZW_SS && doStatus) x86SetJ8(pjmp2); // If we skipped the Zero Flag Checking, return here
if (doMac) mVUallocMFLAGb<vuIndex>(mReg, fmInstance); // Set Mac Flag if (doMac) mVUallocMFLAGb(mVU, mReg, fmInstance); // Set Mac Flag
} }
//------------------------------------------------------------------ //------------------------------------------------------------------
@ -82,95 +81,88 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
// FMAC1 - Normal FMAC Opcodes // FMAC1 - Normal FMAC Opcodes
#define mVU_FMAC1(operation, OPname) { \ #define mVU_FMAC1(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, Fs, Ft; \ int Fd, Fs, Ft; \
mVUallocFMAC1a<vuIndex>(Fd, Fs, Ft); \ mVUallocFMAC1a(mVU, Fd, Fs, Ft); \
if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \
mVUallocFMAC1b<vuIndex>(Fd); \ mVUallocFMAC1b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \
} }
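For reference, an upper opcode is expected to use this wrapper along these lines (mVU_ADD is shown as a plausible client; the actual opcode bodies live in the upper-instruction file, outside this hunk):

// mVUop(mVU_ADD) { mVU_FMAC1(ADD, "ADD"); }
//   pass 1: mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, _Ft_) records register/flag usage;
//   pass 2: allocates Fd/Fs/Ft, emits SSE_ADDSS/ADDPS_XMM_to_XMM, calls mVUupdateFlags(mVU, ...)
//           and writes the result back with mVUallocFMAC1b(mVU, Fd);
//   pass 3: logs "ADD" plus its operands for the HTML program dump.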
// FMAC3 - BC(xyzw) FMAC Opcodes // FMAC3 - BC(xyzw) FMAC Opcodes
#define mVU_FMAC3(operation, OPname) { \ #define mVU_FMAC3(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC3(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC3<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, Fs, Ft; \ int Fd, Fs, Ft; \
mVUallocFMAC3a<vuIndex>(Fd, Fs, Ft); \ mVUallocFMAC3a(mVU, Fd, Fs, Ft); \
if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \
mVUallocFMAC3b<vuIndex>(Fd); \ mVUallocFMAC3b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogBC(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogBC(); } \
} }
// FMAC4 - FMAC Opcodes Storing Result to ACC // FMAC4 - FMAC Opcodes Storing Result to ACC
#define mVU_FMAC4(operation, OPname) { \ #define mVU_FMAC4(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int ACC, Fs, Ft; \ int ACC, Fs, Ft; \
mVUallocFMAC4a<vuIndex>(ACC, Fs, Ft); \ mVUallocFMAC4a(mVU, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC4b<vuIndex>(ACC, Fs); \ mVUallocFMAC4b(mVU, ACC, Fs); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogFt(); } \
} }
// FMAC5 - FMAC BC(xyzw) Opcodes Storing Result to ACC // FMAC5 - FMAC BC(xyzw) Opcodes Storing Result to ACC
#define mVU_FMAC5(operation, OPname) { \ #define mVU_FMAC5(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC3(mVU, 0, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC3<vuIndex>(0, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int ACC, Fs, Ft; \ int ACC, Fs, Ft; \
mVUallocFMAC5a<vuIndex>(ACC, Fs, Ft); \ mVUallocFMAC5a(mVU, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC5b<vuIndex>(ACC, Fs); \ mVUallocFMAC5b(mVU, ACC, Fs); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogBC(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogBC(); } \
} }
// FMAC6 - Normal FMAC Opcodes (I Reg) // FMAC6 - Normal FMAC Opcodes (I Reg)
#define mVU_FMAC6(operation, OPname) { \ #define mVU_FMAC6(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, 0); } \
pass2 { \ pass2 { \
int Fd, Fs, Ft; \ int Fd, Fs, Ft; \
mVUallocFMAC6a<vuIndex>(Fd, Fs, Ft); \ mVUallocFMAC6a(mVU, Fd, Fs, Ft); \
if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \
mVUallocFMAC6b<vuIndex>(Fd); \ mVUallocFMAC6b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogI(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogI(); } \
} }
// FMAC7 - FMAC Opcodes Storing Result to ACC (I Reg) // FMAC7 - FMAC Opcodes Storing Result to ACC (I Reg)
#define mVU_FMAC7(operation, OPname) { \ #define mVU_FMAC7(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, 0); } \
pass2 { \ pass2 { \
int ACC, Fs, Ft; \ int ACC, Fs, Ft; \
mVUallocFMAC7a<vuIndex>(ACC, Fs, Ft); \ mVUallocFMAC7a(mVU, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC7b<vuIndex>(ACC, Fs); \ mVUallocFMAC7b(mVU, ACC, Fs); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogI(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogI(); } \
} }
// FMAC8 - MADD FMAC Opcode Storing Result to Fd // FMAC8 - MADD FMAC Opcode Storing Result to Fd
#define mVU_FMAC8(operation, OPname) { \ #define mVU_FMAC8(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC8a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC8a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \
@ -179,18 +171,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC8b<vuIndex>(Fd); \ mVUallocFMAC8b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \
} }
// FMAC9 - MSUB FMAC Opcode Storing Result to Fd // FMAC9 - MSUB FMAC Opcode Storing Result to Fd
#define mVU_FMAC9(operation, OPname) { \ #define mVU_FMAC9(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC9a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC9a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \
@ -199,18 +190,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC9b<vuIndex>(Fd); \ mVUallocFMAC9b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \
} }
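// Operand-order note (inferred from the two blocks above): MADD forms Fs*Ft in Fs and then adds ACC
// into it (Fd = ACC + Fs*Ft), while MSUB has to subtract the product from ACC, so its result is
// accumulated in the ACC register instead (Fd = ACC - Fs*Ft); the allocators presumably map Fd onto
// whichever register ends up holding the result.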
// FMAC10 - MADD FMAC BC(xyzw) Opcode Storing Result to Fd // FMAC10 - MADD FMAC BC(xyzw) Opcode Storing Result to Fd
#define mVU_FMAC10(operation, OPname) { \ #define mVU_FMAC10(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC3(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC3<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC10a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC10a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \
@ -219,18 +209,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC10b<vuIndex>(Fd); \ mVUallocFMAC10b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogBC(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogBC(); } \
} }
// FMAC11 - MSUB FMAC BC(xyzw) Opcode Storing Result to Fd // FMAC11 - MSUB FMAC BC(xyzw) Opcode Storing Result to Fd
#define mVU_FMAC11(operation, OPname) { \ #define mVU_FMAC11(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC3(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC3<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC11a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC11a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \
@ -239,18 +228,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC11b<vuIndex>(Fd); \ mVUallocFMAC11b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogBC(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogBC(); } \
} }
// FMAC12 - MADD FMAC Opcode Storing Result to Fd (I Reg) // FMAC12 - MADD FMAC Opcode Storing Result to Fd (I Reg)
#define mVU_FMAC12(operation, OPname) { \ #define mVU_FMAC12(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, 0); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC12a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC12a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \
@ -259,18 +247,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC12b<vuIndex>(Fd); \ mVUallocFMAC12b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogI(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogI(); } \
} }
// FMAC13 - MSUB FMAC Opcode Storing Result to Fd (I Reg) // FMAC13 - MSUB FMAC Opcode Storing Result to Fd (I Reg)
#define mVU_FMAC13(operation, OPname) { \ #define mVU_FMAC13(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, 0); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC13a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC13a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \
@ -279,18 +266,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC13b<vuIndex>(Fd); \ mVUallocFMAC13b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogI(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogI(); } \
} }
// FMAC14 - MADDA/MSUBA FMAC Opcode // FMAC14 - MADDA/MSUBA FMAC Opcode
#define mVU_FMAC14(operation, OPname) { \ #define mVU_FMAC14(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int ACCw, ACCr, Fs, Ft; \ int ACCw, ACCr, Fs, Ft; \
mVUallocFMAC14a<vuIndex>(ACCw, ACCr, Fs, Ft); \ mVUallocFMAC14a(mVU, ACCw, ACCr, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \
@ -299,18 +285,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC14b<vuIndex>(ACCw, ACCr); \ mVUallocFMAC14b(mVU, ACCw, ACCr); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogFt(); } \
} }
// FMAC15 - MADDA/MSUBA BC(xyzw) FMAC Opcode // FMAC15 - MADDA/MSUBA BC(xyzw) FMAC Opcode
#define mVU_FMAC15(operation, OPname) { \ #define mVU_FMAC15(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC3(mVU, 0, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC3<vuIndex>(0, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int ACCw, ACCr, Fs, Ft; \ int ACCw, ACCr, Fs, Ft; \
mVUallocFMAC15a<vuIndex>(ACCw, ACCr, Fs, Ft); \ mVUallocFMAC15a(mVU, ACCw, ACCr, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \
@ -319,18 +304,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC15b<vuIndex>(ACCw, ACCr); \ mVUallocFMAC15b(mVU, ACCw, ACCr); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogBC(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogBC(); } \
} }
// FMAC16 - MADDA/MSUBA FMAC Opcode (I Reg) // FMAC16 - MADDA/MSUBA FMAC Opcode (I Reg)
#define mVU_FMAC16(operation, OPname) { \ #define mVU_FMAC16(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, 0); } \
pass2 { \ pass2 { \
int ACCw, ACCr, Fs, Ft; \ int ACCw, ACCr, Fs, Ft; \
mVUallocFMAC16a<vuIndex>(ACCw, ACCr, Fs, Ft); \ mVUallocFMAC16a(mVU, ACCw, ACCr, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \
@ -339,73 +323,68 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC16b<vuIndex>(ACCw, ACCr); \ mVUallocFMAC16b(mVU, ACCw, ACCr); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogI(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogI(); } \
} }
// FMAC18 - OPMULA FMAC Opcode // FMAC18 - OPMULA FMAC Opcode
#define mVU_FMAC18(operation, OPname) { \ #define mVU_FMAC18(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int ACC, Fs, Ft; \ int ACC, Fs, Ft; \
mVUallocFMAC18a<vuIndex>(ACC, Fs, Ft); \ mVUallocFMAC18a(mVU, ACC, Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC18b<vuIndex>(ACC, Fs); \ mVUallocFMAC18b(mVU, ACC, Fs); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogFt(); } \
} }
// FMAC19 - OPMSUB FMAC Opcode // FMAC19 - OPMSUB FMAC Opcode
#define mVU_FMAC19(operation, OPname) { \ #define mVU_FMAC19(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, _Ft_); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, _Ft_); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC19a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC19a(mVU, Fd, ACC, Fs, Ft); \
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \
mVUupdateFlags<vuIndex>(Fd, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC19b<vuIndex>(Fd); \ mVUallocFMAC19b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogFt(); } \
} }
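// OPMULA/OPMSUB form the two halves of the VU outer-product sequence; a bare MULPS/SUBPS suffices
// here presumably because the FMAC18a/19a allocators return Fs and Ft already swizzled into
// cross-product order (y*z, z*x, x*y), so no extra shuffles need to be emitted in this file.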
// FMAC22 - Normal FMAC Opcodes (Q Reg) // FMAC22 - Normal FMAC Opcodes (Q Reg)
#define mVU_FMAC22(operation, OPname) { \ #define mVU_FMAC22(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, 0); } \
pass2 { \ pass2 { \
int Fd, Fs, Ft; \ int Fd, Fs, Ft; \
mVUallocFMAC22a<vuIndex>(Fd, Fs, Ft); \ mVUallocFMAC22a(mVU, Fd, Fs, Ft); \
if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_XYZW_SS) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 1); \
mVUallocFMAC22b<vuIndex>(Fd); \ mVUallocFMAC22b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogQ(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogQ(); } \
} }
// FMAC23 - FMAC Opcodes Storing Result to ACC (Q Reg) // FMAC23 - FMAC Opcodes Storing Result to ACC (Q Reg)
#define mVU_FMAC23(operation, OPname) { \ #define mVU_FMAC23(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, 0); } \
pass2 { \ pass2 { \
int ACC, Fs, Ft; \ int ACC, Fs, Ft; \
mVUallocFMAC23a<vuIndex>(ACC, Fs, Ft); \ mVUallocFMAC23a(mVU, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \ if (_X_Y_Z_W == 8) SSE_##operation##SS_XMM_to_XMM(Fs, Ft); \
else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \ else SSE_##operation##PS_XMM_to_XMM(Fs, Ft); \
mVUupdateFlags<vuIndex>(Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fs, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC23b<vuIndex>(ACC, Fs); \ mVUallocFMAC23b(mVU, ACC, Fs); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogQ(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogQ(); } \
} }
// FMAC24 - MADD FMAC Opcode Storing Result to Fd (Q Reg) // FMAC24 - MADD FMAC Opcode Storing Result to Fd (Q Reg)
#define mVU_FMAC24(operation, OPname) { \ #define mVU_FMAC24(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, 0); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC24a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC24a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##SS_XMM_to_XMM(Fs, ACC); \
@ -414,18 +393,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \ SSE_##operation##PS_XMM_to_XMM(Fs, ACC); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, xmmT1, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC24b<vuIndex>(Fd); \ mVUallocFMAC24b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogQ(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogQ(); } \
} }
// FMAC25 - MSUB FMAC Opcode Storing Result to Fd (Q Reg) // FMAC25 - MSUB FMAC Opcode Storing Result to Fd (Q Reg)
#define mVU_FMAC25(operation, OPname) { \ #define mVU_FMAC25(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, _Fd_, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(_Fd_, _Fs_, 0); } \
pass2 { \ pass2 { \
int Fd, ACC, Fs, Ft; \ int Fd, ACC, Fs, Ft; \
mVUallocFMAC25a<vuIndex>(Fd, ACC, Fs, Ft); \ mVUallocFMAC25a(mVU, Fd, ACC, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACC, Fs); \
@ -434,18 +412,17 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACC, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(Fd, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, Fd, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC25b<vuIndex>(Fd); \ mVUallocFMAC25b(mVU, Fd); \
} \ } \
pass3 { mVUlog(OPname); mVUlogFd(); mVUlogQ(); } \ pass3 { mVUlog(OPname); mVUlogFd(); mVUlogQ(); } \
} }
// FMAC26 - MADDA/MSUBA FMAC Opcode (Q Reg) // FMAC26 - MADDA/MSUBA FMAC Opcode (Q Reg)
#define mVU_FMAC26(operation, OPname) { \ #define mVU_FMAC26(operation, OPname) { \
microVU* mVU = mVUx; \ pass1 { mVUanalyzeFMAC1(mVU, 0, _Fs_, 0); } \
pass1 { mVUanalyzeFMAC1<vuIndex>(0, _Fs_, 0); } \
pass2 { \ pass2 { \
int ACCw, ACCr, Fs, Ft; \ int ACCw, ACCr, Fs, Ft; \
mVUallocFMAC26a<vuIndex>(ACCw, ACCr, Fs, Ft); \ mVUallocFMAC26a(mVU, ACCw, ACCr, Fs, Ft); \
if (_X_Y_Z_W == 8) { \ if (_X_Y_Z_W == 8) { \
SSE_MULSS_XMM_to_XMM(Fs, Ft); \ SSE_MULSS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##SS_XMM_to_XMM(ACCr, Fs); \
@ -454,123 +431,121 @@ microVUt(void) mVUupdateFlags(int reg, int regT1, int regT2, int xyzw, bool modX
SSE_MULPS_XMM_to_XMM(Fs, Ft); \ SSE_MULPS_XMM_to_XMM(Fs, Ft); \
SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \ SSE_##operation##PS_XMM_to_XMM(ACCr, Fs); \
} \ } \
mVUupdateFlags<vuIndex>(ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \ mVUupdateFlags(mVU, ACCr, Fs, xmmT2, _X_Y_Z_W, 0); \
mVUallocFMAC26b<vuIndex>(ACCw, ACCr); \ mVUallocFMAC26b(mVU, ACCw, ACCr); \
} \ } \
pass3 { mVUlog(OPname); mVUlogACC(); mVUlogQ(); } \ pass3 { mVUlog(OPname); mVUlogACC(); mVUlogQ(); } \
} }
// FMAC27~29 - MAX/MINI FMAC Opcodes // FMAC27~29 - MAX/MINI FMAC Opcodes
#define mVU_FMAC27(operation, OPname) { mVU_FMAC1 (operation, OPname); pass1 { microVU* mVU = mVUx; mVUinfo &= ~_doStatus; } } #define mVU_FMAC27(operation, OPname) { mVU_FMAC1 (operation, OPname); pass1 { mVUinfo &= ~_doStatus; } }
#define mVU_FMAC28(operation, OPname) { mVU_FMAC6 (operation, OPname); pass1 { microVU* mVU = mVUx; mVUinfo &= ~_doStatus; } } #define mVU_FMAC28(operation, OPname) { mVU_FMAC6 (operation, OPname); pass1 { mVUinfo &= ~_doStatus; } }
#define mVU_FMAC29(operation, OPname) { mVU_FMAC3 (operation, OPname); pass1 { microVU* mVU = mVUx; mVUinfo &= ~_doStatus; } } #define mVU_FMAC29(operation, OPname) { mVU_FMAC3 (operation, OPname); pass1 { mVUinfo &= ~_doStatus; } }
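// The extra "mVUinfo &= ~_doStatus" in FMAC27~29 matches VU hardware behaviour: MAX/MINI do not
// update the status flags, so the pending status-flag write from the underlying FMAC1/6/3 analysis
// is cancelled here (an inference from the flag name; _doStatus itself is defined elsewhere).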
//------------------------------------------------------------------ //------------------------------------------------------------------
// Micro VU Micromode Upper instructions // Micro VU Micromode Upper instructions
//------------------------------------------------------------------ //------------------------------------------------------------------
microVUf(void) mVU_ABS(mF) { mVUop(mVU_ABS) {
microVU* mVU = mVUx; pass1 { mVUanalyzeFMAC2(mVU, _Fs_, _Ft_); }
pass1 { mVUanalyzeFMAC2<vuIndex>(_Fs_, _Ft_); }
pass2 { pass2 {
int Fs, Ft; int Fs, Ft;
mVUallocFMAC2a<vuIndex>(Fs, Ft); mVUallocFMAC2a(mVU, Fs, Ft);
SSE_ANDPS_M128_to_XMM(Fs, (uptr)mVU_absclip); SSE_ANDPS_M128_to_XMM(Fs, (uptr)mVU_absclip);
mVUallocFMAC2b<vuIndex>(Ft); mVUallocFMAC2b(mVU, Ft);
} }
pass3 { mVUlog("ABS"); mVUlogFtFs(); } pass3 { mVUlog("ABS"); mVUlogFtFs(); }
} }
microVUf(void) mVU_ADD(mF) { mVU_FMAC1 (ADD, "ADD"); } mVUop(mVU_ADD) { mVU_FMAC1 (ADD, "ADD"); }
microVUf(void) mVU_ADDi(mF) { mVU_FMAC6 (ADD2, "ADDi"); } mVUop(mVU_ADDi) { mVU_FMAC6 (ADD2, "ADDi"); }
microVUf(void) mVU_ADDq(mF) { mVU_FMAC22(ADD, "ADDq"); } mVUop(mVU_ADDq) { mVU_FMAC22(ADD, "ADDq"); }
microVUf(void) mVU_ADDx(mF) { mVU_FMAC3 (ADD, "ADDx"); } mVUop(mVU_ADDx) { mVU_FMAC3 (ADD, "ADDx"); }
microVUf(void) mVU_ADDy(mF) { mVU_FMAC3 (ADD, "ADDy"); } mVUop(mVU_ADDy) { mVU_FMAC3 (ADD, "ADDy"); }
microVUf(void) mVU_ADDz(mF) { mVU_FMAC3 (ADD, "ADDz"); } mVUop(mVU_ADDz) { mVU_FMAC3 (ADD, "ADDz"); }
microVUf(void) mVU_ADDw(mF) { mVU_FMAC3 (ADD, "ADDw"); } mVUop(mVU_ADDw) { mVU_FMAC3 (ADD, "ADDw"); }
microVUf(void) mVU_ADDA(mF) { mVU_FMAC4 (ADD, "ADDA"); } mVUop(mVU_ADDA) { mVU_FMAC4 (ADD, "ADDA"); }
microVUf(void) mVU_ADDAi(mF) { mVU_FMAC7 (ADD, "ADDAi"); } mVUop(mVU_ADDAi) { mVU_FMAC7 (ADD, "ADDAi"); }
microVUf(void) mVU_ADDAq(mF) { mVU_FMAC23(ADD, "ADDAq"); } mVUop(mVU_ADDAq) { mVU_FMAC23(ADD, "ADDAq"); }
microVUf(void) mVU_ADDAx(mF) { mVU_FMAC5 (ADD, "ADDAx"); } mVUop(mVU_ADDAx) { mVU_FMAC5 (ADD, "ADDAx"); }
microVUf(void) mVU_ADDAy(mF) { mVU_FMAC5 (ADD, "ADDAy"); } mVUop(mVU_ADDAy) { mVU_FMAC5 (ADD, "ADDAy"); }
microVUf(void) mVU_ADDAz(mF) { mVU_FMAC5 (ADD, "ADDAz"); } mVUop(mVU_ADDAz) { mVU_FMAC5 (ADD, "ADDAz"); }
microVUf(void) mVU_ADDAw(mF) { mVU_FMAC5 (ADD, "ADDAw"); } mVUop(mVU_ADDAw) { mVU_FMAC5 (ADD, "ADDAw"); }
microVUf(void) mVU_SUB(mF) { mVU_FMAC1 (SUB, "SUB"); } mVUop(mVU_SUB) { mVU_FMAC1 (SUB, "SUB"); }
microVUf(void) mVU_SUBi(mF) { mVU_FMAC6 (SUB, "SUBi"); } mVUop(mVU_SUBi) { mVU_FMAC6 (SUB, "SUBi"); }
microVUf(void) mVU_SUBq(mF) { mVU_FMAC22(SUB, "SUBq"); } mVUop(mVU_SUBq) { mVU_FMAC22(SUB, "SUBq"); }
microVUf(void) mVU_SUBx(mF) { mVU_FMAC3 (SUB, "SUBx"); } mVUop(mVU_SUBx) { mVU_FMAC3 (SUB, "SUBx"); }
microVUf(void) mVU_SUBy(mF) { mVU_FMAC3 (SUB, "SUBy"); } mVUop(mVU_SUBy) { mVU_FMAC3 (SUB, "SUBy"); }
microVUf(void) mVU_SUBz(mF) { mVU_FMAC3 (SUB, "SUBz"); } mVUop(mVU_SUBz) { mVU_FMAC3 (SUB, "SUBz"); }
microVUf(void) mVU_SUBw(mF) { mVU_FMAC3 (SUB, "SUBw"); } mVUop(mVU_SUBw) { mVU_FMAC3 (SUB, "SUBw"); }
microVUf(void) mVU_SUBA(mF) { mVU_FMAC4 (SUB, "SUBA"); } mVUop(mVU_SUBA) { mVU_FMAC4 (SUB, "SUBA"); }
microVUf(void) mVU_SUBAi(mF) { mVU_FMAC7 (SUB, "SUBAi"); } mVUop(mVU_SUBAi) { mVU_FMAC7 (SUB, "SUBAi"); }
microVUf(void) mVU_SUBAq(mF) { mVU_FMAC23(SUB, "SUBAq"); } mVUop(mVU_SUBAq) { mVU_FMAC23(SUB, "SUBAq"); }
microVUf(void) mVU_SUBAx(mF) { mVU_FMAC5 (SUB, "SUBAx"); } mVUop(mVU_SUBAx) { mVU_FMAC5 (SUB, "SUBAx"); }
microVUf(void) mVU_SUBAy(mF) { mVU_FMAC5 (SUB, "SUBAy"); } mVUop(mVU_SUBAy) { mVU_FMAC5 (SUB, "SUBAy"); }
microVUf(void) mVU_SUBAz(mF) { mVU_FMAC5 (SUB, "SUBAz"); } mVUop(mVU_SUBAz) { mVU_FMAC5 (SUB, "SUBAz"); }
microVUf(void) mVU_SUBAw(mF) { mVU_FMAC5 (SUB, "SUBAw"); } mVUop(mVU_SUBAw) { mVU_FMAC5 (SUB, "SUBAw"); }
microVUf(void) mVU_MUL(mF) { mVU_FMAC1 (MUL, "MUL"); } mVUop(mVU_MUL) { mVU_FMAC1 (MUL, "MUL"); }
microVUf(void) mVU_MULi(mF) { mVU_FMAC6 (MUL, "MULi"); } mVUop(mVU_MULi) { mVU_FMAC6 (MUL, "MULi"); }
microVUf(void) mVU_MULq(mF) { mVU_FMAC22(MUL, "MULq"); } mVUop(mVU_MULq) { mVU_FMAC22(MUL, "MULq"); }
microVUf(void) mVU_MULx(mF) { mVU_FMAC3 (MUL, "MULx"); } mVUop(mVU_MULx) { mVU_FMAC3 (MUL, "MULx"); }
microVUf(void) mVU_MULy(mF) { mVU_FMAC3 (MUL, "MULy"); } mVUop(mVU_MULy) { mVU_FMAC3 (MUL, "MULy"); }
microVUf(void) mVU_MULz(mF) { mVU_FMAC3 (MUL, "MULz"); } mVUop(mVU_MULz) { mVU_FMAC3 (MUL, "MULz"); }
microVUf(void) mVU_MULw(mF) { mVU_FMAC3 (MUL, "MULw"); } mVUop(mVU_MULw) { mVU_FMAC3 (MUL, "MULw"); }
microVUf(void) mVU_MULA(mF) { mVU_FMAC4 (MUL, "MULA"); } mVUop(mVU_MULA) { mVU_FMAC4 (MUL, "MULA"); }
microVUf(void) mVU_MULAi(mF) { mVU_FMAC7 (MUL, "MULAi"); } mVUop(mVU_MULAi) { mVU_FMAC7 (MUL, "MULAi"); }
microVUf(void) mVU_MULAq(mF) { mVU_FMAC23(MUL, "MULAq"); } mVUop(mVU_MULAq) { mVU_FMAC23(MUL, "MULAq"); }
microVUf(void) mVU_MULAx(mF) { mVU_FMAC5 (MUL, "MULAx"); } mVUop(mVU_MULAx) { mVU_FMAC5 (MUL, "MULAx"); }
microVUf(void) mVU_MULAy(mF) { mVU_FMAC5 (MUL, "MULAy"); } mVUop(mVU_MULAy) { mVU_FMAC5 (MUL, "MULAy"); }
microVUf(void) mVU_MULAz(mF) { mVU_FMAC5 (MUL, "MULAz"); } mVUop(mVU_MULAz) { mVU_FMAC5 (MUL, "MULAz"); }
microVUf(void) mVU_MULAw(mF) { mVU_FMAC5 (MUL, "MULAw"); } mVUop(mVU_MULAw) { mVU_FMAC5 (MUL, "MULAw"); }
microVUf(void) mVU_MADD(mF) { mVU_FMAC8 (ADD, "MADD"); } mVUop(mVU_MADD) { mVU_FMAC8 (ADD, "MADD"); }
microVUf(void) mVU_MADDi(mF) { mVU_FMAC12(ADD, "MADDi"); } mVUop(mVU_MADDi) { mVU_FMAC12(ADD, "MADDi"); }
microVUf(void) mVU_MADDq(mF) { mVU_FMAC24(ADD, "MADDq"); } mVUop(mVU_MADDq) { mVU_FMAC24(ADD, "MADDq"); }
microVUf(void) mVU_MADDx(mF) { mVU_FMAC10(ADD, "MADDx"); } mVUop(mVU_MADDx) { mVU_FMAC10(ADD, "MADDx"); }
microVUf(void) mVU_MADDy(mF) { mVU_FMAC10(ADD, "MADDy"); } mVUop(mVU_MADDy) { mVU_FMAC10(ADD, "MADDy"); }
microVUf(void) mVU_MADDz(mF) { mVU_FMAC10(ADD, "MADDz"); } mVUop(mVU_MADDz) { mVU_FMAC10(ADD, "MADDz"); }
microVUf(void) mVU_MADDw(mF) { mVU_FMAC10(ADD, "MADDw"); } mVUop(mVU_MADDw) { mVU_FMAC10(ADD, "MADDw"); }
microVUf(void) mVU_MADDA(mF) { mVU_FMAC14(ADD, "MADDA"); } mVUop(mVU_MADDA) { mVU_FMAC14(ADD, "MADDA"); }
microVUf(void) mVU_MADDAi(mF) { mVU_FMAC16(ADD, "MADDAi"); } mVUop(mVU_MADDAi) { mVU_FMAC16(ADD, "MADDAi"); }
microVUf(void) mVU_MADDAq(mF) { mVU_FMAC26(ADD, "MADDAq"); } mVUop(mVU_MADDAq) { mVU_FMAC26(ADD, "MADDAq"); }
microVUf(void) mVU_MADDAx(mF) { mVU_FMAC15(ADD, "MADDAx"); } mVUop(mVU_MADDAx) { mVU_FMAC15(ADD, "MADDAx"); }
microVUf(void) mVU_MADDAy(mF) { mVU_FMAC15(ADD, "MADDAy"); } mVUop(mVU_MADDAy) { mVU_FMAC15(ADD, "MADDAy"); }
microVUf(void) mVU_MADDAz(mF) { mVU_FMAC15(ADD, "MADDAz"); } mVUop(mVU_MADDAz) { mVU_FMAC15(ADD, "MADDAz"); }
microVUf(void) mVU_MADDAw(mF) { mVU_FMAC15(ADD, "MADDAw"); } mVUop(mVU_MADDAw) { mVU_FMAC15(ADD, "MADDAw"); }
microVUf(void) mVU_MSUB(mF) { mVU_FMAC9 (SUB, "MSUB"); } mVUop(mVU_MSUB) { mVU_FMAC9 (SUB, "MSUB"); }
microVUf(void) mVU_MSUBi(mF) { mVU_FMAC13(SUB, "MSUBi"); } mVUop(mVU_MSUBi) { mVU_FMAC13(SUB, "MSUBi"); }
microVUf(void) mVU_MSUBq(mF) { mVU_FMAC25(SUB, "MSUBq"); } mVUop(mVU_MSUBq) { mVU_FMAC25(SUB, "MSUBq"); }
microVUf(void) mVU_MSUBx(mF) { mVU_FMAC11(SUB, "MSUBx"); } mVUop(mVU_MSUBx) { mVU_FMAC11(SUB, "MSUBx"); }
microVUf(void) mVU_MSUBy(mF) { mVU_FMAC11(SUB, "MSUBy"); } mVUop(mVU_MSUBy) { mVU_FMAC11(SUB, "MSUBy"); }
microVUf(void) mVU_MSUBz(mF) { mVU_FMAC11(SUB, "MSUBz"); } mVUop(mVU_MSUBz) { mVU_FMAC11(SUB, "MSUBz"); }
microVUf(void) mVU_MSUBw(mF) { mVU_FMAC11(SUB, "MSUBw"); } mVUop(mVU_MSUBw) { mVU_FMAC11(SUB, "MSUBw"); }
microVUf(void) mVU_MSUBA(mF) { mVU_FMAC14(SUB, "MSUBA"); } mVUop(mVU_MSUBA) { mVU_FMAC14(SUB, "MSUBA"); }
microVUf(void) mVU_MSUBAi(mF) { mVU_FMAC16(SUB, "MSUBAi"); } mVUop(mVU_MSUBAi) { mVU_FMAC16(SUB, "MSUBAi"); }
microVUf(void) mVU_MSUBAq(mF) { mVU_FMAC26(SUB, "MSUBAq"); } mVUop(mVU_MSUBAq) { mVU_FMAC26(SUB, "MSUBAq"); }
microVUf(void) mVU_MSUBAx(mF) { mVU_FMAC15(SUB, "MSUBAx"); } mVUop(mVU_MSUBAx) { mVU_FMAC15(SUB, "MSUBAx"); }
microVUf(void) mVU_MSUBAy(mF) { mVU_FMAC15(SUB, "MSUBAy"); } mVUop(mVU_MSUBAy) { mVU_FMAC15(SUB, "MSUBAy"); }
microVUf(void) mVU_MSUBAz(mF) { mVU_FMAC15(SUB, "MSUBAz"); } mVUop(mVU_MSUBAz) { mVU_FMAC15(SUB, "MSUBAz"); }
microVUf(void) mVU_MSUBAw(mF) { mVU_FMAC15(SUB, "MSUBAw"); } mVUop(mVU_MSUBAw) { mVU_FMAC15(SUB, "MSUBAw"); }
microVUf(void) mVU_MAX(mF) { mVU_FMAC27(MAX2, "MAX"); } mVUop(mVU_MAX) { mVU_FMAC27(MAX2, "MAX"); }
microVUf(void) mVU_MAXi(mF) { mVU_FMAC28(MAX2, "MAXi"); } mVUop(mVU_MAXi) { mVU_FMAC28(MAX2, "MAXi"); }
microVUf(void) mVU_MAXx(mF) { mVU_FMAC29(MAX2, "MAXx"); } mVUop(mVU_MAXx) { mVU_FMAC29(MAX2, "MAXx"); }
microVUf(void) mVU_MAXy(mF) { mVU_FMAC29(MAX2, "MAXy"); } mVUop(mVU_MAXy) { mVU_FMAC29(MAX2, "MAXy"); }
microVUf(void) mVU_MAXz(mF) { mVU_FMAC29(MAX2, "MAXz"); } mVUop(mVU_MAXz) { mVU_FMAC29(MAX2, "MAXz"); }
microVUf(void) mVU_MAXw(mF) { mVU_FMAC29(MAX2, "MAXw"); } mVUop(mVU_MAXw) { mVU_FMAC29(MAX2, "MAXw"); }
microVUf(void) mVU_MINI(mF) { mVU_FMAC27(MIN2, "MINI"); } mVUop(mVU_MINI) { mVU_FMAC27(MIN2, "MINI"); }
microVUf(void) mVU_MINIi(mF) { mVU_FMAC28(MIN2, "MINIi"); } mVUop(mVU_MINIi) { mVU_FMAC28(MIN2, "MINIi"); }
microVUf(void) mVU_MINIx(mF) { mVU_FMAC29(MIN2, "MINIx"); } mVUop(mVU_MINIx) { mVU_FMAC29(MIN2, "MINIx"); }
microVUf(void) mVU_MINIy(mF) { mVU_FMAC29(MIN2, "MINIy"); } mVUop(mVU_MINIy) { mVU_FMAC29(MIN2, "MINIy"); }
microVUf(void) mVU_MINIz(mF) { mVU_FMAC29(MIN2, "MINIz"); } mVUop(mVU_MINIz) { mVU_FMAC29(MIN2, "MINIz"); }
microVUf(void) mVU_MINIw(mF) { mVU_FMAC29(MIN2, "MINIw"); } mVUop(mVU_MINIw) { mVU_FMAC29(MIN2, "MINIw"); }
microVUf(void) mVU_OPMULA(mF) { mVU_FMAC18(MUL, "OPMULA"); } mVUop(mVU_OPMULA) { mVU_FMAC18(MUL, "OPMULA"); }
microVUf(void) mVU_OPMSUB(mF) { mVU_FMAC19(SUB, "OPMSUB"); } mVUop(mVU_OPMSUB) { mVU_FMAC19(SUB, "OPMSUB"); }
microVUf(void) mVU_NOP(mF) { pass3 { mVUlog("NOP"); } } mVUop(mVU_NOP) { pass3 { mVUlog("NOP"); } }
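// Reading the table above: the suffixes follow the usual VU upper-pipe naming, where "i" uses the
// I register, "q" the Q register, "x/y/z/w" broadcast that single component of Ft, and a trailing
// "A" (ADDA, MULA, MADDA, ...) accumulates into ACC instead of writing Fd.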
microVUq(void) mVU_FTOIx(uptr addr, int recPass) { void mVU_FTOIx(mP, uptr addr) {
microVU* mVU = mVUx; pass1 { mVUanalyzeFMAC2(mVU, _Fs_, _Ft_); }
pass1 { mVUanalyzeFMAC2<vuIndex>(_Fs_, _Ft_); }
pass2 { pass2 {
int Fs, Ft; int Fs, Ft;
mVUallocFMAC2a<vuIndex>(Fs, Ft); mVUallocFMAC2a(mVU, Fs, Ft);
// Note: For help understanding this algorithm see recVUMI_FTOI_Saturate() // Note: For help understanding this algorithm see recVUMI_FTOI_Saturate()
SSE_MOVAPS_XMM_to_XMM(xmmT1, Fs); SSE_MOVAPS_XMM_to_XMM(xmmT1, Fs);
@ -583,38 +558,36 @@ microVUq(void) mVU_FTOIx(uptr addr, int recPass) {
SSE_ANDPS_XMM_to_XMM(xmmT1, xmmFt); SSE_ANDPS_XMM_to_XMM(xmmT1, xmmFt);
SSE2_PADDD_XMM_to_XMM(Fs, xmmT1); SSE2_PADDD_XMM_to_XMM(Fs, xmmT1);
mVUallocFMAC2b<vuIndex>(Ft); mVUallocFMAC2b(mVU, Ft);
} }
} }
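// A scalar sketch of the saturation step this routine performs (see the recVUMI_FTOI_Saturate() note
// above); the helper below is hypothetical and only for illustration. CVTTPS2DQ yields 0x80000000
// ("integer indefinite") for any float outside s32 range, so positive overflow has to be patched
// back to 0x7fffffff afterwards; the addr table passed by FTOI4/12/15 presumably supplies the 2^x
// fixed-point scale applied beforehand.
//
//   static s32 ftoiSaturate(float f) {
//       if (f >=  2147483648.0f) return 0x7fffffff;      // positive overflow clamps to INT_MAX
//       if (f <  -2147483648.0f) return (s32)0x80000000; // negative overflow clamps to INT_MIN
//       return (s32)f;                                   // in range: truncate toward zero
//   }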
microVUf(void) mVU_FTOI0(mF) { mVU_FTOIx<vuIndex>((uptr)0, recPass); pass3 { microVU* mVU = mVUx; mVUlog("FTOI0"); mVUlogFtFs(); } } mVUop(mVU_FTOI0) { mVU_FTOIx(mX, (uptr)0); pass3 { mVUlog("FTOI0"); mVUlogFtFs(); } }
microVUf(void) mVU_FTOI4(mF) { mVU_FTOIx<vuIndex>((uptr)mVU_FTOI_4, recPass); pass3 { microVU* mVU = mVUx; mVUlog("FTOI4"); mVUlogFtFs(); } } mVUop(mVU_FTOI4) { mVU_FTOIx(mX, (uptr)mVU_FTOI_4); pass3 { mVUlog("FTOI4"); mVUlogFtFs(); } }
microVUf(void) mVU_FTOI12(mF) { mVU_FTOIx<vuIndex>((uptr)mVU_FTOI_12, recPass); pass3 { microVU* mVU = mVUx; mVUlog("FTOI12"); mVUlogFtFs(); } } mVUop(mVU_FTOI12) { mVU_FTOIx(mX, (uptr)mVU_FTOI_12); pass3 { mVUlog("FTOI12"); mVUlogFtFs(); } }
microVUf(void) mVU_FTOI15(mF) { mVU_FTOIx<vuIndex>((uptr)mVU_FTOI_15, recPass); pass3 { microVU* mVU = mVUx; mVUlog("FTOI15"); mVUlogFtFs(); } } mVUop(mVU_FTOI15) { mVU_FTOIx(mX, (uptr)mVU_FTOI_15); pass3 { mVUlog("FTOI15"); mVUlogFtFs(); } }
microVUq(void) mVU_ITOFx(uptr addr, int recPass) { void mVU_ITOFx(mP, uptr addr) {
microVU* mVU = mVUx; pass1 { mVUanalyzeFMAC2(mVU, _Fs_, _Ft_); }
pass1 { mVUanalyzeFMAC2<vuIndex>(_Fs_, _Ft_); }
pass2 { pass2 {
int Fs, Ft; int Fs, Ft;
mVUallocFMAC2a<vuIndex>(Fs, Ft); mVUallocFMAC2a(mVU, Fs, Ft);
SSE2_CVTDQ2PS_XMM_to_XMM(Ft, Fs); SSE2_CVTDQ2PS_XMM_to_XMM(Ft, Fs);
if (addr) { SSE_MULPS_M128_to_XMM(Ft, addr); } if (addr) { SSE_MULPS_M128_to_XMM(Ft, addr); }
//mVUclamp2(Ft, xmmT1, 15); // Clamp (not sure if this is needed) //mVUclamp2(Ft, xmmT1, 15); // Clamp (not sure if this is needed)
mVUallocFMAC2b<vuIndex>(Ft); mVUallocFMAC2b(mVU, Ft);
} }
} }
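// The reverse direction is simpler: CVTDQ2PS converts the raw integers and the optional addr table
// rescales them into the VU fixed-point formats. A scalar sketch, with the 4/12/15 fractional-bit
// layout assumed for ITOF4/12/15 (ITOF0 is a plain conversion):
//
//   static float itofX(s32 i, int fracBits) { return (float)i / (float)(1 << fracBits); }
//   // e.g. itofX(raw, 4) for ITOF4, itofX(raw, 12) for ITOF12, itofX(raw, 15) for ITOF15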
microVUf(void) mVU_ITOF0(mF) { mVU_ITOFx<vuIndex>((uptr)0, recPass); pass3 { microVU* mVU = mVUx; mVUlog("ITOF0"); mVUlogFtFs(); } } mVUop(mVU_ITOF0) { mVU_ITOFx(mX, (uptr)0); pass3 { mVUlog("ITOF0"); mVUlogFtFs(); } }
microVUf(void) mVU_ITOF4(mF) { mVU_ITOFx<vuIndex>((uptr)mVU_ITOF_4, recPass); pass3 { microVU* mVU = mVUx; mVUlog("ITOF4"); mVUlogFtFs(); } } mVUop(mVU_ITOF4) { mVU_ITOFx(mX, (uptr)mVU_ITOF_4); pass3 { mVUlog("ITOF4"); mVUlogFtFs(); } }
microVUf(void) mVU_ITOF12(mF) { mVU_ITOFx<vuIndex>((uptr)mVU_ITOF_12, recPass); pass3 { microVU* mVU = mVUx; mVUlog("ITOF12"); mVUlogFtFs(); } } mVUop(mVU_ITOF12) { mVU_ITOFx(mX, (uptr)mVU_ITOF_12); pass3 { mVUlog("ITOF12"); mVUlogFtFs(); } }
microVUf(void) mVU_ITOF15(mF) { mVU_ITOFx<vuIndex>((uptr)mVU_ITOF_15, recPass); pass3 { microVU* mVU = mVUx; mVUlog("ITOF15"); mVUlogFtFs(); } } mVUop(mVU_ITOF15) { mVU_ITOFx(mX, (uptr)mVU_ITOF_15); pass3 { mVUlog("ITOF15"); mVUlogFtFs(); } }
microVUf(void) mVU_CLIP(mF) { mVUop(mVU_CLIP) {
microVU* mVU = mVUx; pass1 { mVUanalyzeFMAC4(mVU, _Fs_, _Ft_); }
pass1 { mVUanalyzeFMAC4<vuIndex>(_Fs_, _Ft_); }
pass2 { pass2 {
int Fs, Ft; int Fs, Ft;
mVUallocFMAC17a<vuIndex>(Fs, Ft); mVUallocFMAC17a(mVU, Fs, Ft);
mVUallocCFLAGa<vuIndex>(gprT1, fpcInstance); mVUallocCFLAGa(mVU, gprT1, fpcInstance);
SHL32ItoR(gprT1, 6); SHL32ItoR(gprT1, 6);
SSE_ANDPS_M128_to_XMM(Ft, (uptr)mVU_absclip); SSE_ANDPS_M128_to_XMM(Ft, (uptr)mVU_absclip);
@ -638,7 +611,7 @@ microVUf(void) mVU_CLIP(mF) {
OR32RtoR (gprT1, gprT2); OR32RtoR (gprT1, gprT2);
AND32ItoR(gprT1, 0xffffff); AND32ItoR(gprT1, 0xffffff);
mVUallocCFLAGb<vuIndex>(gprT1, fcInstance); mVUallocCFLAGb(mVU, gprT1, fcInstance);
} }
pass3 { mVUlog("CLIP"); mVUlogCLIP(); } pass3 { mVUlog("CLIP"); mVUlogCLIP(); }
} }
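// CLIP packs six judgement bits per execution (+x/-x, +y/-y, +z/-z of Fs tested against |Ft.w|) and
// shifts them into the clip flag; the SHL by 6 together with the AND against 0xffffff above keeps
// only the four most recent judgements (24 bits), matching the VU's clip-flag history.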