mirror of https://github.com/PCSX2/pcsx2.git
Renamed iCP0 to iCOP0 to be consistent with the other COP files. Very minor VU micro work.
git-svn-id: http://pcsx2-playground.googlecode.com/svn/trunk@607 a6443dda-0b58-4228-96e9-037be469359c
This commit is contained in:
parent 1098253df7
commit 70854f099e
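For orientation, a minimal sketch of what the rename means for code that consumes the recompiler's COP0 header. The old file and guard names are taken from the removed iCP0.h further down; the new iCOP0.h itself is not shown on this page, so its exact guard spelling is an assumption, and the caller below is purely hypothetical, not part of the commit.

// Hypothetical caller, not part of this commit's diff.
// Old header (deleted below):  x86/iCP0.h, include guard __ICP0_H__
// New header (added to the project files below): x86/iCOP0.h; per the
// changelog hunk its define becomes __iCOP0__ (exact spelling assumed).
#include "x86/iCOP0.h"

void exampleDispatch()
{
	// The declarations keep their names and signatures (see the removed iCP0.h below).
	Dynarec::R5900::OpcodeImpl::COP0::recMFC0();
}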
@@ -1251,7 +1251,7 @@ v0.9.1:
* 18/12/03:[*] Improvements to TLB code.
           (linuzappz)

* 17/12/03:[-] Fixed iCP0.h define to __ICP0__
* 17/12/03:[-] Fixed iCOP0.h define to __iCOP0__
           [+] Added disR5900GetUpperSym
           (linuzappz)
@@ -31,7 +31,7 @@
#include "x86/iR5900LoadStore.h"
#include "x86/iR5900Move.h"
#include "x86/iMMI.h"
#include "x86/iCP0.h"
#include "x86/iCOP0.h"
#include "x86/iFPU.h"

namespace R5900
@@ -2283,6 +2283,14 @@
			RelativePath="..\..\x86\ix86-32\aVif_proc-32.asm"
			>
		</File>
		<File
			RelativePath="..\..\x86\iCOP0.cpp"
			>
		</File>
		<File
			RelativePath="..\..\x86\iCOP0.h"
			>
		</File>
		<File
			RelativePath="..\..\x86\iCOP2.cpp"
			>
@@ -2295,14 +2303,6 @@
			RelativePath="..\..\x86\iCore.h"
			>
		</File>
		<File
			RelativePath="..\..\x86\iCP0.cpp"
			>
		</File>
		<File
			RelativePath="..\..\x86\iCP0.h"
			>
		</File>
		<File
			RelativePath="..\..\x86\iFPU.cpp"
			>
@@ -3006,10 +3006,6 @@
			RelativePath="..\ps2_silver.bmp"
			>
		</File>
		<File
			RelativePath="..\ps2_silver.jpg"
			>
		</File>
		<File
			RelativePath="..\..\Stats.cpp"
			>
@@ -1,445 +0,0 @@
/* Pcsx2 - Pc Ps2 Emulator
 * Copyright (C) 2002-2008 Pcsx2 Team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 */

// Important Note to Future Developers:
// None of the COP0 instructions are really critical performance items,
// so don't waste time converting any more of them into recompiled code
// unless it can make them nicely compact. Calling the C versions will
// suffice.

#include "PrecompiledHeader.h"

#include "Common.h"
#include "InterTables.h"
#include "ix86/ix86.h"
#include "iR5900.h"
#include "iCP0.h"

namespace Interp = R5900::Interpreter::OpcodeImpl::COP0;

namespace Dynarec {
namespace R5900 {

// R5900 branch helper!
// Recompiles code for a branch test and/or skip, complete with delay slot
// handling. Note, for "likely" branches use iDoBranchImm_Likely instead, which
// handles delay slots differently.
// Parameters:
//   jmpSkip - This parameter is the result of the appropriate J32 instruction
//   (usually JZ32 or JNZ32).
static void recDoBranchImm( u32* jmpSkip, bool isLikely = false )
{
	// All R5900 branches use this format:
	const u32 branchTo = (s32)_Imm_ * 4 + pc;

	// First up is the Branch Taken Path : Save the recompiler's state, compile the
	// DelaySlot, and issue a BranchTest insertion. The state is reloaded below for
	// the "did not branch" path (maintains consts, register allocations, and other optimizations).

	SaveBranchState();
	recompileNextInstruction(1);
	SetBranchImm(branchTo);

	// Jump target when the branch is *not* taken, skips the branchtest code
	// insertion above.
	x86SetJ32(jmpSkip);

	// if it's a likely branch then we'll need to skip the delay slot here, since
	// MIPS cancels the delay slot instruction when branches aren't taken.
	if( !isLikely ) pc -= 4;	// instruction rewind for delay slot, if non-likely.
	LoadBranchState();
	recompileNextInstruction(1);

	SetBranchImm(pc);
}

static void recDoBranchImm_Likely( u32* jmpSkip )
{
	recDoBranchImm( jmpSkip, true );
}

namespace OpcodeImpl {
namespace COP0 {

/*********************************************************
* COP0 opcodes                                            *
*                                                         *
*********************************************************/

// emits "setup" code for a COP0 branch test. The instruction immediately following
// this should be a conditional Jump -- JZ or JNZ normally.
static void _setupBranchTest()
{
	_eeFlushAllUnused();

	// COP0 branch conditionals are based on the following equation:
	// (((psHu16(DMAC_STAT) & psHu16(DMAC_PCR)) & 0x3ff) == (psHu16(DMAC_PCR) & 0x3ff))
	// BC0F checks if the statement is false, BC0T checks if the statement is true.

	// note: We only want to compare the 16 bit values of DMAC_STAT and PCR.
	// But using 32-bit loads here is ok (and faster), because we mask off
	// everything except the lower 10 bits away.

	MOV32MtoR( EAX, (uptr)&psHu32(DMAC_STAT) );
	MOV32MtoR( ECX, (uptr)&psHu32(DMAC_PCR) );
	AND32ItoR( EAX, 0x3ff );	// masks off all but lower 10 bits.
	AND32ItoR( ECX, 0x3ff );
	CMP32RtoR( EAX, ECX );
}

void recBC0F()
{
	_setupBranchTest();
	recDoBranchImm(JNZ32(0));
}

void recBC0T()
{
	_setupBranchTest();
	recDoBranchImm(JZ32(0));
}

void recBC0FL()
{
	_setupBranchTest();
	recDoBranchImm_Likely(JNZ32(0));
}

void recBC0TL()
{
	_setupBranchTest();
	recDoBranchImm_Likely(JZ32(0));
}

void recTLBR() { recCall( Interp::TLBR, -1 ); }
void recTLBP() { recCall( Interp::TLBP, -1 ); }
void recTLBWI() { recCall( Interp::TLBWI, -1 ); }
void recTLBWR() { recCall( Interp::TLBWR, -1 ); }

void recERET()
{
	recBranchCall( Interp::ERET );
}

void recEI()
{
	// must branch after enabling interrupts, so that anything
	// pending gets triggered properly.
	recBranchCall( Interp::EI );
}

void recDI()
{
	// No need to branch after disabling interrupts...

	iFlushCall(0);

	MOV32MtoR( EAX, (uptr)&cpuRegs.cycle );
	MOV32RtoM( (uptr)&g_nextBranchCycle, EAX );

	CALLFunc( (uptr)Interp::DI );
}


#ifndef CP0_RECOMPILE

REC_SYS( MFC0 );
REC_SYS( MTC0 );

#else
void recMFC0( void )
{
	int mmreg;

	if ( ! _Rt_ ) return;

	if( _Rd_ == 9 ) {
		MOV32MtoR(ECX, (uptr)&cpuRegs.cycle);
		MOV32RtoR(EAX,ECX);
		SUB32MtoR(EAX, (uptr)&s_iLastCOP0Cycle);
		ADD32RtoM((uptr)&cpuRegs.CP0.n.Count, EAX);
		MOV32RtoM((uptr)&s_iLastCOP0Cycle, ECX);
		MOV32MtoR( EAX, (uptr)&cpuRegs.CP0.r[ _Rd_ ] );

		_deleteEEreg(_Rt_, 0);
		MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[0],EAX);

		if(EEINST_ISLIVE1(_Rt_)) {
			CDQ();
			MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[1], EDX);
		}
		else EEINST_RESETHASLIVE1(_Rt_);
		return;
	}
	if( _Rd_ == 25 ) {

		_deleteEEreg(_Rt_, 0);
		switch(_Imm_ & 0x3F){
			case 0:
				MOV32MtoR(EAX, (uptr)&cpuRegs.PERF.n.pccr);

				break;
			case 1:
				// check if needs to be incremented
				MOV32MtoR(ECX, (uptr)&cpuRegs.PERF.n.pccr);
				MOV32MtoR(EAX, (uptr)&cpuRegs.PERF.n.pcr0);
				AND32ItoR(ECX, 0x800003E0);

				CMP32ItoR(ECX, 0x80000020);
				j8Ptr[0] = JNE8(0);

				MOV32MtoR(EDX, (uptr)&cpuRegs.cycle);
				SUB32MtoR(EAX, (uptr)&s_iLastPERFCycle[0]);
				ADD32RtoR(EAX, EDX);
				MOV32RtoM((uptr)&s_iLastPERFCycle[0], EDX);
				MOV32RtoM((uptr)&cpuRegs.PERF.n.pcr0, EAX);

				x86SetJ8(j8Ptr[0]);
				break;
			case 3:
				// check if needs to be incremented
				MOV32MtoR(ECX, (uptr)&cpuRegs.PERF.n.pccr);
				MOV32MtoR(EAX, (uptr)&cpuRegs.PERF.n.pcr1);
				AND32ItoR(ECX, 0x800F8000);

				CMP32ItoR(ECX, 0x80008000);
				j8Ptr[0] = JNE8(0);

				MOV32MtoR(EDX, (uptr)&cpuRegs.cycle);
				SUB32MtoR(EAX, (uptr)&s_iLastPERFCycle[1]);
				ADD32RtoR(EAX, EDX);
				MOV32RtoM((uptr)&s_iLastPERFCycle[1], EDX);
				MOV32RtoM((uptr)&cpuRegs.PERF.n.pcr1, EAX);

				x86SetJ8(j8Ptr[0]);

				break;
		}

		MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[0],EAX);

		if(EEINST_ISLIVE1(_Rt_)) {
			CDQ();
			MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[1], EDX);
		}
		else EEINST_RESETHASLIVE1(_Rt_);

#ifdef PCSX2_DEVBUILD
		COP0_LOG("MFC0 PCCR = %x PCR0 = %x PCR1 = %x IMM= %x\n",
			cpuRegs.PERF.n.pccr, cpuRegs.PERF.n.pcr0, cpuRegs.PERF.n.pcr1, _Imm_ & 0x3F);
#endif
		return;
	}
	else if( _Rd_ == 24){
		COP0_LOG("MFC0 Breakpoint debug Registers code = %x\n", cpuRegs.code & 0x3FF);
		return;
	}
	_eeOnWriteReg(_Rt_, 1);

	if( EEINST_ISLIVE1(_Rt_) ) {
		_deleteEEreg(_Rt_, 0);
		MOV32MtoR(EAX, (uptr)&cpuRegs.CP0.r[ _Rd_ ]);
		CDQ();
		MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[0], EAX);
		MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[1], EDX);
	}
	else {
		EEINST_RESETHASLIVE1(_Rt_);

		if( (mmreg = _allocCheckGPRtoMMX(g_pCurInstInfo, _Rt_, MODE_WRITE)) >= 0 ) {
			MOVDMtoMMX(mmreg, (uptr)&cpuRegs.CP0.r[ _Rd_ ]);
			SetMMXstate();
		}
		else if( (mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ)) >= 0) {

			if( EEINST_ISLIVE2(_Rt_) ) {
				if( xmmregs[mmreg].mode & MODE_WRITE ) {
					SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rt_].UL[2], mmreg);
				}
				xmmregs[mmreg].inuse = 0;

				MOV32MtoR(EAX, (uptr)&cpuRegs.CP0.r[ _Rd_ ]);
				MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[0],EAX);
			}
			else {
				SSE_MOVLPS_M64_to_XMM(mmreg, (uptr)&cpuRegs.CP0.r[ _Rd_ ]);
			}
		}
		else {
			MOV32MtoR(EAX, (uptr)&cpuRegs.CP0.r[ _Rd_ ]);
			if(_Rd_ == 12) AND32ItoR(EAX, 0xf0c79c1f);
			MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[0],EAX);
			if(EEINST_ISLIVE1(_Rt_)) {
				CDQ();
				MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[1], EDX);
			}
			else {
				EEINST_RESETHASLIVE1(_Rt_);
			}
		}
	}
}
void updatePCCR()
{
	// read the old pccr and update pcr0/1
	MOV32MtoR(EAX, (uptr)&cpuRegs.PERF.n.pccr);
	MOV32RtoR(EDX, EAX);
	MOV32MtoR(ECX, (uptr)&cpuRegs.cycle);

	AND32ItoR(EAX, 0x800003E0);
	CMP32ItoR(EAX, 0x80000020);
	j8Ptr[0] = JNE8(0);
	MOV32MtoR(EAX, (uptr)&s_iLastPERFCycle[0]);
	ADD32RtoM((uptr)&cpuRegs.PERF.n.pcr0, ECX);
	SUB32RtoM((uptr)&cpuRegs.PERF.n.pcr0, EAX);
	x86SetJ8(j8Ptr[0]);

	AND32ItoR(EDX, 0x800F8000);
	CMP32ItoR(EDX, 0x80008000);
	j8Ptr[0] = JNE8(0);
	MOV32MtoR(EAX, (uptr)&s_iLastPERFCycle[1]);
	ADD32RtoM((uptr)&cpuRegs.PERF.n.pcr1, ECX);
	SUB32RtoM((uptr)&cpuRegs.PERF.n.pcr1, EAX);
	x86SetJ8(j8Ptr[0]);
}
void recMTC0()
{
	if( GPR_IS_CONST1(_Rt_) ) {
		switch (_Rd_) {
			case 12:
				iFlushCall(FLUSH_NODESTROY);
				//_flushCachedRegs(); //NOTE: necessary?
				_callFunctionArg1((uptr)WriteCP0Status, MEM_CONSTTAG, g_cpuConstRegs[_Rt_].UL[0]);
				break;
			case 9:
				MOV32MtoR(ECX, (uptr)&cpuRegs.cycle);
				MOV32RtoM((uptr)&s_iLastCOP0Cycle, ECX);
				MOV32ItoM((uptr)&cpuRegs.CP0.r[9], g_cpuConstRegs[_Rt_].UL[0]);
				break;
			case 25:
				COP0_LOG("MTC0 PCCR = %x PCR0 = %x PCR1 = %x IMM= %x\n",
					cpuRegs.PERF.n.pccr, cpuRegs.PERF.n.pcr0, cpuRegs.PERF.n.pcr1, _Imm_ & 0x3F);
				switch(_Imm_ & 0x3F){
					case 0:

						updatePCCR();
						MOV32ItoM((uptr)&cpuRegs.PERF.n.pccr, g_cpuConstRegs[_Rt_].UL[0]);

						// update the cycles
						MOV32RtoM((uptr)&s_iLastPERFCycle[0], ECX);
						MOV32RtoM((uptr)&s_iLastPERFCycle[1], ECX);
						break;
					case 1:
						MOV32MtoR(EAX, (uptr)&cpuRegs.cycle);
						MOV32ItoM((uptr)&cpuRegs.PERF.n.pcr0, g_cpuConstRegs[_Rt_].UL[0]);
						MOV32RtoM((uptr)&s_iLastPERFCycle[0], EAX);
						break;
					case 3:
						MOV32MtoR(EAX, (uptr)&cpuRegs.cycle);
						MOV32ItoM((uptr)&cpuRegs.PERF.n.pcr1, g_cpuConstRegs[_Rt_].UL[0]);
						MOV32RtoM((uptr)&s_iLastPERFCycle[1], EAX);
						break;
				}
				break;
			case 24:
				COP0_LOG("MTC0 Breakpoint debug Registers code = %x\n", cpuRegs.code & 0x3FF);
				break;
			default:
				MOV32ItoM((uptr)&cpuRegs.CP0.r[_Rd_], g_cpuConstRegs[_Rt_].UL[0]);
				break;
		}
	}
	else {
		switch (_Rd_) {
			case 12:
				iFlushCall(FLUSH_NODESTROY);
				//_flushCachedRegs(); //NOTE: necessary?
				_callFunctionArg1((uptr)WriteCP0Status, MEM_GPRTAG|_Rt_, 0);
				break;
			case 9:
				MOV32MtoR(ECX, (uptr)&cpuRegs.cycle);
				_eeMoveGPRtoM((uptr)&cpuRegs.CP0.r[9], _Rt_);
				MOV32RtoM((uptr)&s_iLastCOP0Cycle, ECX);
				break;
			case 25:
				COP0_LOG("MTC0 PCCR = %x PCR0 = %x PCR1 = %x IMM= %x\n",
					cpuRegs.PERF.n.pccr, cpuRegs.PERF.n.pcr0, cpuRegs.PERF.n.pcr1, _Imm_ & 0x3F);
				switch(_Imm_ & 0x3F){
					case 0:
						updatePCCR();
						_eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pccr, _Rt_);

						// update the cycles
						MOV32RtoM((uptr)&s_iLastPERFCycle[0], ECX);
						MOV32RtoM((uptr)&s_iLastPERFCycle[1], ECX);
						break;
					case 1:
						MOV32MtoR(ECX, (uptr)&cpuRegs.cycle);
						_eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pcr0, _Rt_);
						MOV32RtoM((uptr)&s_iLastPERFCycle[0], ECX);
						break;
					case 3:
						MOV32MtoR(ECX, (uptr)&cpuRegs.cycle);
						_eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pcr1, _Rt_);
						MOV32RtoM((uptr)&s_iLastPERFCycle[1], ECX);
						break;
				}
				break;
			case 24:
				COP0_LOG("MTC0 Breakpoint debug Registers code = %x\n", cpuRegs.code & 0x3FF);
				break;
			default:
				_eeMoveGPRtoM((uptr)&cpuRegs.CP0.r[_Rd_], _Rt_);
				break;
		}
	}
}
#endif


/*void rec(COP0) {
}

void rec(BC0F) {
}

void rec(BC0T) {
}

void rec(BC0FL) {
}

void rec(BC0TL) {
}

void rec(TLBR) {
}

void rec(TLBWI) {
}

void rec(TLBWR) {
}

void rec(TLBP) {
}*/

}}}}
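As background for the recDoBranchImm helper in the removed iCP0.cpp above (the rename keeps the helper, only the file moves), here is a minimal C++ sketch of the MIPS delay-slot rule it models. Everything in it is illustrative; none of these names exist in PCSX2.

// A normal MIPS branch always executes the instruction in its delay slot; a
// branch-"likely" nullifies that instruction when the branch is not taken.
// That is why recDoBranchImm only rewinds pc (to recompile the delay slot on
// the not-taken path) when isLikely is false.
#include <cstdio>

static void executeDelaySlot() { std::puts("delay slot runs"); }
static void jumpToTarget()     { std::puts("branch taken");    }
static void fallThrough()      { std::puts("fall through");    }

static void mipsBranch(bool taken, bool likely)
{
	if (taken) {
		executeDelaySlot();      // always runs on the taken path
		jumpToTarget();
	}
	else {
		if (!likely)
			executeDelaySlot();  // likely branches cancel it here
		fallThrough();
	}
}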
@@ -1,47 +0,0 @@
/* Pcsx2 - Pc Ps2 Emulator
 * Copyright (C) 2002-2008 Pcsx2 Team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 */

#ifndef __ICP0_H__
#define __ICP0_H__

/*********************************************************
* COP0 opcodes                                            *
*                                                         *
*********************************************************/

namespace Dynarec {
namespace R5900 {
namespace OpcodeImpl {
namespace COP0
{
	void recMFC0( void );
	void recMTC0( void );
	void recBC0F( void );
	void recBC0T( void );
	void recBC0FL( void );
	void recBC0TL( void );
	void recTLBR( void );
	void recTLBWI( void );
	void recTLBWR( void );
	void recTLBP( void );
	void recERET( void );
	void recDI( void );
	void recEI( void );

}}}}
#endif
@@ -25,7 +25,7 @@
#include "iR5900.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCP0.h"
#include "iCOP0.h"
#include "VUmicro.h"
#include "VUflags.h"
#include "iVUmicro.h"
@@ -25,7 +25,7 @@
#include "iR5900.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCP0.h"
#include "iCOP0.h"
#include "VUmicro.h"
#include "VUflags.h"
#include "iVUmicro.h"
@@ -705,13 +705,13 @@ int recVUTransformAddr(int x86reg, VURegs* VU, int vireg, int imm)
void recVUMI_LQ(VURegs *VU, int info)
{
	s16 imm;

	if ( _Ft_ == 0 ) return;
	//SysPrintf("recVUMI_LQ \n");
	imm = (VU->code & 0x400) ? (VU->code & 0x3ff) | 0xfc00 : (VU->code & 0x3ff);
	if (_Fs_ == 0) {
		_loadEAX(VU, -1, (uptr)GET_VU_MEM(VU, (u32)imm*16), info);
	} else {
	}
	else {
		int fsreg = ALLOCVI(_Fs_, MODE_READ);
		_loadEAX(VU, recVUTransformAddr(fsreg, VU, _Fs_, imm), (uptr)VU->Mem, info);
	}
@@ -777,58 +777,49 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
{
	assert( offset < 0x80000000 );

	if( _Fs_ == 0 ) {
		if( _XYZW_SS ) {
	if ( _Fs_ == 0 ) {
		if ( _XYZW_SS ) {
			u32 c = _W ? 0x3f800000 : 0;
			if( x86reg >= 0 ) MOV32ItoRmOffset(x86reg, c, offset+(_W?12:(_Z?8:(_Y?4:0))));
			if ( x86reg >= 0 ) MOV32ItoRmOffset(x86reg, c, offset+(_W?12:(_Z?8:(_Y?4:0))));
			else MOV32ItoM(offset+(_W?12:(_Z?8:(_Y?4:0))), c);
		}
		else {
			int zeroreg = (x86reg == EAX) ? ALLOCTEMPX86(0) : EAX;

			XOR32RtoR(zeroreg, zeroreg);
			if( x86reg >= 0 ) {
				if( _X ) MOV32RtoRmOffset(x86reg, zeroreg, offset);
				if( _Y ) MOV32RtoRmOffset(x86reg, zeroreg, offset+4);
				if( _Z ) MOV32RtoRmOffset(x86reg, zeroreg, offset+8);
				if( _W ) MOV32ItoRmOffset(x86reg, 0x3f800000, offset+12);
			if ( x86reg >= 0 ) {
				if ( _X ) MOV32ItoRmOffset(x86reg, 0x00000000, offset);
				if ( _Y ) MOV32ItoRmOffset(x86reg, 0x00000000, offset+4);
				if ( _Z ) MOV32ItoRmOffset(x86reg, 0x00000000, offset+8);
				if ( _W ) MOV32ItoRmOffset(x86reg, 0x3f800000, offset+12);
			}
			else {
				if( _X ) MOV32RtoM(offset, zeroreg);
				if( _Y ) MOV32RtoM(offset+4, zeroreg);
				if( _Z ) MOV32RtoM(offset+8, zeroreg);
				if( _W ) MOV32ItoM(offset+12, 0x3f800000);
				if ( _X ) MOV32ItoM(offset, 0x00000000);
				if ( _Y ) MOV32ItoM(offset+4, 0x00000000);
				if ( _Z ) MOV32ItoM(offset+8, 0x00000000);
				if ( _W ) MOV32ItoM(offset+12, 0x3f800000);
			}

			if( zeroreg != EAX ) _freeX86reg(zeroreg);
		}
		return;
	}

	switch(_X_Y_Z_W) {
		case 1: // W
			//SysPrintf("SAVE EAX W\n");
	switch ( _X_Y_Z_W ) {
		case 1: // W*
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0x27);
			if( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset+12);
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset+12);
			else SSE_MOVSS_XMM_to_M32(offset+12, EEREC_S);
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0x27);
			break;
		case 2: // Z
			//SysPrintf("SAVE EAX Z\n");
		case 2: // Z*
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+8);
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+8);
			else SSE_MOVSS_XMM_to_M32(offset+8, EEREC_TEMP);
			break;
		case 3: // ZW
			//SysPrintf("SAVE EAX ZW\n");
			if( x86reg >= 0 ) SSE_MOVHPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+8);
		case 3: // ZW*
			if ( x86reg >= 0 ) SSE_MOVHPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+8);
			else SSE_MOVHPS_XMM_to_M64(offset+8, EEREC_S);
			break;
		case 5: // YW
			//SysPrintf("SAVE EAX YW\n");
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xB1);
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if( x86reg >= 0 ) {
			if ( x86reg >= 0 ) {
				SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+4);
				SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+12);
			}
@@ -838,25 +829,22 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
			}
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xB1);
			break;
		case 4: // Y
			//SysPrintf("SAVE EAX Y\n");
		case 4: // Y*
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xe1);
			if( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset+4);
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset+4);
			else SSE_MOVSS_XMM_to_M32(offset+4, EEREC_S);
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xe1);
			break;
		case 6: // YZ
			//SysPrintf("SAVE EAX YZ\n");
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xc9);
			if( x86reg >= 0 ) SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+4);
			if ( x86reg >= 0 ) SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+4);
			else SSE_MOVLPS_XMM_to_M64(offset+4, EEREC_S);
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xd2);
			break;
		case 7: // YZW
			//SysPrintf("SAVE EAX YZW\n");
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0x39);
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if( x86reg >= 0 ) {
			if ( x86reg >= 0 ) {
				SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+4);
				SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+12);
			}
@@ -866,35 +854,33 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
			}
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0x93);
			break;
		case 8: // X
			//SysPrintf("SAVE EAX X\n");
			if( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset);
		case 8: // X*
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset);
			else SSE_MOVSS_XMM_to_M32(offset, EEREC_S);
			break;
		case 9: // XW
			//SysPrintf("SAVE EAX XW\n");
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset);
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_S, offset);
			else SSE_MOVSS_XMM_to_M32(offset, EEREC_S);

			if( cpucaps.hasStreamingSIMD3Extensions ) SSE3_MOVSLDUP_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
			if ( cpucaps.hasStreamingSIMD3Extensions ) SSE3_MOVSLDUP_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
			else SSE_SHUFPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP, 0x55);

			if( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+12);
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+12);
			else SSE_MOVSS_XMM_to_M32(offset+12, EEREC_TEMP);

			break;
		//case 10: break;
		//case 11: break;
		case 12: // XY
			//SysPrintf("SAVE EAX XY\n");
			if( x86reg >= 0 ) SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+0);
			if ( x86reg >= 0 ) SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+0);
			else SSE_MOVLPS_XMM_to_M64(offset, EEREC_S);
			break;

		case 13: // XYW
			//SysPrintf("SAVE EAX XYW\n");
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xB4); //ZWYX
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if( x86reg >= 0 ) {
			if ( x86reg >= 0 ) {
				SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+0);
				SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+12);
			}
@@ -906,9 +892,8 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xB4);
			break;
		case 14: // XYZ
			//SysPrintf("SAVE EAX XYZ\n");
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if( x86reg >= 0 ) {
			if ( x86reg >= 0 ) {
				SSE_MOVLPS_XMM_to_RmOffset(x86reg, EEREC_S, offset+0);
				SSE_MOVSS_XMM_to_RmOffset(x86reg, EEREC_TEMP, offset+8);
			}
@@ -918,8 +903,7 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
			}
			break;
		case 15: // XYZW
			//SysPrintf("SAVE EAX XYZW\n");
			if( VU == &VU1 ) {
			if ( VU == &VU1 ) {
				if( x86reg >= 0 ) SSE_MOVAPSRtoRmOffset(x86reg, EEREC_S, offset+0);
				else SSE_MOVAPS_XMM_to_M128(offset, EEREC_S);
			}
@@ -933,22 +917,8 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
			break;
		default: // ToDo: Needs checking! (cottonvibes)
			SysPrintf("SAVEEAX Default %d\n", _X_Y_Z_W);
			// EEREC_D is a temp reg
			// find the first nonwrite reg
			/*
			t1reg = _vuGetTempXMMreg(info);
			if( t1reg < 0 ) {
				for(t1reg = 0; t1reg < XMMREGS; ++t1reg) {
					if( xmmregs[t1reg].inuse && !(xmmregs[t1reg].mode&MODE_WRITE) ) break;
				}
				if( t1reg == XMMREGS ) t1reg = -1;
				else if( t1reg != EEREC_S ) _allocTempXMMreg(XMMT_FPS, t1reg);
			}
			*/
			// do it with one reg
			//SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);

			if( VU == &VU1 ) {
			if ( VU == &VU1 ) {
				if( x86reg >= 0 ) SSE_MOVAPSRmtoROffset(EEREC_TEMP, x86reg, offset);
				else SSE_MOVAPS_M128_to_XMM(EEREC_TEMP, offset);
			}
@@ -962,20 +932,18 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
			VU_MERGE_REGS_SAFE(EEREC_TEMP, EEREC_S, _X_Y_Z_W);

			if( VU == &VU1 ) {
			if ( VU == &VU1 ) {
				if( x86reg >= 0 ) SSE_MOVAPSRtoRmOffset(x86reg, EEREC_TEMP, offset);
				else SSE_MOVAPS_XMM_to_M128(offset, EEREC_TEMP);
			}
			else {
				if( x86reg >= 0 ) SSE_MOVUPSRtoRmOffset(x86reg, EEREC_TEMP, offset);
				if ( x86reg >= 0 ) SSE_MOVUPSRtoRmOffset(x86reg, EEREC_TEMP, offset);
				else {
					if( offset & 15 ) SSE_MOVUPS_XMM_to_M128(offset, EEREC_TEMP);
					else SSE_MOVAPS_XMM_to_M128(offset, EEREC_TEMP);
				}
			}

			// read back the data
			//SSE_MOVAPS_M128_to_XMM(EEREC_S, (uptr)&VU->VF[_Fs_]);
			break;
	}
}
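The "very minor VU micro work" is visible in the _saveEAX hunks above: when _Fs_ is 0 the instruction stores VF00, which is hard-wired to (0, 0, 0, 1.0f), and the masked stores now write immediate constants instead of first zeroing a scratch x86 register. A plain-C sketch of the effect, with hypothetical names that are not PCSX2 code:

// What the _Fs_ == 0 path of _saveEAX writes for each enabled field of the
// XYZW mask: zeros for x/y/z and 1.0f (bit pattern 0x3f800000) for w, since
// VF00 is the VU's constant register.
static void storeVF00(float* dst, bool X, bool Y, bool Z, bool W)
{
	if (X) dst[0] = 0.0f;
	if (Y) dst[1] = 0.0f;
	if (Z) dst[2] = 0.0f;
	if (W) dst[3] = 1.0f;
}

Writing the immediates directly avoids allocating and clearing the temporary register (the old XOR32RtoR path shown in the removed lines).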
@@ -25,7 +25,7 @@
#include "iR5900.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCP0.h"
#include "iCOP0.h"
#include "VUmicro.h"
#include "VUflags.h"
#include "iVUmicro.h"
@@ -35,7 +35,7 @@
#include "iR5900Move.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCP0.h"
#include "iCOP0.h"

namespace Dynarec {
namespace R5900
@@ -33,7 +33,7 @@
#include "iR5900Move.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCP0.h"
#include "iCOP0.h"
#include "iVUmicro.h"
#include "VU.h"
#include "VUmicro.h"
@@ -26,7 +26,7 @@
#include "iR5900.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCP0.h"
#include "iCOP0.h"
#include "iVUmicro.h"
#include "VU.h"
#include "VUmicro.h"