improve nop handling and proper behaviour for LDM^
fixes dslinux
commit 9b98b8816a
parent 60650fa82e
@@ -725,6 +725,8 @@ void ARMv4::ExecuteJIT()
             return;
         }
 
+        //printf("executing armv4 at %08x\n", instrAddr);
+
         ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock<1>(instrAddr);
         if (block)
             Cycles += block();
@@ -273,6 +273,8 @@ bool IsIdleLoop(FetchedInstr* instrs, int instrsCount)
 
 typedef void (*InterpreterFunc)(ARM* cpu);
 
+void NOP(ARM* cpu) {}
+
 #define F(x) &ARMInterpreter::A_##x
 #define F_ALU(name, s) \
     F(name##_REG_LSL_IMM##s), F(name##_REG_LSR_IMM##s), F(name##_REG_ASR_IMM##s), F(name##_REG_ROR_IMM##s), \
@@ -320,7 +322,8 @@ InterpreterFunc InterpretARM[ARMInstrInfo::ak_Count] =
     F(LDM), F(STM),
 
     F(B), F(BL), F(BLX_IMM), F(BX), F(BLX_REG),
-    F(UNK), F(MSR_IMM), F(MSR_REG), F(MRS), F(MCR), F(MRC), F(SVC)
+    F(UNK), F(MSR_IMM), F(MSR_REG), F(MRS), F(MCR), F(MRC), F(SVC),
+    NOP
 };
 #undef F_ALU
 #undef F_MEM_WB
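A minimal sketch of the dispatch pattern the two hunks above extend (simplified, hypothetical names rather than the real melonDS types): the interpreter fallback indexes a handler table by instruction kind, so the new ak_Nop kind needs a matching do-nothing entry at the end of the table.

#include <cstdio>

struct Cpu { unsigned r[16]; };
typedef void (*InterpFunc)(Cpu* cpu);

void InterpAdd(Cpu* cpu) { cpu->r[0] = cpu->r[1] + cpu->r[2]; }
void InterpNop(Cpu* cpu) {}  // do-nothing handler, analogous to NOP above

// table indexed by instruction kind; every kind must have an entry,
// otherwise dispatching it indexes past the end of the table
InterpFunc interpTable[] = { InterpAdd, InterpNop };

int main()
{
    Cpu cpu = {};
    cpu.r[1] = 2; cpu.r[2] = 3;
    interpTable[0](&cpu);           // ordinary instruction
    interpTable[1](&cpu);           // nop-style instruction: no effect
    printf("r0 = %u\n", cpu.r[0]);  // prints 5
    return 0;
}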
@@ -387,8 +390,8 @@ void CompileBlock(ARM* cpu)
     u32 nextInstr[2] = {cpu->NextInstr[0], cpu->NextInstr[1]};
     u32 nextInstrAddr[2] = {blockAddr, r15};
 
-    JIT_DEBUGPRINT("start block %x (%x) %p %p (region invalidates %dx)\n",
-        blockAddr, pseudoPhysicalAddr, FastBlockAccess[pseudoPhysicalAddr / 2],
+    JIT_DEBUGPRINT("start block %x %08x (%x) %p %p (region invalidates %dx)\n",
+        blockAddr, cpu->CPSR, pseudoPhysicalAddr, FastBlockAccess[pseudoPhysicalAddr / 2],
         cpu->Num == 0 ? LookUpBlock<0>(blockAddr) : LookUpBlock<1>(blockAddr),
         CodeRanges[pseudoPhysicalAddr / 512].TimesInvalidated);
 
@@ -473,7 +476,9 @@ void CompileBlock(ARM* cpu)
             else
             {
                 u32 icode = ((instrs[i].Instr >> 4) & 0xF) | ((instrs[i].Instr >> 16) & 0xFF0);
-                assert(InterpretARM[instrs[i].Info.Kind] == ARMInterpreter::ARMInstrTable[icode] || instrs[i].Info.Kind == ARMInstrInfo::ak_MOV_REG_LSL_IMM);
+                assert(InterpretARM[instrs[i].Info.Kind] == ARMInterpreter::ARMInstrTable[icode]
+                    || instrs[i].Info.Kind == ARMInstrInfo::ak_MOV_REG_LSL_IMM
+                    || instrs[i].Info.Kind == ARMInstrInfo::ak_Nop);
                 if (cpu->CheckCondition(instrs[i].Cond()))
                     InterpretARM[instrs[i].Info.Kind](cpu);
                 else
@@ -152,7 +152,7 @@ public:
                 needValueLoaded = BitSet16(instr.Info.SrcRegs);
             for (int reg : needToBeLoaded)
                 LoadRegister(reg, needValueLoaded[reg]);
         }
         {
             BitSet16 loadedSet(LoadedRegs);
             BitSet16 loadRegs(instr.Info.NotStrictlyNeeded & futureNeeded & ~LoadedRegs);
@@ -134,7 +134,7 @@ void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR)
 {
     IrregularCycles = true;
 
-    BitSet16 hiRegsLoaded(RegCache.DirtyRegs & 0xFF00);
+    BitSet16 hiRegsLoaded(RegCache.LoadedRegs & 0x7F00);
     bool previouslyDirty = CPSRDirty;
     SaveCPSR();
 
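A short illustration of the mask change above, under the assumption (from the surrounding code) that bit n of a RegCache bitset stands for guest register Rn: 0x7F00 selects R8-R14, the registers with banked copies that may need reloading after a CPSR-restoring jump switches mode, while the old 0xFF00 also pulled in R15, which is not banked. Basing the set on LoadedRegs rather than DirtyRegs also covers registers that are cached but clean. This is a standalone sketch, not compiler code.

#include <cstdint>
#include <cstdio>

int main()
{
    const uint16_t oldMask = 0xFF00;  // R8..R15
    const uint16_t newMask = 0x7F00;  // R8..R14
    for (int reg = 0; reg < 16; reg++)
    {
        bool before = (oldMask & (1u << reg)) != 0;
        bool after  = (newMask & (1u << reg)) != 0;
        if (before != after)
            printf("R%d dropped from the banked-register mask\n", reg);  // prints only R15
    }
    return 0;
}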
@@ -156,12 +156,12 @@ void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR)
     if (!restoreCPSR)
         XOR(32, R(ABI_PARAM3), R(ABI_PARAM3));
     else
-        MOV(32, R(ABI_PARAM3), Imm32(restoreCPSR));
+        MOV(32, R(ABI_PARAM3), Imm32(true)); // what a waste
     if (Num == 0)
         CALL((void*)&ARMv5::JumpTo);
     else
         CALL((void*)&ARMv4::JumpTo);
 
     if (!Thumb && restoreCPSR && CurInstr.Cond() < 0xE)
     {
         for (int reg : hiRegsLoaded)
@@ -308,6 +308,7 @@ const Compiler::CompileFunc A_Comp[ARMInstrInfo::ak_Count] =
     F(A_Comp_BranchImm), F(A_Comp_BranchImm), F(A_Comp_BranchImm), F(A_Comp_BranchXchangeReg), F(A_Comp_BranchXchangeReg),
     // system stuff
     NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    F(Nop)
 };
 
 const Compiler::CompileFunc T_Comp[ARMInstrInfo::tk_Count] = {
@@ -79,6 +79,8 @@ public:
         opInvertOp2 = 1 << 5,
     };
 
+    void Nop() {}
+
     void A_Comp_Arith();
     void A_Comp_MovOp();
     void A_Comp_CmpOp();
@@ -531,7 +531,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
         {
             if (regs[reg])
             {
-                if (usermode && reg >= 8 && reg < 15)
+                if (usermode && !regs[15] && reg >= 8 && reg < 15)
                 {
                     if (firstUserMode)
                     {
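The added !regs[15] test mirrors the architectural rule for LDM with the S bit set: when R15 is in the register list the instruction is an exception return, which loads the current mode's registers and restores CPSR from SPSR, so the user-mode bank must not be touched; only without R15 does the S bit mean "transfer user-mode registers". A standalone sketch of that rule (illustration only, not the emitter):

#include <cstdint>
#include <cstdio>

// true if an LDM with the S bit set should use the user-mode bank for
// R8..R14, false if it is an exception return using the current mode
bool LdmUsesUserBank(bool sBit, uint16_t regList)
{
    const bool hasR15 = (regList & (1u << 15)) != 0;
    return sBit && !hasR15;
}

int main()
{
    printf("%d\n", LdmUsesUserBank(true, 0x00FF));  // ldmia ..., {r0-r7}^    -> 1
    printf("%d\n", LdmUsesUserBank(true, 0x8003));  // ldmia ..., {r0,r1,pc}^ -> 0
    return 0;
}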
@@ -545,7 +545,8 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
                     FixupBranch sucessfulWritten = J_CC(CC_NC);
                     if (RegCache.Mapping[reg] != INVALID_REG)
                         MOV(32, R(RegCache.Mapping[reg]), R(ABI_PARAM3));
-                    SaveReg(reg, ABI_PARAM3);
+                    else
+                        SaveReg(reg, ABI_PARAM3);
                     SetJumpTarget(sucessfulWritten);
                 }
                 else if (RegCache.Mapping[reg] == INVALID_REG)
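The inserted else makes the write-back of the loaded value exclusive: update the mapped host register when one exists, and spill to the in-memory register file only when there is no mapping, instead of doing both. A simplified standalone sketch of that policy (hypothetical helper names, not the real emitter):

#include <cstdint>
#include <cstdio>

constexpr int INVALID_HOST_REG = -1;

// write a value loaded from memory back into guest register 'reg':
// prefer the cached host register, fall back to the register file
void WriteBackLoadedValue(int reg, uint32_t value, const int mapping[16], uint32_t regFile[16])
{
    if (mapping[reg] != INVALID_HOST_REG)
        printf("mov host reg %d, %08x\n", mapping[reg], (unsigned)value);  // value stays cached
    else
        regFile[reg] = value;                                              // no mapping: spill
}

int main()
{
    int mapping[16];
    uint32_t regFile[16] = {};
    for (int i = 0; i < 16; i++) mapping[i] = INVALID_HOST_REG;
    mapping[3] = 8;  // pretend guest R3 is cached in a host register
    WriteBackLoadedValue(3, 0x1234, mapping, regFile);
    WriteBackLoadedValue(4, 0x5678, mapping, regFile);
    printf("regFile[4] = %08x\n", (unsigned)regFile[4]);
    return 0;
}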
@@ -392,6 +392,8 @@ Info Decode(bool thumb, u32 num, u32 instr)
         u32 data = ARMInstrTable[((instr >> 4) & 0xF) | ((instr >> 16) & 0xFF0)];
         if (num == 0 && (instr & 0xFE000000) == 0xFA000000)
             data = A_BLX_IMM;
+        else if ((instr >> 28) == 0xF)
+            data = ak(ak_Nop);
 
         if (data & A_UnkOnARM7 && num != 0)
             data = A_UNK;
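A sketch of what the new decoder branch accepts, with simplified stand-in names (the real Decode returns an Info struct): after the ARM9-only BLX <imm> pattern is handled, any remaining instruction whose condition field is 0xF - the never/hint space, e.g. PLD - is classified as a no-op instead of falling through as an unknown instruction.

#include <cstdint>
#include <cstdio>

enum Kind { kBlxImm, kNop, kOther };

Kind ClassifyInstr(uint32_t instr, int num)  // num: 0 = ARM9, 1 = ARM7
{
    if (num == 0 && (instr & 0xFE000000) == 0xFA000000)
        return kBlxImm;                      // blx #imm, ARM9 only
    if ((instr >> 28) == 0xF)
        return kNop;                         // cond = 0xF: treat as no-op
    return kOther;
}

int main()
{
    printf("%d\n", ClassifyInstr(0xF5D1F000, 1));  // pld [r1] on ARM7 -> kNop (1)
    printf("%d\n", ClassifyInstr(0xFA000000, 0));  // blx #0 on ARM9   -> kBlxImm (0)
    return 0;
}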
@@ -139,6 +139,8 @@ enum
     ak_MRC,
     ak_SVC,
 
+    ak_Nop,
+
     ak_Count,
 
     tk_LSL_IMM = 0,