Merge pull request #9447 from Dentomologist/convert_indextype_to_enum_class

Arm64Emitter: Convert IndexType to enum class
JosJuice 2021-01-17 12:23:53 +01:00 committed by GitHub
commit f383397c9c
14 changed files with 267 additions and 259 deletions
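This is a mechanical rename with no behavior change: the unscoped `enum IndexType` in Arm64Emitter.h becomes a scoped `enum class IndexType`, every call site qualifies the enumerators (`INDEX_UNSIGNED` becomes `IndexType::Unsigned`, `INDEX_POST` becomes `IndexType::Post`, and so on), and the assertion strings are updated to match. A minimal sketch of the pattern, using a hypothetical `LDR` stub in place of the real `ARM64XEmitter::LDR` (which takes `ARM64Reg` operands):

```cpp
#include <cstdio>

// After this PR: a scoped enum. Enumerators need qualification and no
// longer convert implicitly to integer types.
enum class IndexType
{
  Unsigned,
  Post,
  Pre,
  Signed,  // used in LDP/STP
};

// Illustrative stub only; the real emitter method operates on ARM64Reg.
void LDR(IndexType type, int rt, int rn, int imm)
{
  std::printf("LDR type=%d, x%d, [x%d, #%d]\n", static_cast<int>(type), rt, rn, imm);
}

int main()
{
  // Before: LDR(INDEX_UNSIGNED, 0, 1, 16);
  LDR(IndexType::Unsigned, 0, 1, 16);

  // With the old unscoped enum an enumerator decayed silently to int, so it
  // could be passed where an opcode or immediate was expected. That mistake
  // no longer compiles:
  // int op = IndexType::Post;  // error: no implicit conversion
  return 0;
}
```

The scoped enum also keeps the enumerator names out of the surrounding namespace, which is why the short spellings `Unsigned`/`Post`/`Pre`/`Signed` can replace the prefixed `INDEX_*` names without collisions.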


@ -751,8 +751,10 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn,
else if (size == 16)
imm >>= 1;
ASSERT_MSG(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED): offset must be positive %d", __func__, imm);
ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s(INDEX_UNSIGNED): offset too large %d", __func__, imm);
ASSERT_MSG(DYNA_REC, imm >= 0, "%s(IndexType::Unsigned): offset must be positive %d", __func__,
imm);
ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s(IndexType::Unsigned): offset too large %d", __func__,
imm);
Rt = DecodeReg(Rt);
Rn = DecodeReg(Rn);
@ -826,17 +828,17 @@ void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64R
switch (type)
{
case INDEX_SIGNED:
case IndexType::Signed:
type_encode = 0b010;
break;
case INDEX_POST:
case IndexType::Post:
type_encode = 0b001;
break;
case INDEX_PRE:
case IndexType::Pre:
type_encode = 0b011;
break;
case INDEX_UNSIGNED:
ASSERT_MSG(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __func__);
case IndexType::Unsigned:
ASSERT_MSG(DYNA_REC, false, "%s doesn't support IndexType::Unsigned!", __func__);
break;
}
@ -1830,70 +1832,70 @@ void ARM64XEmitter::LDNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm)
// XXX: Most of these support vectors
void ARM64XEmitter::STRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(0x0E4, Rt, Rn, imm, 8);
else
EncodeLoadStoreIndexedInst(0x0E0, type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
EncodeLoadStoreIndexedInst(0x0E0, type == IndexType::Post ? 1 : 3, Rt, Rn, imm);
}
void ARM64XEmitter::LDRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(0x0E5, Rt, Rn, imm, 8);
else
EncodeLoadStoreIndexedInst(0x0E1, type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
EncodeLoadStoreIndexedInst(0x0E1, type == IndexType::Post ? 1 : 3, Rt, Rn, imm);
}
void ARM64XEmitter::LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E6 : 0x0E7, Rt, Rn, imm, 8);
else
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E2 : 0x0E3, type == INDEX_POST ? 1 : 3, Rt, Rn,
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E2 : 0x0E3, type == IndexType::Post ? 1 : 3, Rt, Rn,
imm);
}
void ARM64XEmitter::STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(0x1E4, Rt, Rn, imm, 16);
else
EncodeLoadStoreIndexedInst(0x1E0, type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
EncodeLoadStoreIndexedInst(0x1E0, type == IndexType::Post ? 1 : 3, Rt, Rn, imm);
}
void ARM64XEmitter::LDRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(0x1E5, Rt, Rn, imm, 16);
else
EncodeLoadStoreIndexedInst(0x1E1, type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
EncodeLoadStoreIndexedInst(0x1E1, type == IndexType::Post ? 1 : 3, Rt, Rn, imm);
}
void ARM64XEmitter::LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E6 : 0x1E7, Rt, Rn, imm, 16);
else
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E2 : 0x1E3, type == INDEX_POST ? 1 : 3, Rt, Rn,
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E2 : 0x1E3, type == IndexType::Post ? 1 : 3, Rt, Rn,
imm);
}
void ARM64XEmitter::STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E4 : 0x2E4, Rt, Rn, imm, Is64Bit(Rt) ? 64 : 32);
else
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E0 : 0x2E0, type == INDEX_POST ? 1 : 3, Rt, Rn,
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E0 : 0x2E0, type == IndexType::Post ? 1 : 3, Rt, Rn,
imm);
}
void ARM64XEmitter::LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E5 : 0x2E5, Rt, Rn, imm, Is64Bit(Rt) ? 64 : 32);
else
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E1 : 0x2E1, type == INDEX_POST ? 1 : 3, Rt, Rn,
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E1 : 0x2E1, type == IndexType::Post ? 1 : 3, Rt, Rn,
imm);
}
void ARM64XEmitter::LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
EncodeLoadStoreIndexedInst(0x2E6, Rt, Rn, imm, 32);
else
EncodeLoadStoreIndexedInst(0x2E2, type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
EncodeLoadStoreIndexedInst(0x2E2, type == IndexType::Post ? 1 : 3, Rt, Rn, imm);
}
// Load/Store register (register offset)
@ -2121,13 +2123,13 @@ void ARM64XEmitter::ABI_PushRegisters(BitSet32 registers)
// The first push must adjust the SP, else a context switch may invalidate everything below SP.
if (num_regs & 1)
{
STR(INDEX_PRE, (ARM64Reg)(X0 + *it++), SP, -stack_size);
STR(IndexType::Pre, (ARM64Reg)(X0 + *it++), SP, -stack_size);
}
else
{
ARM64Reg first_reg = (ARM64Reg)(X0 + *it++);
ARM64Reg second_reg = (ARM64Reg)(X0 + *it++);
STP(INDEX_PRE, first_reg, second_reg, SP, -stack_size);
STP(IndexType::Pre, first_reg, second_reg, SP, -stack_size);
}
// Fast store for all other registers, this is always an even number.
@ -2135,7 +2137,7 @@ void ARM64XEmitter::ABI_PushRegisters(BitSet32 registers)
{
ARM64Reg odd_reg = (ARM64Reg)(X0 + *it++);
ARM64Reg even_reg = (ARM64Reg)(X0 + *it++);
STP(INDEX_SIGNED, odd_reg, even_reg, SP, 16 * (i + 1));
STP(IndexType::Signed, odd_reg, even_reg, SP, 16 * (i + 1));
}
ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __func__);
@ -2166,14 +2168,14 @@ void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask)
{
ARM64Reg odd_reg = (ARM64Reg)(X0 + *it++);
ARM64Reg even_reg = (ARM64Reg)(X0 + *it++);
LDP(INDEX_SIGNED, odd_reg, even_reg, SP, 16 * (i + 1));
LDP(IndexType::Signed, odd_reg, even_reg, SP, 16 * (i + 1));
}
// Post loading the first (two) registers.
if (num_regs & 1)
LDR(INDEX_POST, first, SP, stack_size);
LDR(IndexType::Post, first, SP, stack_size);
else
LDP(INDEX_POST, first, second, SP, stack_size);
LDP(IndexType::Post, first, second, SP, stack_size);
ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __func__);
}
@ -2198,12 +2200,12 @@ void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type,
else if (size == 128)
encoded_size = 0;
if (type == INDEX_UNSIGNED)
if (type == IndexType::Unsigned)
{
ASSERT_MSG(DYNA_REC, !(imm & ((size - 1) >> 3)),
"%s(INDEX_UNSIGNED) immediate offset must be aligned to size! (%d) (%p)", __func__,
imm, m_emit->GetCodePtr());
ASSERT_MSG(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED) immediate offset must be positive!",
"%s(IndexType::Unsigned) immediate offset must be aligned to size! (%d) (%p)",
__func__, imm, m_emit->GetCodePtr());
ASSERT_MSG(DYNA_REC, imm >= 0, "%s(IndexType::Unsigned) immediate offset must be positive!",
__func__);
if (size == 16)
imm >>= 1;
@ -2220,13 +2222,13 @@ void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type,
ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255),
"%s immediate offset must be within range of -256 to 256!", __func__);
encoded_imm = (imm & 0x1FF) << 2;
if (type == INDEX_POST)
if (type == IndexType::Post)
encoded_imm |= 1;
else
encoded_imm |= 3;
}
Write32((encoded_size << 30) | (0xF << 26) | (type == INDEX_UNSIGNED ? (1 << 24) : 0) |
Write32((encoded_size << 30) | (0xF << 26) | (type == IndexType::Unsigned ? (1 << 24) : 0) |
(size == 128 ? (1 << 23) : 0) | (opc << 22) | (encoded_imm << 10) | (Rn << 5) | Rt);
}
@ -2572,17 +2574,17 @@ void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type,
switch (type)
{
case INDEX_SIGNED:
case IndexType::Signed:
type_encode = 0b010;
break;
case INDEX_POST:
case IndexType::Post:
type_encode = 0b001;
break;
case INDEX_PRE:
case IndexType::Pre:
type_encode = 0b011;
break;
case INDEX_UNSIGNED:
ASSERT_MSG(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __func__);
case IndexType::Unsigned:
ASSERT_MSG(DYNA_REC, false, "%s doesn't support IndexType::Unsigned!", __func__);
break;
}
@ -2995,7 +2997,7 @@ void ARM64FloatEmitter::LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM6
{
ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!",
__func__);
ASSERT_MSG(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __func__);
ASSERT_MSG(DYNA_REC, type == IndexType::Post, "%s only supports post indexing!", __func__);
u32 opcode = 0;
if (count == 1)
@ -3028,7 +3030,7 @@ void ARM64FloatEmitter::ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM6
{
ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!",
__func__);
ASSERT_MSG(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __func__);
ASSERT_MSG(DYNA_REC, type == IndexType::Post, "%s only supports post indexing!", __func__);
u32 opcode = 0;
if (count == 1)
@ -3955,7 +3957,7 @@ void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
if (count == 1)
island_regs.push_back((ARM64Reg)(Q0 + i));
else
ST1(64, count, INDEX_POST, (ARM64Reg)(Q0 + i), tmp);
ST1(64, count, IndexType::Post, (ARM64Reg)(Q0 + i), tmp);
i += count - 1;
}
@ -3967,12 +3969,12 @@ void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
pair_regs.push_back(it);
if (pair_regs.size() == 2)
{
STP(128, INDEX_POST, pair_regs[0], pair_regs[1], tmp, 32);
STP(128, IndexType::Post, pair_regs[0], pair_regs[1], tmp, 32);
pair_regs.clear();
}
}
if (pair_regs.size())
STR(128, INDEX_POST, pair_regs[0], tmp, 16);
STR(128, IndexType::Post, pair_regs[0], tmp, 16);
}
else
{
@ -3982,12 +3984,12 @@ void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
pair_regs.push_back((ARM64Reg)(Q0 + it));
if (pair_regs.size() == 2)
{
STP(128, INDEX_PRE, pair_regs[0], pair_regs[1], SP, -32);
STP(128, IndexType::Pre, pair_regs[0], pair_regs[1], SP, -32);
pair_regs.clear();
}
}
if (pair_regs.size())
STR(128, INDEX_PRE, pair_regs[0], SP, -16);
STR(128, IndexType::Pre, pair_regs[0], SP, -16);
}
}
void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
@ -4028,7 +4030,7 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
if (count == 1)
island_regs.push_back((ARM64Reg)(Q0 + i));
else
LD1(64, count, INDEX_POST, (ARM64Reg)(Q0 + i), SP);
LD1(64, count, IndexType::Post, (ARM64Reg)(Q0 + i), SP);
i += count - 1;
}
@ -4040,12 +4042,12 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
pair_regs.push_back(it);
if (pair_regs.size() == 2)
{
LDP(128, INDEX_POST, pair_regs[0], pair_regs[1], SP, 32);
LDP(128, IndexType::Post, pair_regs[0], pair_regs[1], SP, 32);
pair_regs.clear();
}
}
if (pair_regs.size())
LDR(128, INDEX_POST, pair_regs[0], SP, 16);
LDR(128, IndexType::Post, pair_regs[0], SP, 16);
}
else
{
@ -4060,14 +4062,14 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
{
// First load must be a regular LDR if odd
odd = false;
LDR(128, INDEX_POST, (ARM64Reg)(Q0 + i), SP, 16);
LDR(128, IndexType::Post, (ARM64Reg)(Q0 + i), SP, 16);
}
else
{
pair_regs.push_back((ARM64Reg)(Q0 + i));
if (pair_regs.size() == 2)
{
LDP(128, INDEX_POST, pair_regs[1], pair_regs[0], SP, 32);
LDP(128, IndexType::Post, pair_regs[1], pair_regs[0], SP, 32);
pair_regs.clear();
}
}


@ -285,12 +285,12 @@ enum ShiftType
ST_ROR = 3,
};
enum IndexType
enum class IndexType
{
INDEX_UNSIGNED,
INDEX_POST,
INDEX_PRE,
INDEX_SIGNED, // used in LDP/STP
Unsigned,
Post,
Pre,
Signed, // used in LDP/STP
};
enum class ShiftAmount
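
Since the enumerator values were never used directly as instruction bits, the switch-based encoders only need their case labels updated. As an illustration (not emitter code), here is the same mapping `EncodeLoadStorePair` performs in the hunks above, expressed as a hypothetical `PairTypeEncoding` helper:

```cpp
#include <cstdint>

enum class IndexType
{
  Unsigned,
  Post,
  Pre,
  Signed,  // used in LDP/STP
};

// IndexType selects the load/store-pair addressing-mode bits. An unsigned
// offset is not a valid pair addressing mode, which is why the emitter
// raises ASSERT_MSG for IndexType::Unsigned instead of encoding it.
constexpr std::uint32_t PairTypeEncoding(IndexType type)
{
  switch (type)
  {
  case IndexType::Signed:
    return 0b010;  // signed offset
  case IndexType::Post:
    return 0b001;  // post-indexed
  case IndexType::Pre:
    return 0b011;  // pre-indexed
  case IndexType::Unsigned:
    break;  // unsupported; the real code asserts here
  }
  return 0;
}

static_assert(PairTypeEncoding(IndexType::Pre) == 0b011, "pre-indexed encoding");
```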


@ -150,9 +150,9 @@ void JitArm64::FallBackToInterpreter(UGeckoInstruction inst)
// also flush the program counter
ARM64Reg WA = gpr.GetReg();
MOVI2R(WA, js.compilerPC);
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(pc));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(pc));
ADD(WA, WA, 4);
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(npc));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(npc));
gpr.Unlock(WA);
}
@ -166,7 +166,7 @@ void JitArm64::FallBackToInterpreter(UGeckoInstruction inst)
if (js.isLastInstruction)
{
ARM64Reg WA = gpr.GetReg();
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(npc));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(npc));
WriteExceptionExit(WA);
gpr.Unlock(WA);
}
@ -174,7 +174,7 @@ void JitArm64::FallBackToInterpreter(UGeckoInstruction inst)
{
// only exit if ppcstate.npc was changed
ARM64Reg WA = gpr.GetReg();
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(npc));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(npc));
ARM64Reg WB = gpr.GetReg();
MOVI2R(WB, js.compilerPC + 4);
CMP(WB, WA);
@ -189,7 +189,7 @@ void JitArm64::FallBackToInterpreter(UGeckoInstruction inst)
if (jo.memcheck && (js.op->opinfo->flags & FL_LOADSTORE))
{
ARM64Reg WA = gpr.GetReg();
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
FixupBranch noException = TBZ(WA, IntLog2(EXCEPTION_DSI));
FixupBranch handleException = B();
@ -234,7 +234,7 @@ void JitArm64::Cleanup()
{
if (jo.optimizeGatherPipe && js.fifoBytesSinceCheck > 0)
{
LDP(INDEX_SIGNED, X0, X1, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
LDP(IndexType::Signed, X0, X1, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
SUB(X0, X0, X1);
CMP(X0, GPFifo::GATHER_PIPE_SIZE);
FixupBranch exit = B(CC_LT);
@ -256,9 +256,9 @@ void JitArm64::Cleanup()
void JitArm64::DoDownCount()
{
LDR(INDEX_UNSIGNED, W0, PPC_REG, PPCSTATE_OFF(downcount));
LDR(IndexType::Unsigned, W0, PPC_REG, PPCSTATE_OFF(downcount));
SUBSI2R(W0, W0, js.downcountAmount, W1);
STR(INDEX_UNSIGNED, W0, PPC_REG, PPCSTATE_OFF(downcount));
STR(IndexType::Unsigned, W0, PPC_REG, PPCSTATE_OFF(downcount));
}
void JitArm64::ResetStack()
@ -266,7 +266,7 @@ void JitArm64::ResetStack()
if (!m_enable_blr_optimization)
return;
LDR(INDEX_UNSIGNED, X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer));
LDR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer));
ADD(SP, X0, 0);
}
@ -317,7 +317,7 @@ void JitArm64::WriteExit(u32 destination, bool LK, u32 exit_address_after_return
// Push {ARM_PC+20; PPC_PC} on the stack
MOVI2R(X1, exit_address_after_return);
ADR(X0, 20);
STP(INDEX_PRE, X0, X1, SP, -16);
STP(IndexType::Pre, X0, X1, SP, -16);
}
JitBlock* b = js.curBlock;
@ -363,7 +363,7 @@ void JitArm64::WriteExit(Arm64Gen::ARM64Reg dest, bool LK, u32 exit_address_afte
// Push {ARM_PC, PPC_PC} on the stack
MOVI2R(X1, exit_address_after_return);
ADR(X0, 12);
STP(INDEX_PRE, X0, X1, SP, -16);
STP(IndexType::Pre, X0, X1, SP, -16);
BL(dispatcher);
@ -391,7 +391,7 @@ void JitArm64::FakeLKExit(u32 exit_address_after_return)
ARM64Reg code_reg = gpr.GetReg();
MOVI2R(after_reg, exit_address_after_return);
ADR(EncodeRegTo64(code_reg), 12);
STP(INDEX_PRE, EncodeRegTo64(code_reg), EncodeRegTo64(after_reg), SP, -16);
STP(IndexType::Pre, EncodeRegTo64(code_reg), EncodeRegTo64(after_reg), SP, -16);
gpr.Unlock(after_reg, code_reg);
FixupBranch skip_exit = BL();
@ -425,7 +425,7 @@ void JitArm64::WriteBLRExit(Arm64Gen::ARM64Reg dest)
EndTimeProfile(js.curBlock);
// Check if {ARM_PC, PPC_PC} matches the current state.
LDP(INDEX_POST, X2, X1, SP, 16);
LDP(IndexType::Post, X2, X1, SP, 16);
CMP(W1, DISPATCHER_PC);
FixupBranch no_match = B(CC_NEQ);
@ -446,18 +446,18 @@ void JitArm64::WriteExceptionExit(u32 destination, bool only_external)
{
Cleanup();
LDR(INDEX_UNSIGNED, W30, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(Exceptions));
MOVI2R(DISPATCHER_PC, destination);
FixupBranch no_exceptions = CBZ(W30);
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
if (only_external)
MOVP2R(X30, &PowerPC::CheckExternalExceptions);
else
MOVP2R(X30, &PowerPC::CheckExceptions);
BLR(X30);
LDR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
SetJumpTarget(no_exceptions);
@ -474,17 +474,17 @@ void JitArm64::WriteExceptionExit(ARM64Reg dest, bool only_external)
Cleanup();
LDR(INDEX_UNSIGNED, W30, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(Exceptions));
FixupBranch no_exceptions = CBZ(W30);
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
if (only_external)
MOVP2R(EncodeRegTo64(DISPATCHER_PC), &PowerPC::CheckExternalExceptions);
else
MOVP2R(EncodeRegTo64(DISPATCHER_PC), &PowerPC::CheckExceptions);
BLR(EncodeRegTo64(DISPATCHER_PC));
LDR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
SetJumpTarget(no_exceptions);
@ -502,7 +502,7 @@ bool JitArm64::HandleFunctionHooking(u32 address)
if (type != HLE::HookType::Replace)
return false;
LDR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
js.downcountAmount += js.st.numCycles;
WriteExit(DISPATCHER_PC);
return true;
@ -520,14 +520,14 @@ void JitArm64::DumpCode(const u8* start, const u8* end)
void JitArm64::BeginTimeProfile(JitBlock* b)
{
MOVP2R(X0, &b->profile_data);
LDR(INDEX_UNSIGNED, X1, X0, offsetof(JitBlock::ProfileData, runCount));
LDR(IndexType::Unsigned, X1, X0, offsetof(JitBlock::ProfileData, runCount));
ADD(X1, X1, 1);
// Fetch the current counter register
CNTVCT(X2);
// stores runCount and ticStart
STP(INDEX_SIGNED, X1, X2, X0, offsetof(JitBlock::ProfileData, runCount));
STP(IndexType::Signed, X1, X2, X0, offsetof(JitBlock::ProfileData, runCount));
}
void JitArm64::EndTimeProfile(JitBlock* b)
@ -540,16 +540,16 @@ void JitArm64::EndTimeProfile(JitBlock* b)
MOVP2R(X0, &b->profile_data);
LDR(INDEX_UNSIGNED, X2, X0, offsetof(JitBlock::ProfileData, ticStart));
LDR(IndexType::Unsigned, X2, X0, offsetof(JitBlock::ProfileData, ticStart));
SUB(X1, X1, X2);
// loads ticCounter and downcountCounter
LDP(INDEX_SIGNED, X2, X3, X0, offsetof(JitBlock::ProfileData, ticCounter));
LDP(IndexType::Signed, X2, X3, X0, offsetof(JitBlock::ProfileData, ticCounter));
ADD(X2, X2, X1);
ADDI2R(X3, X3, js.downcountAmount, X1);
// stores ticCounter and downcountCounter
STP(INDEX_SIGNED, X2, X3, X0, offsetof(JitBlock::ProfileData, ticCounter));
STP(IndexType::Signed, X2, X3, X0, offsetof(JitBlock::ProfileData, ticCounter));
}
void JitArm64::Run()
@ -658,13 +658,13 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
int gqr = *code_block.m_gqr_used.begin();
if (!code_block.m_gqr_modified[gqr] && !GQR(gqr))
{
LDR(INDEX_UNSIGNED, W0, PPC_REG, PPCSTATE_OFF(spr[SPR_GQR0]) + gqr * 4);
LDR(IndexType::Unsigned, W0, PPC_REG, PPCSTATE_OFF(spr[SPR_GQR0]) + gqr * 4);
FixupBranch no_fail = CBZ(W0);
FixupBranch fail = B();
SwitchToFarCode();
SetJumpTarget(fail);
MOVI2R(DISPATCHER_PC, js.blockStart);
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
MOVI2R(W0, static_cast<u32>(JitInterface::ExceptionType::PairedQuantize));
MOVP2R(X1, &JitInterface::CompileExceptionCheck);
BLR(X1);
@ -720,12 +720,12 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
ABI_PopRegisters(regs_in_use);
// Inline exception check
LDR(INDEX_UNSIGNED, W30, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(Exceptions));
TBZ(W30, 3, done_here); // EXCEPTION_EXTERNAL_INT
LDR(INDEX_UNSIGNED, W30, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(msr));
TBZ(W30, 11, done_here);
MOVP2R(X30, &ProcessorInterface::m_InterruptCause);
LDR(INDEX_UNSIGNED, W30, X30, 0);
LDR(IndexType::Unsigned, W30, X30, 0);
TST(W30, 23, 2);
B(CC_EQ, done_here);
@ -746,17 +746,17 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
{
ARM64Reg WA = gpr.GetReg();
ARM64Reg XA = EncodeRegTo64(WA);
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
FixupBranch NoExtException = TBZ(WA, 3); // EXCEPTION_EXTERNAL_INT
FixupBranch Exception = B();
SwitchToFarCode();
const u8* done_here = GetCodePtr();
FixupBranch exit = B();
SetJumpTarget(Exception);
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(msr));
TBZ(WA, 11, done_here);
MOVP2R(XA, &ProcessorInterface::m_InterruptCause);
LDR(INDEX_UNSIGNED, WA, XA, 0);
LDR(IndexType::Unsigned, WA, XA, 0);
TST(WA, 23, 2);
B(CC_EQ, done_here);
gpr.Unlock(WA);
@ -778,7 +778,7 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
{
// This instruction uses FPU - needs to add FP exception bailout
ARM64Reg WA = gpr.GetReg();
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(msr));
FixupBranch b1 = TBNZ(WA, 13); // Test FP enabled bit
FixupBranch far_addr = B();
@ -788,9 +788,9 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
gpr.Flush(FlushMode::MaintainState);
fpr.Flush(FlushMode::MaintainState);
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
ORR(WA, WA, 26, 0); // EXCEPTION_FPU_UNAVAILABLE
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
gpr.Unlock(WA);


@ -120,8 +120,8 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR
{
// This literally only stores 32bytes of zeros to the target address
ADD(addr, addr, MEM_REG);
STP(INDEX_SIGNED, ZR, ZR, addr, 0);
STP(INDEX_SIGNED, ZR, ZR, addr, 16);
STP(IndexType::Signed, ZR, ZR, addr, 0);
STP(IndexType::Signed, ZR, ZR, addr, 16);
}
else
{


@ -24,9 +24,9 @@ void JitArm64::sc(UGeckoInstruction inst)
ARM64Reg WA = gpr.GetReg();
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
ORR(WA, WA, 31, 0); // Same as WA | EXCEPTION_SYSCALL
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
gpr.Unlock(WA);
@ -53,18 +53,18 @@ void JitArm64::rfi(UGeckoInstruction inst)
ARM64Reg WB = gpr.GetReg();
ARM64Reg WC = gpr.GetReg();
LDR(INDEX_UNSIGNED, WC, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, WC, PPC_REG, PPCSTATE_OFF(msr));
ANDI2R(WC, WC, (~mask) & clearMSR13, WA); // rD = Masked MSR
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_SRR1])); // rB contains SRR1 here
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_SRR1])); // rB contains SRR1 here
ANDI2R(WA, WA, mask & clearMSR13, WB); // rB contains masked SRR1 here
ORR(WA, WA, WC); // rB = Masked MSR OR masked SRR1
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(msr)); // STR rB in to rA
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(msr)); // STR rB in to rA
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_SRR0]));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_SRR0]));
gpr.Unlock(WB, WC);
WriteExceptionExit(WA);
@ -80,7 +80,7 @@ void JitArm64::bx(UGeckoInstruction inst)
{
ARM64Reg WA = gpr.GetReg();
MOVI2R(WA, js.compilerPC + 4);
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
gpr.Unlock(WA);
}
@ -125,9 +125,9 @@ void JitArm64::bcx(UGeckoInstruction inst)
FixupBranch pCTRDontBranch;
if ((inst.BO & BO_DONT_DECREMENT_FLAG) == 0) // Decrement and test CTR
{
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
SUBS(WA, WA, 1);
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
if (inst.BO & BO_BRANCH_IF_CTR_0)
pCTRDontBranch = B(CC_NEQ);
@ -150,7 +150,7 @@ void JitArm64::bcx(UGeckoInstruction inst)
if (inst.LK)
{
MOVI2R(WA, js.compilerPC + 4);
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
}
gpr.Unlock(WA);
@ -213,13 +213,13 @@ void JitArm64::bcctrx(UGeckoInstruction inst)
{
ARM64Reg WB = gpr.GetReg();
MOVI2R(WB, js.compilerPC + 4);
STR(INDEX_UNSIGNED, WB, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
STR(IndexType::Unsigned, WB, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
gpr.Unlock(WB);
}
ARM64Reg WA = gpr.GetReg();
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
AND(WA, WA, 30, 29); // Wipe the bottom 2 bits.
WriteExit(WA, inst.LK_3, js.compilerPC + 4);
@ -241,9 +241,9 @@ void JitArm64::bclrx(UGeckoInstruction inst)
FixupBranch pCTRDontBranch;
if ((inst.BO & BO_DONT_DECREMENT_FLAG) == 0) // Decrement and test CTR
{
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
SUBS(WA, WA, 1);
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_CTR]));
if (inst.BO & BO_BRANCH_IF_CTR_0)
pCTRDontBranch = B(CC_NEQ);
@ -265,13 +265,13 @@ void JitArm64::bclrx(UGeckoInstruction inst)
SetJumpTarget(far_addr);
}
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
AND(WA, WA, 30, 29); // Wipe the bottom 2 bits.
if (inst.LK)
{
MOVI2R(WB, js.compilerPC + 4);
STR(INDEX_UNSIGNED, WB, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
STR(IndexType::Unsigned, WB, PPC_REG, PPCSTATE_OFF(spr[SPR_LR]));
gpr.Unlock(WB);
}


@ -41,12 +41,12 @@ void JitArm64::ComputeCarry(bool Carry)
{
ARM64Reg WA = gpr.GetReg();
MOVI2R(WA, 1);
STRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
gpr.Unlock(WA);
return;
}
STRB(INDEX_UNSIGNED, WSP, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WSP, PPC_REG, PPCSTATE_OFF(xer_ca));
}
void JitArm64::ComputeCarry()
@ -72,7 +72,7 @@ void JitArm64::FlushCarry()
ARM64Reg WA = gpr.GetReg();
CSINC(WA, WSP, WSP, CC_CC);
STRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
gpr.Unlock(WA);
js.carryFlagSet = false;
@ -672,7 +672,7 @@ void JitArm64::srawix(UGeckoInstruction inst)
else
{
CSINC(WA, WSP, WSP, CC_EQ);
STRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
}
gpr.Unlock(WA);
}
@ -832,14 +832,14 @@ void JitArm64::addzex(UGeckoInstruction inst)
{
gpr.BindToRegister(d, true);
ARM64Reg WA = gpr.GetReg();
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
ADDS(gpr.R(d), gpr.R(a), WA);
gpr.Unlock(WA);
}
else
{
gpr.BindToRegister(d, false);
LDRB(INDEX_UNSIGNED, gpr.R(d), PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, gpr.R(d), PPC_REG, PPCSTATE_OFF(xer_ca));
ADDS(gpr.R(d), gpr.R(a), gpr.R(d));
}
@ -899,7 +899,7 @@ void JitArm64::subfex(UGeckoInstruction inst)
}
else
{
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
ADDI2R(gpr.R(d), WA, ~i + j, gpr.R(d));
}
gpr.Unlock(WA);
@ -928,7 +928,7 @@ void JitArm64::subfex(UGeckoInstruction inst)
// upload the carry state
if (!js.carryFlagSet)
{
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
CMP(WA, 1);
}
@ -998,7 +998,7 @@ void JitArm64::subfzex(UGeckoInstruction inst)
else
{
ARM64Reg WA = gpr.GetReg();
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
MVN(gpr.R(d), gpr.R(a));
ADDS(gpr.R(d), gpr.R(d), WA);
gpr.Unlock(WA);
@ -1060,7 +1060,7 @@ void JitArm64::addex(UGeckoInstruction inst)
}
else
{
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
ADDI2R(gpr.R(d), WA, i + j, gpr.R(d));
}
gpr.Unlock(WA);
@ -1089,7 +1089,7 @@ void JitArm64::addex(UGeckoInstruction inst)
if (!js.carryFlagSet)
{
ARM64Reg WA = gpr.GetReg();
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
CMP(WA, 1);
gpr.Unlock(WA);
}
@ -1425,7 +1425,7 @@ void JitArm64::srawx(UGeckoInstruction inst)
}
else
{
STRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
}
gpr.Unlock(WA, WB, WC);


@ -238,22 +238,22 @@ void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s
else
accessSize = 8;
LDR(INDEX_UNSIGNED, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
LDR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
if (accessSize == 32)
{
REV32(W1, RS);
STR(INDEX_POST, W1, X0, 4);
STR(IndexType::Post, W1, X0, 4);
}
else if (accessSize == 16)
{
REV16(W1, RS);
STRH(INDEX_POST, W1, X0, 2);
STRH(IndexType::Post, W1, X0, 2);
}
else
{
STRB(INDEX_POST, RS, X0, 1);
STRB(IndexType::Post, RS, X0, 1);
}
STR(INDEX_UNSIGNED, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
STR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
js.fifoBytesSinceCheck += accessSize >> 3;
}
else if (jo.fastmem_arena && is_immediate && PowerPC::IsOptimizableRAMAddress(imm_addr))
@ -471,7 +471,7 @@ void JitArm64::lmw(UGeckoInstruction inst)
ARM64Reg RX3 = gpr.R(i + 2);
ARM64Reg RX2 = gpr.R(i + 1);
ARM64Reg RX1 = gpr.R(i);
LDP(INDEX_POST, EncodeRegTo64(RX1), EncodeRegTo64(RX3), XA, 16);
LDP(IndexType::Post, EncodeRegTo64(RX1), EncodeRegTo64(RX3), XA, 16);
REV32(EncodeRegTo64(RX1), EncodeRegTo64(RX1));
REV32(EncodeRegTo64(RX3), EncodeRegTo64(RX3));
LSR(EncodeRegTo64(RX2), EncodeRegTo64(RX1), 32);
@ -484,7 +484,7 @@ void JitArm64::lmw(UGeckoInstruction inst)
gpr.BindToRegister(i, false);
ARM64Reg RX2 = gpr.R(i + 1);
ARM64Reg RX1 = gpr.R(i);
LDP(INDEX_POST, RX1, RX2, XA, 8);
LDP(IndexType::Post, RX1, RX2, XA, 8);
REV32(RX1, RX1);
REV32(RX2, RX2);
++i;
@ -493,7 +493,7 @@ void JitArm64::lmw(UGeckoInstruction inst)
{
gpr.BindToRegister(i, false);
ARM64Reg RX = gpr.R(i);
LDR(INDEX_POST, RX, XA, 4);
LDR(IndexType::Post, RX, XA, 4);
REV32(RX, RX);
}
}
@ -527,7 +527,7 @@ void JitArm64::stmw(UGeckoInstruction inst)
{
ARM64Reg RX = gpr.R(i);
REV32(WB, RX);
STR(INDEX_UNSIGNED, WB, XA, (i - inst.RD) * 4);
STR(IndexType::Unsigned, WB, XA, (i - inst.RD) * 4);
}
gpr.Unlock(WA, WB);


@ -358,7 +358,7 @@ void JitArm64::stfXX(UGeckoInstruction inst)
else
accessSize = 32;
LDR(INDEX_UNSIGNED, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
LDR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
if (flags & BackPatchInfo::FLAG_SIZE_F64)
{
m_float_emit.REV64(8, Q0, V0);
@ -373,9 +373,10 @@ void JitArm64::stfXX(UGeckoInstruction inst)
m_float_emit.REV32(8, D0, V0);
}
m_float_emit.STR(accessSize, INDEX_POST, accessSize == 64 ? Q0 : D0, X0, accessSize >> 3);
m_float_emit.STR(accessSize, IndexType::Post, accessSize == 64 ? Q0 : D0, X0,
accessSize >> 3);
STR(INDEX_UNSIGNED, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
STR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
js.fifoBytesSinceCheck += accessSize >> 3;
if (update)


@ -77,7 +77,7 @@ void JitArm64::psq_l(UGeckoInstruction inst)
}
else
{
LDR(INDEX_UNSIGNED, scale_reg, PPC_REG, PPCSTATE_OFF(spr[SPR_GQR0 + inst.I]));
LDR(IndexType::Unsigned, scale_reg, PPC_REG, PPCSTATE_OFF(spr[SPR_GQR0 + inst.I]));
UBFM(type_reg, scale_reg, 16, 18); // Type
UBFM(scale_reg, scale_reg, 24, 29); // Scale
@ -179,7 +179,7 @@ void JitArm64::psq_st(UGeckoInstruction inst)
m_float_emit.FCVTN(32, D0, VS);
}
LDR(INDEX_UNSIGNED, scale_reg, PPC_REG, PPCSTATE_OFF(spr[SPR_GQR0 + inst.I]));
LDR(IndexType::Unsigned, scale_reg, PPC_REG, PPCSTATE_OFF(spr[SPR_GQR0 + inst.I]));
UBFM(type_reg, scale_reg, 0, 2); // Type
UBFM(scale_reg, scale_reg, 8, 13); // Scale


@ -164,7 +164,7 @@ void Arm64GPRCache::FlushRegister(size_t index, bool maintain_state)
{
ARM64Reg host_reg = reg.GetReg();
if (reg.IsDirty())
m_emit->STR(INDEX_UNSIGNED, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
m_emit->STR(IndexType::Unsigned, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
if (!maintain_state)
{
@ -176,14 +176,15 @@ void Arm64GPRCache::FlushRegister(size_t index, bool maintain_state)
{
if (!reg.GetImm())
{
m_emit->STR(INDEX_UNSIGNED, bitsize == 64 ? ZR : WZR, PPC_REG, u32(guest_reg.ppc_offset));
m_emit->STR(IndexType::Unsigned, bitsize == 64 ? ZR : WZR, PPC_REG,
u32(guest_reg.ppc_offset));
}
else
{
ARM64Reg host_reg = bitsize != 64 ? GetReg() : EncodeRegTo64(GetReg());
m_emit->MOVI2R(host_reg, reg.GetImm());
m_emit->STR(INDEX_UNSIGNED, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
m_emit->STR(IndexType::Unsigned, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
UnlockRegister(DecodeReg(host_reg));
}
@ -210,7 +211,7 @@ void Arm64GPRCache::FlushRegisters(BitSet32 regs, bool maintain_state)
size_t ppc_offset = GetGuestByIndex(i).ppc_offset;
ARM64Reg RX1 = R(GetGuestByIndex(i));
ARM64Reg RX2 = R(GetGuestByIndex(i + 1));
m_emit->STP(INDEX_SIGNED, RX1, RX2, PPC_REG, u32(ppc_offset));
m_emit->STP(IndexType::Signed, RX1, RX2, PPC_REG, u32(ppc_offset));
if (!maintain_state)
{
UnlockRegister(DecodeReg(RX1));
@ -274,7 +275,7 @@ ARM64Reg Arm64GPRCache::R(const GuestRegInfo& guest_reg)
ARM64Reg host_reg = bitsize != 64 ? GetReg() : EncodeRegTo64(GetReg());
reg.Load(host_reg);
reg.SetDirty(false);
m_emit->LDR(INDEX_UNSIGNED, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
m_emit->LDR(IndexType::Unsigned, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
return host_reg;
}
break;
@ -307,7 +308,7 @@ void Arm64GPRCache::BindToRegister(const GuestRegInfo& guest_reg, bool do_load)
const ARM64Reg host_reg = bitsize != 64 ? GetReg() : EncodeRegTo64(GetReg());
reg.Load(host_reg);
if (do_load)
m_emit->LDR(INDEX_UNSIGNED, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
m_emit->LDR(IndexType::Unsigned, host_reg, PPC_REG, u32(guest_reg.ppc_offset));
}
}
@ -441,7 +442,7 @@ ARM64Reg Arm64FPRCache::R(size_t preg, RegType type)
// Load the high 64bits from the file and insert them in to the high 64bits of the host
// register
const ARM64Reg tmp_reg = GetReg();
m_float_emit->LDR(64, INDEX_UNSIGNED, tmp_reg, PPC_REG, u32(PPCSTATE_OFF(ps[preg].ps1)));
m_float_emit->LDR(64, IndexType::Unsigned, tmp_reg, PPC_REG, u32(PPCSTATE_OFF(ps[preg].ps1)));
m_float_emit->INS(64, host_reg, 1, tmp_reg, 0);
UnlockRegister(tmp_reg);
@ -494,7 +495,7 @@ ARM64Reg Arm64FPRCache::R(size_t preg, RegType type)
reg.Load(host_reg, RegType::LowerPair);
}
reg.SetDirty(false);
m_float_emit->LDR(load_size, INDEX_UNSIGNED, host_reg, PPC_REG,
m_float_emit->LDR(load_size, IndexType::Unsigned, host_reg, PPC_REG,
u32(PPCSTATE_OFF(ps[preg].ps0)));
return host_reg;
}
@ -543,7 +544,8 @@ ARM64Reg Arm64FPRCache::RW(size_t preg, RegType type)
// We are doing a full 128bit store because it takes 2 cycles on a Cortex-A57 to do a 128bit
// store.
// It would take longer to do an insert to a temporary and a 64bit store than to just do this.
m_float_emit->STR(128, INDEX_UNSIGNED, flush_reg, PPC_REG, u32(PPCSTATE_OFF(ps[preg].ps0)));
m_float_emit->STR(128, IndexType::Unsigned, flush_reg, PPC_REG,
u32(PPCSTATE_OFF(ps[preg].ps0)));
break;
case RegType::DuplicatedSingle:
flush_reg = GetReg();
@ -551,7 +553,8 @@ ARM64Reg Arm64FPRCache::RW(size_t preg, RegType type)
[[fallthrough]];
case RegType::Duplicated:
// Store PSR1 (which is equal to PSR0) in memory.
m_float_emit->STR(64, INDEX_UNSIGNED, flush_reg, PPC_REG, u32(PPCSTATE_OFF(ps[preg].ps1)));
m_float_emit->STR(64, IndexType::Unsigned, flush_reg, PPC_REG,
u32(PPCSTATE_OFF(ps[preg].ps1)));
break;
default:
// All other types doesn't store anything in PSR1.
@ -678,7 +681,7 @@ void Arm64FPRCache::FlushRegister(size_t preg, bool maintain_state)
if (dirty)
{
m_float_emit->STR(store_size, INDEX_UNSIGNED, host_reg, PPC_REG,
m_float_emit->STR(store_size, IndexType::Unsigned, host_reg, PPC_REG,
u32(PPCSTATE_OFF(ps[preg].ps0)));
}
@ -694,10 +697,12 @@ void Arm64FPRCache::FlushRegister(size_t preg, bool maintain_state)
{
// If the paired registers were at the start of ppcState we could do an STP here.
// Too bad moving them would break savestate compatibility between x86_64 and AArch64
// m_float_emit->STP(64, INDEX_SIGNED, host_reg, host_reg, PPC_REG,
// m_float_emit->STP(64, IndexType::Signed, host_reg, host_reg, PPC_REG,
// PPCSTATE_OFF(ps[preg].ps0));
m_float_emit->STR(64, INDEX_UNSIGNED, host_reg, PPC_REG, u32(PPCSTATE_OFF(ps[preg].ps0)));
m_float_emit->STR(64, INDEX_UNSIGNED, host_reg, PPC_REG, u32(PPCSTATE_OFF(ps[preg].ps1)));
m_float_emit->STR(64, IndexType::Unsigned, host_reg, PPC_REG,
u32(PPCSTATE_OFF(ps[preg].ps0)));
m_float_emit->STR(64, IndexType::Unsigned, host_reg, PPC_REG,
u32(PPCSTATE_OFF(ps[preg].ps1)));
}
if (!maintain_state)


@ -42,7 +42,7 @@ void JitArm64::mtmsr(UGeckoInstruction inst)
JITDISABLE(bJITSystemRegistersOff);
gpr.BindToRegister(inst.RS, true);
STR(INDEX_UNSIGNED, gpr.R(inst.RS), PPC_REG, PPCSTATE_OFF(msr));
STR(IndexType::Unsigned, gpr.R(inst.RS), PPC_REG, PPCSTATE_OFF(msr));
gpr.Flush(FlushMode::All);
fpr.Flush(FlushMode::All);
@ -60,7 +60,7 @@ void JitArm64::mfmsr(UGeckoInstruction inst)
JITDISABLE(bJITSystemRegistersOff);
gpr.BindToRegister(inst.RD, false);
LDR(INDEX_UNSIGNED, gpr.R(inst.RD), PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, gpr.R(inst.RD), PPC_REG, PPCSTATE_OFF(msr));
}
void JitArm64::mcrf(UGeckoInstruction inst)
@ -87,8 +87,8 @@ void JitArm64::mcrxr(UGeckoInstruction inst)
ARM64Reg WB = DecodeReg(XB);
// Copy XER[0-3] into CR[inst.CRFD]
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(INDEX_UNSIGNED, WB, PPC_REG, PPCSTATE_OFF(xer_so_ov));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRB(IndexType::Unsigned, WB, PPC_REG, PPCSTATE_OFF(xer_so_ov));
// [0 SO OV CA]
ADD(WA, WA, WB, ArithOption(WB, ST_LSL, 2));
@ -99,8 +99,8 @@ void JitArm64::mcrxr(UGeckoInstruction inst)
LDR(XB, XB, XA);
// Clear XER[0-3]
STRB(INDEX_UNSIGNED, WZR, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(INDEX_UNSIGNED, WZR, PPC_REG, PPCSTATE_OFF(xer_so_ov));
STRB(IndexType::Unsigned, WZR, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WZR, PPC_REG, PPCSTATE_OFF(xer_so_ov));
gpr.Unlock(WA);
}
@ -111,7 +111,7 @@ void JitArm64::mfsr(UGeckoInstruction inst)
JITDISABLE(bJITSystemRegistersOff);
gpr.BindToRegister(inst.RD, false);
LDR(INDEX_UNSIGNED, gpr.R(inst.RD), PPC_REG, PPCSTATE_OFF(sr[inst.SR]));
LDR(IndexType::Unsigned, gpr.R(inst.RD), PPC_REG, PPCSTATE_OFF(sr[inst.SR]));
}
void JitArm64::mtsr(UGeckoInstruction inst)
@ -120,7 +120,7 @@ void JitArm64::mtsr(UGeckoInstruction inst)
JITDISABLE(bJITSystemRegistersOff);
gpr.BindToRegister(inst.RS, true);
STR(INDEX_UNSIGNED, gpr.R(inst.RS), PPC_REG, PPCSTATE_OFF(sr[inst.SR]));
STR(IndexType::Unsigned, gpr.R(inst.RS), PPC_REG, PPCSTATE_OFF(sr[inst.SR]));
}
void JitArm64::mfsrin(UGeckoInstruction inst)
@ -137,7 +137,7 @@ void JitArm64::mfsrin(UGeckoInstruction inst)
UBFM(index, RB, 28, 31);
ADD(index64, PPC_REG, index64, ArithOption(index64, ST_LSL, 2));
LDR(INDEX_UNSIGNED, gpr.R(d), index64, PPCSTATE_OFF(sr[0]));
LDR(IndexType::Unsigned, gpr.R(d), index64, PPCSTATE_OFF(sr[0]));
gpr.Unlock(index);
}
@ -156,7 +156,7 @@ void JitArm64::mtsrin(UGeckoInstruction inst)
UBFM(index, RB, 28, 31);
ADD(index64, PPC_REG, index64, ArithOption(index64, ST_LSL, 2));
STR(INDEX_UNSIGNED, gpr.R(d), index64, PPCSTATE_OFF(sr[0]));
STR(IndexType::Unsigned, gpr.R(d), index64, PPCSTATE_OFF(sr[0]));
gpr.Unlock(index);
}
@ -204,9 +204,9 @@ void JitArm64::twx(UGeckoInstruction inst)
gpr.Flush(FlushMode::MaintainState);
fpr.Flush(FlushMode::MaintainState);
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
ORR(WA, WA, 24, 0); // Same as WA | EXCEPTION_PROGRAM
STR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
gpr.Unlock(WA);
WriteExceptionExit(js.compilerPC);
@ -256,14 +256,14 @@ void JitArm64::mfspr(UGeckoInstruction inst)
MOVP2R(Xg, &CoreTiming::g);
LDR(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(downcount));
LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(downcount));
m_float_emit.SCVTF(SC, WA);
m_float_emit.LDR(32, INDEX_UNSIGNED, SD, Xg,
m_float_emit.LDR(32, IndexType::Unsigned, SD, Xg,
offsetof(CoreTiming::Globals, last_OC_factor_inverted));
m_float_emit.FMUL(SC, SC, SD);
m_float_emit.FCVTS(Xresult, SC, RoundingMode::Z);
LDP(INDEX_SIGNED, XA, XB, Xg, offsetof(CoreTiming::Globals, global_timer));
LDP(IndexType::Signed, XA, XB, Xg, offsetof(CoreTiming::Globals, global_timer));
SXTW(XB, WB);
SUB(Xresult, XB, Xresult);
ADD(Xresult, Xresult, XA);
@ -274,7 +274,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
// into a block with only 50 downcount remaining, some games don't function correctly, such as
// Karaoke Party Revolution, which won't get past the loading screen.
LDP(INDEX_SIGNED, XA, XB, Xg, offsetof(CoreTiming::Globals, fake_TB_start_value));
LDP(IndexType::Signed, XA, XB, Xg, offsetof(CoreTiming::Globals, fake_TB_start_value));
SUB(Xresult, Xresult, XB);
// a / 12 = (a * 0xAAAAAAAAAAAAAAAB) >> 67
@ -283,7 +283,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
UMULH(Xresult, Xresult, XB);
ADD(Xresult, XA, Xresult, ArithOption(Xresult, ST_LSR, 3));
STR(INDEX_UNSIGNED, Xresult, PPC_REG, PPCSTATE_OFF(spr[SPR_TL]));
STR(IndexType::Unsigned, Xresult, PPC_REG, PPCSTATE_OFF(spr[SPR_TL]));
if (CanMergeNextInstructions(1))
{
@ -330,10 +330,10 @@ void JitArm64::mfspr(UGeckoInstruction inst)
gpr.BindToRegister(d, false);
ARM64Reg RD = gpr.R(d);
ARM64Reg WA = gpr.GetReg();
LDRH(INDEX_UNSIGNED, RD, PPC_REG, PPCSTATE_OFF(xer_stringctrl));
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
LDRH(IndexType::Unsigned, RD, PPC_REG, PPCSTATE_OFF(xer_stringctrl));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
ORR(RD, RD, WA, ArithOption(WA, ST_LSL, XER_CA_SHIFT));
LDRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_so_ov));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_so_ov));
ORR(RD, RD, WA, ArithOption(WA, ST_LSL, XER_OV_SHIFT));
gpr.Unlock(WA);
}
@ -344,7 +344,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
default:
gpr.BindToRegister(d, false);
ARM64Reg RD = gpr.R(d);
LDR(INDEX_UNSIGNED, RD, PPC_REG, PPCSTATE_OFF(spr) + iIndex * 4);
LDR(IndexType::Unsigned, RD, PPC_REG, PPCSTATE_OFF(spr) + iIndex * 4);
break;
}
}
@ -394,11 +394,11 @@ void JitArm64::mtspr(UGeckoInstruction inst)
ARM64Reg RD = gpr.R(inst.RD);
ARM64Reg WA = gpr.GetReg();
AND(WA, RD, 24, 30);
STRH(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_stringctrl));
STRH(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_stringctrl));
UBFM(WA, RD, XER_CA_SHIFT, XER_CA_SHIFT + 1);
STRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
UBFM(WA, RD, XER_OV_SHIFT, 31); // Same as WA = RD >> XER_OV_SHIFT
STRB(INDEX_UNSIGNED, WA, PPC_REG, PPCSTATE_OFF(xer_so_ov));
STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_so_ov));
gpr.Unlock(WA);
}
break;
@ -408,7 +408,7 @@ void JitArm64::mtspr(UGeckoInstruction inst)
// OK, this is easy.
ARM64Reg RD = gpr.R(inst.RD);
STR(INDEX_UNSIGNED, RD, PPC_REG, PPCSTATE_OFF(spr) + iIndex * 4);
STR(IndexType::Unsigned, RD, PPC_REG, PPCSTATE_OFF(spr) + iIndex * 4);
}
void JitArm64::crXXX(UGeckoInstruction inst)


@ -35,20 +35,20 @@ void JitArm64::GenerateAsm()
// Swap the stack pointer, so we have proper guard pages.
ADD(X0, SP, 0);
MOVP2R(X1, &m_saved_stack_pointer);
STR(INDEX_UNSIGNED, X0, X1, 0);
STR(IndexType::Unsigned, X0, X1, 0);
MOVP2R(X1, &m_stack_pointer);
LDR(INDEX_UNSIGNED, X0, X1, 0);
LDR(IndexType::Unsigned, X0, X1, 0);
FixupBranch no_fake_stack = CBZ(X0);
ADD(SP, X0, 0);
SetJumpTarget(no_fake_stack);
// Push {nullptr; -1} as invalid destination on the stack.
MOVI2R(X0, 0xFFFFFFFF);
STP(INDEX_PRE, ZR, X0, SP, -16);
STP(IndexType::Pre, ZR, X0, SP, -16);
// Store the stack pointer, so we can reset it if the BLR optimization fails.
ADD(X0, SP, 0);
STR(INDEX_UNSIGNED, X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer));
STR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer));
// The PC will be loaded into DISPATCHER_PC after the call to CoreTiming::Advance().
// Advance() does an exception check so we don't know what PC to use until afterwards.
@ -84,7 +84,7 @@ void JitArm64::GenerateAsm()
if (assembly_dispatcher)
{
// set the mem_base based on MSR flags
LDR(INDEX_UNSIGNED, ARM64Reg::W28, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, ARM64Reg::W28, PPC_REG, PPCSTATE_OFF(msr));
FixupBranch physmem = TBNZ(ARM64Reg::W28, 31 - 27);
MOVP2R(MEM_REG, Memory::physical_base);
FixupBranch membaseend = B();
@ -105,18 +105,18 @@ void JitArm64::GenerateAsm()
// b.effectiveAddress != addr || b.msrBits != msr
ARM64Reg pc_and_msr = W25;
ARM64Reg pc_and_msr2 = W24;
LDR(INDEX_UNSIGNED, pc_and_msr, block, offsetof(JitBlockData, effectiveAddress));
LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, effectiveAddress));
CMP(pc_and_msr, DISPATCHER_PC);
FixupBranch pc_missmatch = B(CC_NEQ);
LDR(INDEX_UNSIGNED, pc_and_msr2, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, pc_and_msr2, PPC_REG, PPCSTATE_OFF(msr));
ANDI2R(pc_and_msr2, pc_and_msr2, JitBaseBlockCache::JIT_CACHE_MSR_MASK);
LDR(INDEX_UNSIGNED, pc_and_msr, block, offsetof(JitBlockData, msrBits));
LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, msrBits));
CMP(pc_and_msr, pc_and_msr2);
FixupBranch msr_missmatch = B(CC_NEQ);
// return blocks[block_num].normalEntry;
LDR(INDEX_UNSIGNED, block, block, offsetof(JitBlockData, normalEntry));
LDR(IndexType::Unsigned, block, block, offsetof(JitBlockData, normalEntry));
BR(block);
SetJumpTarget(not_found);
SetJumpTarget(pc_missmatch);
@ -124,7 +124,7 @@ void JitArm64::GenerateAsm()
}
// Call C version of Dispatch().
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
MOVP2R(X0, this);
MOVP2R(X30, reinterpret_cast<void*>(&JitBase::Dispatch));
BLR(X30);
@ -132,7 +132,7 @@ void JitArm64::GenerateAsm()
FixupBranch no_block_available = CBZ(X0);
// set the mem_base based on MSR flags and jump to next block.
LDR(INDEX_UNSIGNED, ARM64Reg::W28, PPC_REG, PPCSTATE_OFF(msr));
LDR(IndexType::Unsigned, ARM64Reg::W28, PPC_REG, PPCSTATE_OFF(msr));
FixupBranch physmem = TBNZ(ARM64Reg::W28, 31 - 27);
MOVP2R(MEM_REG, Memory::physical_base);
BR(X0);
@ -147,19 +147,19 @@ void JitArm64::GenerateAsm()
MOV(W1, DISPATCHER_PC);
MOVP2R(X30, reinterpret_cast<void*>(&JitTrampoline));
BLR(X30);
LDR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
B(dispatcher_no_check);
SetJumpTarget(bail);
do_timing = GetCodePtr();
// Write the current PC out to PPCSTATE
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc));
// Check the state pointer to see if we are exiting
// Gets checked on at the end of every slice
MOVP2R(X0, CPU::GetStatePtr());
LDR(INDEX_UNSIGNED, W0, X0, 0);
LDR(IndexType::Unsigned, W0, X0, 0);
CMP(W0, 0);
FixupBranch Exit = B(CC_NEQ);
@ -169,7 +169,7 @@ void JitArm64::GenerateAsm()
BLR(X30);
// Load the PC back into DISPATCHER_PC (the exception handler might have changed it)
LDR(INDEX_UNSIGNED, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc));
// We can safely assume that downcount >= 1
B(dispatcher_no_check);
@ -178,7 +178,7 @@ void JitArm64::GenerateAsm()
// Reset the stack pointer, as the BLR optimization have touched it.
MOVP2R(X1, &m_saved_stack_pointer);
LDR(INDEX_UNSIGNED, X0, X1, 0);
LDR(IndexType::Unsigned, X0, X1, 0);
ADD(SP, X0, 0);
m_float_emit.ABI_PopRegisters(regs_to_save_fpr, X30);
@ -218,28 +218,28 @@ void JitArm64::GenerateCommonAsm()
const u8* loadPairedU8Two = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(16, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.UXTL(8, D0, D0);
float_emit.UXTL(16, D0, D0);
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
const u8* loadPairedS8Two = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(16, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.SXTL(8, D0, D0);
float_emit.SXTL(16, D0, D0);
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
@ -253,7 +253,7 @@ void JitArm64::GenerateCommonAsm()
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
@ -267,7 +267,7 @@ void JitArm64::GenerateCommonAsm()
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
@ -275,63 +275,63 @@ void JitArm64::GenerateCommonAsm()
const u8* loadPairedFloatOne = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(32, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.REV32(8, D0, D0);
RET(X30);
}
const u8* loadPairedU8One = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(8, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(8, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.UXTL(8, D0, D0);
float_emit.UXTL(16, D0, D0);
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
const u8* loadPairedS8One = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(8, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(8, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.SXTL(8, D0, D0);
float_emit.SXTL(16, D0, D0);
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
const u8* loadPairedU16One = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(16, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.REV16(8, D0, D0);
float_emit.UXTL(16, D0, D0);
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
const u8* loadPairedS16One = GetCodePtr();
{
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.LDR(16, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0);
float_emit.REV16(8, D0, D0);
float_emit.SXTL(16, D0, D0);
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
}
@ -388,7 +388,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
float_emit.FCVTZU(32, D0, D0);
@ -415,7 +415,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
float_emit.FCVTZS(32, D0, D0);
@ -443,7 +443,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
float_emit.FCVTZU(32, D0, D0);
@ -470,7 +470,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
float_emit.FCVTZS(32, D0, D0);
@ -498,7 +498,7 @@ void JitArm64::GenerateCommonAsm()
storeSingleFloat = GetCodePtr();
float_emit.REV32(8, D0, D0);
ADD(addr_reg, addr_reg, MEM_REG);
float_emit.STR(32, INDEX_UNSIGNED, D0, addr_reg, 0);
float_emit.STR(32, IndexType::Unsigned, D0, addr_reg, 0);
RET(X30);
storeSingleFloatSlow = GetCodePtr();
@ -512,7 +512,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
float_emit.FCVTZU(32, D0, D0);
@ -538,7 +538,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
float_emit.FCVTZS(32, D0, D0);
@ -564,7 +564,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
float_emit.FCVTZU(32, D0, D0);
@ -590,7 +590,7 @@ void JitArm64::GenerateCommonAsm()
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
float_emit.LDR(32, INDEX_UNSIGNED, D1, scale_reg, 0);
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
float_emit.FCVTZS(32, D0, D0);


@ -39,13 +39,13 @@ private:
switch (sbits)
{
case 8:
m_emit->STRB(INDEX_UNSIGNED, reg, X0, 0);
m_emit->STRB(IndexType::Unsigned, reg, X0, 0);
break;
case 16:
m_emit->STRH(INDEX_UNSIGNED, reg, X0, 0);
m_emit->STRH(IndexType::Unsigned, reg, X0, 0);
break;
case 32:
m_emit->STR(INDEX_UNSIGNED, reg, X0, 0);
m_emit->STR(IndexType::Unsigned, reg, X0, 0);
break;
default:
ASSERT_MSG(DYNA_REC, false, "Unknown size %d passed to MMIOWriteCodeGenerator!", sbits);
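
Each case stores through the address already computed into X0, which is why every variant can use the unsigned-offset form with an immediate of 0. A host-side equivalent of the dispatch (illustrative only):

  switch (sbits)
  {
  case 8:  *reinterpret_cast<uint8_t*>(addr)  = static_cast<uint8_t>(value);  break;  // STRB
  case 16: *reinterpret_cast<uint16_t*>(addr) = static_cast<uint16_t>(value); break;  // STRH
  case 32: *reinterpret_cast<uint32_t*>(addr) = value;                        break;  // STR
  }
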
@ -127,18 +127,18 @@ private:
{
case 8:
if (m_sign_extend && !dont_extend)
m_emit->LDRSB(INDEX_UNSIGNED, m_dst_reg, X0, 0);
m_emit->LDRSB(IndexType::Unsigned, m_dst_reg, X0, 0);
else
m_emit->LDRB(INDEX_UNSIGNED, m_dst_reg, X0, 0);
m_emit->LDRB(IndexType::Unsigned, m_dst_reg, X0, 0);
break;
case 16:
if (m_sign_extend && !dont_extend)
m_emit->LDRSH(INDEX_UNSIGNED, m_dst_reg, X0, 0);
m_emit->LDRSH(IndexType::Unsigned, m_dst_reg, X0, 0);
else
m_emit->LDRH(INDEX_UNSIGNED, m_dst_reg, X0, 0);
m_emit->LDRH(IndexType::Unsigned, m_dst_reg, X0, 0);
break;
case 32:
m_emit->LDR(INDEX_UNSIGNED, m_dst_reg, X0, 0);
m_emit->LDR(IndexType::Unsigned, m_dst_reg, X0, 0);
break;
default:
ASSERT_MSG(DYNA_REC, false, "Unknown size %d passed to MMIOReadCodeGenerator!", sbits);
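
The read generator mirrors the store dispatch but additionally chooses between a sign-extending and a zero-extending load. For the 8-bit case, in host terms (illustrative):

  const int32_t result = sign_extend ? static_cast<int32_t>(static_cast<int8_t>(byte))  // LDRSB
                                     : static_cast<int32_t>(byte);                      // LDRB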

@ -53,12 +53,12 @@ void VertexLoaderARM64::GetVertexAddr(int array, u64 attribute, ARM64Reg reg)
{
if (m_src_ofs < 4096)
{
LDRB(INDEX_UNSIGNED, scratch1_reg, src_reg, m_src_ofs);
LDRB(IndexType::Unsigned, scratch1_reg, src_reg, m_src_ofs);
}
else
{
ADD(reg, src_reg, m_src_ofs);
LDRB(INDEX_UNSIGNED, scratch1_reg, reg, 0);
LDRB(IndexType::Unsigned, scratch1_reg, reg, 0);
}
m_src_ofs += 1;
}
@ -70,12 +70,12 @@ void VertexLoaderARM64::GetVertexAddr(int array, u64 attribute, ARM64Reg reg)
}
else if (m_src_ofs <= 8190 && !(m_src_ofs & 1))
{
LDRH(INDEX_UNSIGNED, scratch1_reg, src_reg, m_src_ofs);
LDRH(IndexType::Unsigned, scratch1_reg, src_reg, m_src_ofs);
}
else
{
ADD(reg, src_reg, m_src_ofs);
LDRH(INDEX_UNSIGNED, scratch1_reg, reg, 0);
LDRH(IndexType::Unsigned, scratch1_reg, reg, 0);
}
m_src_ofs += 2;
REV16(scratch1_reg, scratch1_reg);
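
The 4096 and 8190 cutoffs above fall out of the encoding: the unsigned-offset form carries a 12-bit immediate scaled by the access size, so byte loads reach offsets 0..4095 while halfword loads reach even offsets up to 8190. The rule, expressed as a hypothetical helper:

  bool FitsUnsignedOffset(int64_t imm, int access_size)
  {
    return imm >= 0 && imm % access_size == 0 && (imm / access_size) <= 0xFFF;
  }
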
@ -87,10 +87,10 @@ void VertexLoaderARM64::GetVertexAddr(int array, u64 attribute, ARM64Reg reg)
m_skip_vertex = CBZ(scratch2_reg);
}
LDR(INDEX_UNSIGNED, scratch2_reg, stride_reg, array * 4);
LDR(IndexType::Unsigned, scratch2_reg, stride_reg, array * 4);
MUL(scratch1_reg, scratch1_reg, scratch2_reg);
LDR(INDEX_UNSIGNED, EncodeRegTo64(scratch2_reg), arraybase_reg, array * 8);
LDR(IndexType::Unsigned, EncodeRegTo64(scratch2_reg), arraybase_reg, array * 8);
ADD(EncodeRegTo64(reg), EncodeRegTo64(scratch1_reg), EncodeRegTo64(scratch2_reg));
}
else
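
For indexed attributes the sequence above amounts to the address computation shown below: the two unsigned-offset LDRs fetch the per-array stride and base (array * 4 and array * 8 sit comfortably inside the scaled 12-bit range for the array indices in play), MUL forms the element offset, and ADD yields the final pointer.

  // addr = arraybase[array] + uint64_t(index) * stride[array];
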
@ -123,7 +123,7 @@ int VertexLoaderARM64::ReadVertex(u64 attribute, int format, int count_in, int c
if (offset == -1)
{
if (count_in == 1)
m_float_emit.LDR(elem_size, INDEX_UNSIGNED, coords, EncodeRegTo64(scratch1_reg), 0);
m_float_emit.LDR(elem_size, IndexType::Unsigned, coords, EncodeRegTo64(scratch1_reg), 0);
else
m_float_emit.LD1(elem_size, 1, coords, EncodeRegTo64(scratch1_reg));
}
@ -133,7 +133,7 @@ int VertexLoaderARM64::ReadVertex(u64 attribute, int format, int count_in, int c
}
else
{
m_float_emit.LDR(load_size, INDEX_UNSIGNED, coords, src_reg, offset);
m_float_emit.LDR(load_size, IndexType::Unsigned, coords, src_reg, offset);
}
if (format != FORMAT_FLOAT)
@ -163,7 +163,7 @@ int VertexLoaderARM64::ReadVertex(u64 attribute, int format, int count_in, int c
if (dequantize && scaling_exponent)
{
m_float_emit.LDR(32, INDEX_UNSIGNED, scale, scale_reg, scaling_exponent * 4);
m_float_emit.LDR(32, IndexType::Unsigned, scale, scale_reg, scaling_exponent * 4);
m_float_emit.FMUL(32, coords, coords, scale, 0);
}
}
@ -180,7 +180,7 @@ int VertexLoaderARM64::ReadVertex(u64 attribute, int format, int count_in, int c
}
else if (!(m_dst_ofs & mask))
{
m_float_emit.STR(write_size, INDEX_UNSIGNED, coords, dst_reg, m_dst_ofs);
m_float_emit.STR(write_size, IndexType::Unsigned, coords, dst_reg, m_dst_ofs);
}
else
{
@ -222,15 +222,15 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
case FORMAT_32B_888x:
case FORMAT_32B_8888:
if (offset == -1)
LDR(INDEX_UNSIGNED, scratch2_reg, EncodeRegTo64(scratch1_reg), 0);
LDR(IndexType::Unsigned, scratch2_reg, EncodeRegTo64(scratch1_reg), 0);
else if (offset & 3) // Not aligned - unscaled
LDUR(scratch2_reg, src_reg, offset);
else
LDR(INDEX_UNSIGNED, scratch2_reg, src_reg, offset);
LDR(IndexType::Unsigned, scratch2_reg, src_reg, offset);
if (format != FORMAT_32B_8888)
ORRI2R(scratch2_reg, scratch2_reg, 0xFF000000);
STR(INDEX_UNSIGNED, scratch2_reg, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, scratch2_reg, dst_reg, m_dst_ofs);
load_bytes = 3 + (format != FORMAT_24B_888);
break;
@ -238,11 +238,11 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// RRRRRGGG GGGBBBBB
// AAAAAAAA BBBBBBBB GGGGGGGG RRRRRRRR
if (offset == -1)
LDRH(INDEX_UNSIGNED, scratch3_reg, EncodeRegTo64(scratch1_reg), 0);
LDRH(IndexType::Unsigned, scratch3_reg, EncodeRegTo64(scratch1_reg), 0);
else if (offset & 1) // Not aligned - unscaled
LDURH(scratch3_reg, src_reg, offset);
else
LDRH(INDEX_UNSIGNED, scratch3_reg, src_reg, offset);
LDRH(IndexType::Unsigned, scratch3_reg, src_reg, offset);
REV16(scratch3_reg, scratch3_reg);
@ -266,7 +266,7 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// A
ORRI2R(scratch1_reg, scratch1_reg, 0xFF000000);
STR(INDEX_UNSIGNED, scratch1_reg, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
load_bytes = 2;
break;
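
A host-side equivalent of the 565 expansion, assuming the usual replicate-top-bits widening per channel, with alpha forced to 0xFF by the ORRI2R above (function name illustrative):

  uint32_t Convert565(uint16_t c)  // c already byte-swapped: R in the top 5 bits
  {
    uint32_t r = (c >> 11) & 0x1F, g = (c >> 5) & 0x3F, b = c & 0x1F;
    r = (r << 3) | (r >> 2);  // 5 -> 8 bits
    g = (g << 2) | (g >> 4);  // 6 -> 8 bits
    b = (b << 3) | (b >> 2);  // 5 -> 8 bits
    return 0xFF000000 | (b << 16) | (g << 8) | r;
  }
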
@ -275,11 +275,11 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// REV16 - RRRRGGGG BBBBAAAA
// AAAAAAAA BBBBBBBB GGGGGGGG RRRRRRRR
if (offset == -1)
LDRH(INDEX_UNSIGNED, scratch3_reg, EncodeRegTo64(scratch1_reg), 0);
LDRH(IndexType::Unsigned, scratch3_reg, EncodeRegTo64(scratch1_reg), 0);
else if (offset & 1) // Not aligned - unscaled
LDURH(scratch3_reg, src_reg, offset);
else
LDRH(INDEX_UNSIGNED, scratch3_reg, src_reg, offset);
LDRH(IndexType::Unsigned, scratch3_reg, src_reg, offset);
// R
UBFM(scratch1_reg, scratch3_reg, 4, 7);
@ -299,7 +299,7 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// Final duplication
ORR(scratch1_reg, scratch1_reg, scratch1_reg, ArithOption(scratch1_reg, ST_LSL, 4));
STR(INDEX_UNSIGNED, scratch1_reg, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
load_bytes = 2;
break;
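
The "final duplication" ORR is the nibble-widening trick: with each 4-bit channel sitting in the low nibble of its destination byte, OR-ing the value with itself shifted left by 4 turns 0x0N into 0xNN in every byte. As a standalone sketch:

  uint32_t Expand4To8(uint32_t packed_low_nibbles)  // e.g. 0x0A0B0C0D -> 0xAABBCCDD
  {
    return packed_low_nibbles | (packed_low_nibbles << 4);
  }
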
@ -316,7 +316,7 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
if (offset & 3) // Not aligned - unscaled
LDUR(scratch3_reg, src_reg, offset);
else
LDR(INDEX_UNSIGNED, scratch3_reg, src_reg, offset);
LDR(IndexType::Unsigned, scratch3_reg, src_reg, offset);
}
REV32(scratch3_reg, scratch3_reg);
@ -344,7 +344,7 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 2));
ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 4));
STR(INDEX_UNSIGNED, scratch1_reg, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
load_bytes = 3;
break;
@ -401,15 +401,15 @@ void VertexLoaderARM64::GenerateVertexLoader()
if (m_VtxDesc.PosMatIdx)
{
LDRB(INDEX_UNSIGNED, scratch1_reg, src_reg, m_src_ofs);
LDRB(IndexType::Unsigned, scratch1_reg, src_reg, m_src_ofs);
AND(scratch1_reg, scratch1_reg, 0, 5);
STR(INDEX_UNSIGNED, scratch1_reg, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
// Z-Freeze
CMP(count_reg, 3);
FixupBranch dont_store = B(CC_GT);
MOVP2R(EncodeRegTo64(scratch2_reg), VertexLoaderManager::position_matrix_index);
STR(INDEX_UNSIGNED, scratch1_reg, EncodeRegTo64(scratch2_reg), 0);
STR(IndexType::Unsigned, scratch1_reg, EncodeRegTo64(scratch2_reg), 0);
SetJumpTarget(dont_store);
m_native_components |= VB_HAS_POSMTXIDX;
@ -544,12 +544,12 @@ void VertexLoaderARM64::GenerateVertexLoader()
m_native_vtx_decl.texcoords[i].type = VAR_FLOAT;
m_native_vtx_decl.texcoords[i].integer = false;
LDRB(INDEX_UNSIGNED, scratch2_reg, src_reg, texmatidx_ofs[i]);
LDRB(IndexType::Unsigned, scratch2_reg, src_reg, texmatidx_ofs[i]);
m_float_emit.UCVTF(S31, scratch2_reg);
if (tc[i])
{
m_float_emit.STR(32, INDEX_UNSIGNED, D31, dst_reg, m_dst_ofs);
m_float_emit.STR(32, IndexType::Unsigned, D31, dst_reg, m_dst_ofs);
m_dst_ofs += sizeof(float);
}
else
@ -565,14 +565,14 @@ void VertexLoaderARM64::GenerateVertexLoader()
// If m_dst_ofs isn't 8byte aligned we can't store an 8byte zero register
// So store two 4byte zero registers
// The destination is always 4byte aligned
STR(INDEX_UNSIGNED, WSP, dst_reg, m_dst_ofs);
STR(INDEX_UNSIGNED, WSP, dst_reg, m_dst_ofs + 4);
STR(IndexType::Unsigned, WSP, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, WSP, dst_reg, m_dst_ofs + 4);
}
else
{
STR(INDEX_UNSIGNED, SP, dst_reg, m_dst_ofs);
STR(IndexType::Unsigned, SP, dst_reg, m_dst_ofs);
}
m_float_emit.STR(32, INDEX_UNSIGNED, D31, dst_reg, m_dst_ofs + 8);
m_float_emit.STR(32, IndexType::Unsigned, D31, dst_reg, m_dst_ofs + 8);
m_dst_ofs += sizeof(float) * 3;
}
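
The comment above states the invariant that drives this split: the destination is always 4-byte aligned but not necessarily 8-byte aligned, so the zero fill is either a single 64-bit store of the zero register or two 32-bit ones. Sketched with hypothetical store helpers:

  if (dst_ofs & 7)
  {
    Store32(dst + dst_ofs, 0);      // two 4-byte zero stores
    Store32(dst + dst_ofs + 4, 0);
  }
  else
  {
    Store64(dst + dst_ofs, 0);      // one 8-byte zero store
  }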