Make immediates more explicit

Instead of casting OpArg::offset by hand at every use site, add
accessor functions that return the immediate at its declared width.

Also add debug asserts so any width mismatch is caught immediately.
Scott Mansell 2015-03-16 22:58:40 +13:00
parent 4b7748f3c0
commit 6262a9bcbe
7 changed files with 72 additions and 59 deletions


@@ -149,9 +149,19 @@ struct OpArg
void WriteFloatModRM(XEmitter *emit, FloatOp op);
void WriteSingleByteOp(XEmitter *emit, u8 op, X64Reg operandReg, int bits);
// This one is public - must be written to
-u64 offset; // use RIP-relative as much as possible - 64-bit immediates are not available.
+u64 offset; // use RIP-relative as much as possible - Also used to store immediates.
u16 operandReg;
+u64 Imm64() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM64); return (u64)offset; }
+u32 Imm32() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM32); return (u32)offset; }
+u16 Imm16() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM16); return (u16)offset; }
+u8 Imm8() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM8); return (u8)offset; }
+s64 SImm64() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM64); return (s64)offset; }
+s32 SImm32() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM32); return (s32)offset; }
+s16 SImm16() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM16); return (s16)offset; }
+s8 SImm8() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM8); return (s8)offset; }
void WriteNormalOp(XEmitter *emit, bool toRM, NormalOp op, const OpArg &operand, int bits) const;
bool IsImm() const {return scale == SCALE_IMM8 || scale == SCALE_IMM16 || scale == SCALE_IMM32 || scale == SCALE_IMM64;}
bool IsSimpleReg() const {return scale == SCALE_NONE;}
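
For illustration only (not part of the commit): the accessors turn silent call-site casts into checked reads. A minimal sketch using the emitter's real Imm16/Imm32 constructors:

// Hypothetical snippet - arg carries a 16-bit immediate.
OpArg arg = Imm16(0x1234);
u16 narrow = arg.Imm16(); // fine: scale == SCALE_IMM16
u32 wide = arg.Imm32();   // debug builds: _dbg_assert_ fires, scale != SCALE_IMM32

Before this change, both reads would have been unchecked casts of offset.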


@@ -903,19 +903,19 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg)
switch (regs[dreg].size)
{
case 2:
-emitter.MOV(16, reg, Imm16((u16) arg.offset));
+emitter.MOV(16, reg, Imm16(arg.Imm16()));
break;
case 4:
-emitter.MOV(32, reg, Imm32((u32) arg.offset));
+emitter.MOV(32, reg, Imm32(arg.Imm32()));
break;
case 8:
-if ((u32) arg.offset == arg.offset)
+if ((u32)arg.Imm64() == arg.Imm64())
{
-emitter.MOV(64, reg, Imm32((u32) arg.offset));
+emitter.MOV(64, reg, Imm32((u32) arg.Imm64()));
}
else
{
-emitter.MOV(64, reg, Imm64(arg.offset));
+emitter.MOV(64, reg, Imm64(arg.Imm64()));
}
}
break;
default:
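
The case 8 branch above picks the shorter encoding with a truncation round trip; restated in isolation (a sketch, not commit code):

u64 value = arg.Imm64();
bool fits_in_32 = (u32)value == value; // true iff the upper 32 bits are zero
// fits_in_32 ? MOV(64, reg, Imm32(...)) : MOV(64, reg, Imm64(...))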


@@ -133,7 +133,7 @@ void Jit64::ComputeRC(const Gen::OpArg & arg, bool needs_test, bool needs_sext)
_assert_msg_(DYNA_REC, arg.IsSimpleReg() || arg.IsImm(), "Invalid ComputeRC operand");
if (arg.IsImm())
{
-MOV(64, PPCSTATE(cr_val[0]), Imm32((s32)arg.offset));
+MOV(64, PPCSTATE(cr_val[0]), Imm32(arg.SImm32()));
}
else if (needs_sext)
{
@@ -148,7 +148,7 @@ void Jit64::ComputeRC(const Gen::OpArg & arg, bool needs_test, bool needs_sext)
{
if (arg.IsImm())
{
-DoMergedBranchImmediate((s32)arg.offset);
+DoMergedBranchImmediate(arg.SImm32());
}
else
{
@@ -225,7 +225,7 @@ void Jit64::regimmop(int d, int a, bool binary, u32 value, Operation doop, void
carry &= js.op->wantsCA;
if (gpr.R(a).IsImm() && !carry)
{
-gpr.SetImmediate32(d, doop((u32)gpr.R(a).offset, value));
+gpr.SetImmediate32(d, doop(gpr.R(a).Imm32(), value));
}
else if (a == d)
{
@@ -274,7 +274,7 @@ void Jit64::reg_imm(UGeckoInstruction inst)
// occasionally used as MOV - emulate, with immediate propagation
if (gpr.R(a).IsImm() && d != a && a != 0)
{
-gpr.SetImmediate32(d, (u32)gpr.R(a).offset + (u32)(s32)(s16)inst.SIMM_16);
+gpr.SetImmediate32(d, gpr.R(a).Imm32() + (u32)(s32)inst.SIMM_16);
}
else if (inst.SIMM_16 == 0 && d != a && a != 0)
{
@@ -498,8 +498,8 @@ void Jit64::cmpXX(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && comparand.IsImm())
{
// Both registers contain immediate values, so we can pre-compile the compare result
-s64 compareResult = signedCompare ? (s64)(s32)gpr.R(a).offset - (s64)(s32)comparand.offset :
-(u64)(u32)gpr.R(a).offset - (u64)(u32)comparand.offset;
+s64 compareResult = signedCompare ? (s64)gpr.R(a).SImm32() - (s64)comparand.SImm32() :
+(u64)gpr.R(a).Imm32() - (u64)comparand.Imm32();
if (compareResult == (s32)compareResult)
{
MOV(64, PPCSTATE(cr_val[crf]), Imm32((u32)compareResult));
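
A side note on why the precomputed result is trustworthy (reviewer sketch with hypothetical values, not commit code): widening to 64 bits makes the subtraction exact, so the sign and zeroness of compareResult encode the full ordering.

s32 a = -3, b = 5;        // hypothetical immediates
s64 r = (s64)a - (s64)b;  // cannot overflow for 32-bit inputs
// r < 0 => a < b, r == 0 => a == b, r > 0 => a > b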
@@ -519,7 +519,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)
if (signedCompare)
{
if (gpr.R(a).IsImm())
-MOV(64, R(input), Imm32((s32)gpr.R(a).offset));
+MOV(64, R(input), Imm32(gpr.R(a).SImm32()));
else
MOVSX(64, 32, input, gpr.R(a));
@@ -533,9 +533,9 @@ void Jit64::cmpXX(UGeckoInstruction inst)
{
if (gpr.R(a).IsImm())
{
-MOV(32, R(input), Imm32((u32)gpr.R(a).offset));
+MOV(32, R(input), Imm32(gpr.R(a).Imm32()));
}
-else if (comparand.IsImm() && !comparand.offset)
+else if (comparand.IsImm() && !comparand.Imm32())
{
gpr.BindToRegister(a, true, false);
input = gpr.RX(a);
@@ -548,7 +548,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)
if (comparand.IsImm())
{
// sign extension will ruin this, so store it in a register
-if (comparand.offset & 0x80000000U)
+if (comparand.Imm32() & 0x80000000U)
{
MOV(32, R(RSCRATCH2), comparand);
comparand = R(RSCRATCH2);
@@ -560,7 +560,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)
comparand = gpr.R(b);
}
}
-if (comparand.IsImm() && !comparand.offset)
+if (comparand.IsImm() && !comparand.Imm32())
{
MOV(64, PPCSTATE(cr_val[crf]), R(input));
// Place the comparison next to the branch for macro-op fusion
@@ -590,8 +590,8 @@ void Jit64::boolX(UGeckoInstruction inst)
if (gpr.R(s).IsImm() && gpr.R(b).IsImm())
{
-const u32 rs_offset = static_cast<u32>(gpr.R(s).offset);
-const u32 rb_offset = static_cast<u32>(gpr.R(b).offset);
+const u32 rs_offset = gpr.R(s).Imm32();
+const u32 rb_offset = gpr.R(b).Imm32();
if (inst.SUBOP10 == 28) // andx
gpr.SetImmediate32(a, rs_offset & rb_offset);
@@ -797,7 +797,7 @@ void Jit64::extsXx(UGeckoInstruction inst)
if (gpr.R(s).IsImm())
{
-gpr.SetImmediate32(a, (u32)(s32)(size == 16 ? (s16)gpr.R(s).offset : (s8)gpr.R(s).offset));
+gpr.SetImmediate32(a, (u32)(s32)(size == 16 ? (s16)gpr.R(s).Imm32() : (s8)gpr.R(s).Imm32()));
}
else
{
@@ -860,7 +860,7 @@ void Jit64::subfx(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(b).IsImm())
{
-s32 i = (s32)gpr.R(b).offset, j = (s32)gpr.R(a).offset;
+s32 i = gpr.R(b).SImm32(), j = gpr.R(a).SImm32();
gpr.SetImmediate32(d, i - j);
if (inst.OE)
GenerateConstantOverflow((s64)i - (s64)j);
@@ -959,7 +959,7 @@ void Jit64::mulli(UGeckoInstruction inst)
if (gpr.R(a).IsImm())
{
-gpr.SetImmediate32(d, (u32)gpr.R(a).offset * imm);
+gpr.SetImmediate32(d, gpr.R(a).Imm32() * imm);
}
else
{
@@ -978,7 +978,7 @@ void Jit64::mullwx(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(b).IsImm())
{
-s32 i = (s32)gpr.R(a).offset, j = (s32)gpr.R(b).offset;
+s32 i = gpr.R(a).SImm32(), j = gpr.R(b).SImm32();
gpr.SetImmediate32(d, i * j);
if (inst.OE)
GenerateConstantOverflow((s64)i * (s64)j);
@@ -989,7 +989,7 @@ void Jit64::mullwx(UGeckoInstruction inst)
gpr.BindToRegister(d, (d == a || d == b), true);
if (gpr.R(a).IsImm() || gpr.R(b).IsImm())
{
-u32 imm = gpr.R(a).IsImm() ? (u32)gpr.R(a).offset : (u32)gpr.R(b).offset;
+u32 imm = gpr.R(a).IsImm() ? gpr.R(a).Imm32() : gpr.R(b).Imm32();
int src = gpr.R(a).IsImm() ? b : a;
MultiplyImmediate(imm, src, d, inst.OE);
}
@@ -1024,9 +1024,9 @@ void Jit64::mulhwXx(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(b).IsImm())
{
if (sign)
-gpr.SetImmediate32(d, (u32)((u64)(((s64)(s32)gpr.R(a).offset * (s64)(s32)gpr.R(b).offset)) >> 32));
+gpr.SetImmediate32(d, (u32)((u64)(((s64)gpr.R(a).SImm32() * (s64)gpr.R(b).SImm32())) >> 32));
else
-gpr.SetImmediate32(d, (u32)((gpr.R(a).offset * gpr.R(b).offset) >> 32));
+gpr.SetImmediate32(d, (u32)(((u64)gpr.R(a).Imm32() * (u64)gpr.R(b).Imm32()) >> 32));
}
else if (sign)
{
@@ -1066,7 +1066,7 @@ void Jit64::divwux(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(b).IsImm())
{
-if (gpr.R(b).offset == 0)
+if (gpr.R(b).Imm32() == 0)
{
gpr.SetImmediate32(d, 0);
if (inst.OE)
@@ -1074,14 +1074,14 @@ void Jit64::divwux(UGeckoInstruction inst)
}
else
{
-gpr.SetImmediate32(d, (u32)gpr.R(a).offset / (u32)gpr.R(b).offset);
+gpr.SetImmediate32(d, gpr.R(a).Imm32() / gpr.R(b).Imm32());
if (inst.OE)
GenerateConstantOverflow(false);
}
}
else if (gpr.R(b).IsImm())
{
-u32 divisor = (u32)gpr.R(b).offset;
+u32 divisor = gpr.R(b).Imm32();
if (divisor == 0)
{
gpr.SetImmediate32(d, 0);
@@ -1186,7 +1186,7 @@ void Jit64::divwx(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(b).IsImm())
{
-s32 i = (s32)gpr.R(a).offset, j = (s32)gpr.R(b).offset;
+s32 i = gpr.R(a).SImm32(), j = gpr.R(b).SImm32();
if (j == 0 || (i == (s32)0x80000000 && j == -1))
{
gpr.SetImmediate32(d, (i >> 31) ^ j);
@@ -1255,7 +1255,7 @@ void Jit64::addx(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(b).IsImm())
{
-s32 i = (s32)gpr.R(a).offset, j = (s32)gpr.R(b).offset;
+s32 i = gpr.R(a).SImm32(), j = gpr.R(b).SImm32();
gpr.SetImmediate32(d, i + j);
if (inst.OE)
GenerateConstantOverflow((s64)i + (s64)j);
@@ -1338,7 +1338,7 @@ void Jit64::arithXex(UGeckoInstruction inst)
// if the source is an immediate, we can invert carry by going from add -> sub and doing src = -1 - src
if (js.carryFlagInverted && source.IsImm())
{
-source.offset = -1 - (s32)source.offset;
+source = Imm32(-1 - source.SImm32());
SBB(32, gpr.R(d), source);
invertedCarry = true;
}
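
Why src = -1 - src inverts the carry (identity check, not from the commit): in two's complement -1 - b == ~b, and SBB computes a - src - borrow, so modulo 2^32

a - (~b) - borrow = a - (2^32 - 1 - b) - borrow
                  = a + b + (1 - borrow)

which is exactly ADC's a + b + carry once carry = 1 - borrow, i.e. when the flag holds the inverted carry.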
@@ -1403,7 +1403,7 @@ void Jit64::rlwinmx(UGeckoInstruction inst)
if (gpr.R(s).IsImm())
{
-u32 result = (int)gpr.R(s).offset;
+u32 result = gpr.R(s).Imm32();
if (inst.SH != 0)
result = _rotl(result, inst.SH);
result &= Helper_Mask(inst.MB, inst.ME);
@@ -1491,7 +1491,7 @@ void Jit64::rlwimix(UGeckoInstruction inst)
if (gpr.R(a).IsImm() && gpr.R(s).IsImm())
{
u32 mask = Helper_Mask(inst.MB,inst.ME);
-gpr.SetImmediate32(a, ((u32)gpr.R(a).offset & ~mask) | (_rotl((u32)gpr.R(s).offset,inst.SH) & mask));
+gpr.SetImmediate32(a, (gpr.R(a).Imm32() & ~mask) | (_rotl(gpr.R(s).Imm32(),inst.SH) & mask));
if (inst.Rc)
ComputeRC(gpr.R(a));
}
@@ -1517,7 +1517,7 @@ void Jit64::rlwimix(UGeckoInstruction inst)
{
gpr.BindToRegister(a, true, true);
AndWithMask(gpr.RX(a), ~mask);
-OR(32, gpr.R(a), Imm32(_rotl((u32)gpr.R(s).offset, inst.SH) & mask));
+OR(32, gpr.R(a), Imm32(_rotl(gpr.R(s).Imm32(), inst.SH) & mask));
}
else if (inst.SH)
{
@@ -1525,7 +1525,7 @@ void Jit64::rlwimix(UGeckoInstruction inst)
bool isRightShift = mask == (1U << inst.SH) - 1;
if (gpr.R(a).IsImm())
{
-u32 maskA = (u32)gpr.R(a).offset & ~mask;
+u32 maskA = gpr.R(a).Imm32() & ~mask;
gpr.BindToRegister(a, false, true);
MOV(32, gpr.R(a), gpr.R(s));
if (isLeftShift)
@@ -1591,7 +1591,7 @@ void Jit64::rlwnmx(UGeckoInstruction inst)
u32 mask = Helper_Mask(inst.MB, inst.ME);
if (gpr.R(b).IsImm() && gpr.R(s).IsImm())
{
-gpr.SetImmediate32(a, _rotl((u32)gpr.R(s).offset, (u32)gpr.R(b).offset & 0x1F) & mask);
+gpr.SetImmediate32(a, _rotl(gpr.R(s).Imm32(), gpr.R(b).Imm32() & 0x1F) & mask);
}
else
{
@@ -1626,9 +1626,9 @@ void Jit64::negx(UGeckoInstruction inst)
if (gpr.R(a).IsImm())
{
-gpr.SetImmediate32(d, ~((u32)gpr.R(a).offset) + 1);
+gpr.SetImmediate32(d, ~(gpr.R(a).Imm32()) + 1);
if (inst.OE)
-GenerateConstantOverflow(gpr.R(d).offset == 0x80000000);
+GenerateConstantOverflow(gpr.R(d).Imm32() == 0x80000000);
}
else
{
@@ -1655,8 +1655,8 @@ void Jit64::srwx(UGeckoInstruction inst)
if (gpr.R(b).IsImm() && gpr.R(s).IsImm())
{
-u32 amount = (u32)gpr.R(b).offset;
-gpr.SetImmediate32(a, (amount & 0x20) ? 0 : ((u32)gpr.R(s).offset >> (amount & 0x1f)));
+u32 amount = gpr.R(b).Imm32();
+gpr.SetImmediate32(a, (amount & 0x20) ? 0 : (gpr.R(s).Imm32() >> (amount & 0x1f)));
}
else
{
@@ -1688,8 +1688,8 @@ void Jit64::slwx(UGeckoInstruction inst)
if (gpr.R(b).IsImm() && gpr.R(s).IsImm())
{
-u32 amount = (u32)gpr.R(b).offset;
-gpr.SetImmediate32(a, (amount & 0x20) ? 0 : (u32)gpr.R(s).offset << (amount & 0x1f));
+u32 amount = gpr.R(b).Imm32();
+gpr.SetImmediate32(a, (amount & 0x20) ? 0 : gpr.R(s).Imm32() << (amount & 0x1f));
if (inst.Rc)
ComputeRC(gpr.R(a));
}
@@ -1820,7 +1820,7 @@ void Jit64::cntlzwx(UGeckoInstruction inst)
u32 i = 0;
for (; i < 32; i++, mask >>= 1)
{
-if ((u32)gpr.R(s).offset & mask)
+if (gpr.R(s).Imm32() & mask)
break;
}
gpr.SetImmediate32(a, i);
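
The folded loop is just a leading-zero count of the immediate. An equivalent one-liner, assuming C++20 <bit> (newer than this 2015 code, shown only for comparison):

#include <bit>
u32 lz = std::countl_zero(gpr.R(s).Imm32()); // yields 32 for a zero value, like the loop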


@@ -154,7 +154,7 @@ void Jit64::lXXx(UGeckoInstruction inst)
// Determine whether this instruction updates inst.RA
bool update;
if (inst.OPCD == 31)
-update = ((inst.SUBOP10 & 0x20) != 0) && (!gpr.R(b).IsImm() || gpr.R(b).offset != 0);
+update = ((inst.SUBOP10 & 0x20) != 0) && (!gpr.R(b).IsImm() || gpr.R(b).Imm32() != 0);
else
update = ((inst.OPCD & 1) != 0) && inst.SIMM_16 != 0;
@@ -184,14 +184,14 @@ void Jit64::lXXx(UGeckoInstruction inst)
{
if ((inst.OPCD != 31) && gpr.R(a).IsImm() && !js.memcheck)
{
-u32 val = (u32)gpr.R(a).offset + (s32)inst.SIMM_16;
+u32 val = gpr.R(a).Imm32() + inst.SIMM_16;
opAddress = Imm32(val);
if (update)
gpr.SetImmediate32(a, val);
}
else if ((inst.OPCD == 31) && gpr.R(a).IsImm() && gpr.R(b).IsImm() && !js.memcheck)
{
-u32 val = (u32)gpr.R(a).offset + (u32)gpr.R(b).offset;
+u32 val = gpr.R(a).Imm32() + gpr.R(b).Imm32();
opAddress = Imm32(val);
if (update)
gpr.SetImmediate32(a, val);
@@ -200,7 +200,10 @@ void Jit64::lXXx(UGeckoInstruction inst)
{
// If we're using reg+reg mode and b is an immediate, pretend we're using constant offset mode
bool use_constant_offset = inst.OPCD != 31 || gpr.R(b).IsImm();
-s32 offset = inst.OPCD == 31 ? (s32)gpr.R(b).offset : (s32)inst.SIMM_16;
+s32 offset;
+if (use_constant_offset)
+offset = inst.OPCD == 31 ? gpr.R(b).SImm32() : (s32)inst.SIMM_16;
// Depending on whether we have an immediate and/or update, find the optimum way to calculate
// the load address.
if ((update || use_constant_offset) && !js.memcheck)
@@ -385,7 +388,7 @@ void Jit64::stX(UGeckoInstruction inst)
// If we already know the address of the write
if (!a || gpr.R(a).IsImm())
{
-u32 addr = (a ? (u32)gpr.R(a).offset : 0) + offset;
+u32 addr = (a ? gpr.R(a).Imm32() : 0) + offset;
bool exception = WriteToConstAddress(accessSize, gpr.R(s), addr, CallerSavedRegistersInUse());
if (update)
{


@@ -133,7 +133,7 @@ void Jit64::stfXXX(UGeckoInstruction inst)
if (!indexed && (!a || gpr.R(a).IsImm()))
{
-u32 addr = (a ? (u32)gpr.R(a).offset : 0) + imm;
+u32 addr = (a ? gpr.R(a).Imm32() : 0) + imm;
bool exception = WriteToConstAddress(accessSize, R(RSCRATCH), addr, CallerSavedRegistersInUse());
if (update)


@@ -431,7 +431,7 @@ void Jit64::mtcrf(UGeckoInstruction inst)
{
if ((crm & (0x80 >> i)) != 0)
{
-u8 newcr = (gpr.R(inst.RS).offset >> (28 - (i * 4))) & 0xF;
+u8 newcr = (gpr.R(inst.RS).Imm32() >> (28 - (i * 4))) & 0xF;
u64 newcrval = PPCCRToInternal(newcr);
if ((s64)newcrval == (s32)newcrval)
{


@@ -74,7 +74,7 @@ u8 *EmuCodeBlock::UnsafeLoadToReg(X64Reg reg_value, OpArg opAddress, int accessS
}
else if (opAddress.IsImm())
{
-MOV(32, R(reg_value), Imm32((u32)(opAddress.offset + offset)));
+MOV(32, R(reg_value), Imm32((u32)(opAddress.Imm32() + offset)));
memOperand = MRegSum(RMEM, reg_value);
}
else
@@ -298,7 +298,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress,
if (opAddress.IsImm())
{
-u32 address = (u32)opAddress.offset + offset;
+u32 address = opAddress.Imm32() + offset;
// If the address is known to be RAM, just load it directly.
if (PowerPC::IsOptimizableRAMAddress(address))
@@ -398,11 +398,11 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress,
static OpArg SwapImmediate(int accessSize, OpArg reg_value)
{
if (accessSize == 32)
-return Imm32(Common::swap32((u32)reg_value.offset));
+return Imm32(Common::swap32(reg_value.Imm32()));
else if (accessSize == 16)
-return Imm16(Common::swap16((u16)reg_value.offset));
+return Imm16(Common::swap16(reg_value.Imm16()));
else
-return Imm8((u8)reg_value.offset);
+return Imm8(reg_value.Imm8());
}
u8 *EmuCodeBlock::UnsafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int accessSize, s32 offset, bool swap)
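
SwapImmediate pre-swaps a known store value at compile time so the emitted store needs no runtime BSWAP. An illustrative call (Common::swap32 is Dolphin's existing byte-swap helper):

OpArg be = SwapImmediate(32, Imm32(0x12345678)); // returns Imm32(0x78563412)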
@@ -441,9 +441,9 @@ static OpArg FixImmediate(int accessSize, OpArg arg)
{
if (arg.IsImm())
{
-arg = accessSize == 8 ? Imm8((u8)arg.offset) :
-accessSize == 16 ? Imm16((u16)arg.offset) :
-Imm32((u32)arg.offset);
+arg = accessSize == 8 ? Imm8((u8)arg.Imm32()) :
+accessSize == 16 ? Imm16((u16)arg.Imm32()) :
+Imm32((u32)arg.Imm32());
}
return arg;
}
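
FixImmediate narrows a wider immediate to the access size so the typed accessors downstream see a matching scale. An illustrative call:

OpArg narrowed = FixImmediate(8, Imm32(0x1234)); // returns Imm8(0x34)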