diff --git a/src/Xenia.Debug/Function.cs b/src/Xenia.Debug/Function.cs
index a597233bc..692530e4b 100644
--- a/src/Xenia.Debug/Function.cs
+++ b/src/Xenia.Debug/Function.cs
@@ -123,7 +123,6 @@ namespace Xenia.Debug {
     private void DisassembleX64() {
       var str = disassembler.GenerateString(IntPtr.Zero, 0);
       System.Diagnostics.Debug.WriteLine(str);
-      disassembler.Dispose();
     }
   }
 }
diff --git a/src/xenia/cpu/frontend/ppc_emit_altivec.cc b/src/xenia/cpu/frontend/ppc_emit_altivec.cc
index f5c6fef73..4aee9cda7 100644
--- a/src/xenia/cpu/frontend/ppc_emit_altivec.cc
+++ b/src/xenia/cpu/frontend/ppc_emit_altivec.cc
@@ -85,14 +85,16 @@ XEEMITTER(dss, 0x7C00066C, XDSS)(PPCHIRBuilder& f, InstrData& i) {
 
 XEEMITTER(lvebx, 0x7C00000E, X)(PPCHIRBuilder& f, InstrData& i) {
   // Same as lvx.
-  Value* ea = f.And(CalculateEA_0(f, i.X.RA, i.X.RB), f.LoadConstant(~0xFull));
+  Value* ea =
+      f.And(CalculateEA_0(f, i.X.RA, i.X.RB), f.LoadConstantUint64(~0xFull));
   f.StoreVR(i.X.RT, f.ByteSwap(f.Load(ea, VEC128_TYPE)));
   return 0;
 }
 
 XEEMITTER(lvehx, 0x7C00004E, X)(PPCHIRBuilder& f, InstrData& i) {
   // Same as lvx.
-  Value* ea = f.And(CalculateEA_0(f, i.X.RA, i.X.RB), f.LoadConstant(~0xFull));
+  Value* ea =
+      f.And(CalculateEA_0(f, i.X.RA, i.X.RB), f.LoadConstantUint64(~0xFull));
   f.StoreVR(i.X.RT, f.ByteSwap(f.Load(ea, VEC128_TYPE)));
   return 0;
 }
@@ -100,7 +102,7 @@ XEEMITTER(lvehx, 0x7C00004E, X)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_lvewx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                      uint32_t rb) {
   // Same as lvx.
-  Value* ea = f.And(CalculateEA_0(f, ra, rb), f.LoadConstant(~0xFull));
+  Value* ea = f.And(CalculateEA_0(f, ra, rb), f.LoadConstantUint64(~0xFull));
   f.StoreVR(vd, f.ByteSwap(f.Load(ea, VEC128_TYPE)));
   return 0;
 }
@@ -114,7 +116,7 @@ XEEMITTER(lvewx128, VX128_1(4, 131), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_lvsl_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                     uint32_t rb) {
   Value* ea = CalculateEA_0(f, ra, rb);
-  Value* sh = f.Truncate(f.And(ea, f.LoadConstant((int64_t)0xF)), INT8_TYPE);
+  Value* sh = f.Truncate(f.And(ea, f.LoadConstantInt64(0xF)), INT8_TYPE);
   Value* v = f.LoadVectorShl(sh);
   f.StoreVR(vd, v);
   return 0;
@@ -129,7 +131,7 @@ XEEMITTER(lvsl128, VX128_1(4, 3), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_lvsr_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                     uint32_t rb) {
   Value* ea = CalculateEA_0(f, ra, rb);
-  Value* sh = f.Truncate(f.And(ea, f.LoadConstant((int64_t)0xF)), INT8_TYPE);
+  Value* sh = f.Truncate(f.And(ea, f.LoadConstantInt64(0xF)), INT8_TYPE);
   Value* v = f.LoadVectorShr(sh);
   f.StoreVR(vd, v);
   return 0;
@@ -143,7 +145,7 @@ XEEMITTER(lvsr128, VX128_1(4, 67), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 
 int InstrEmit_lvx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                    uint32_t rb) {
-  Value* ea = f.And(CalculateEA_0(f, ra, rb), f.LoadConstant(~0xFull));
+  Value* ea = f.And(CalculateEA_0(f, ra, rb), f.LoadConstantInt64(~0xFull));
   f.StoreVR(vd, f.ByteSwap(f.Load(ea, VEC128_TYPE)));
   return 0;
 }
@@ -162,7 +164,7 @@ XEEMITTER(lvxl128, VX128_1(4, 707), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 
 XEEMITTER(stvebx, 0x7C00010E, X)(PPCHIRBuilder& f, InstrData& i) {
   Value* ea = CalculateEA_0(f, i.X.RA, i.X.RB);
-  Value* el = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant(uint8_t(0xF)));
+  Value* el = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantUint8(0xF));
   Value* v = f.Extract(f.LoadVR(i.X.RT), el, INT8_TYPE);
   f.Store(ea, v);
   return 0;
@@ -170,9 +172,9 @@ XEEMITTER(stvebx, 0x7C00010E, X)(PPCHIRBuilder& f, InstrData& i) {
 
 XEEMITTER(stvehx, 0x7C00014E, X)(PPCHIRBuilder& f, InstrData& i) {
   Value* ea = CalculateEA_0(f, i.X.RA, i.X.RB);
-  ea = f.And(ea, f.LoadConstant(~0x1ull));
+  ea = f.And(ea, f.LoadConstantUint64(~0x1ull));
   Value* el =
-      f.Shr(f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant(uint8_t(0xF))), 1);
+      f.Shr(f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantUint8(0xF)), 1);
   Value* v = f.Extract(f.LoadVR(i.X.RT), el, INT16_TYPE);
   f.Store(ea, f.ByteSwap(v));
   return 0;
@@ -181,9 +183,9 @@ XEEMITTER(stvehx, 0x7C00014E, X)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_stvewx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                       uint32_t rb) {
   Value* ea = CalculateEA_0(f, ra, rb);
-  ea = f.And(ea, f.LoadConstant(~0x3ull));
+  ea = f.And(ea, f.LoadConstantUint64(~0x3ull));
   Value* el =
-      f.Shr(f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant(uint8_t(0xF))), 2);
+      f.Shr(f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantUint8(0xF)), 2);
   Value* v = f.Extract(f.LoadVR(vd), el, INT32_TYPE);
   f.Store(ea, f.ByteSwap(v));
   return 0;
@@ -197,7 +199,7 @@ XEEMITTER(stvewx128, VX128_1(4, 387), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 
 int InstrEmit_stvx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                     uint32_t rb) {
-  Value* ea = f.And(CalculateEA_0(f, ra, rb), f.LoadConstant(~0xFull));
+  Value* ea = f.And(CalculateEA_0(f, ra, rb), f.LoadConstantUint64(~0xFull));
   f.Store(ea, f.ByteSwap(f.LoadVR(vd)));
   return 0;
 }
@@ -219,12 +221,12 @@ XEEMITTER(stvxl128, VX128_1(4, 963), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_lvlx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                     uint32_t rb) {
   Value* ea = CalculateEA_0(f, ra, rb);
-  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant((int8_t)0xF));
+  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantInt8(0xF));
   // ea &= ~0xF
-  ea = f.And(ea, f.LoadConstant(~0xFull));
+  ea = f.And(ea, f.LoadConstantUint64(~0xFull));
   // v = (new << eb)
   Value* v = f.Permute(f.LoadVectorShl(eb), f.ByteSwap(f.Load(ea, VEC128_TYPE)),
-                       f.LoadZero(VEC128_TYPE), INT8_TYPE);
+                       f.LoadZeroVec128(), INT8_TYPE);
   f.StoreVR(vd, v);
   return 0;
 }
@@ -244,11 +246,11 @@ XEEMITTER(lvlxl128, VX128_1(4, 1539), VX128_1)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_lvrx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
                     uint32_t rb) {
   Value* ea = CalculateEA_0(f, ra, rb);
-  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant((int8_t)0xF));
+  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantInt8(0xF));
   // ea &= ~0xF
-  ea = f.And(ea, f.LoadConstant(~0xFull));
+  ea = f.And(ea, f.LoadConstantUint64(~0xFull));
   // v = (new >> (16 - eb))
-  Value* v = f.Permute(f.LoadVectorShl(eb), f.LoadZero(VEC128_TYPE),
+  Value* v = f.Permute(f.LoadVectorShl(eb), f.LoadZeroVec128(),
                        f.ByteSwap(f.Load(ea, VEC128_TYPE)), INT8_TYPE);
   f.StoreVR(vd, v);
   return 0;
@@ -271,16 +273,16 @@ int InstrEmit_stvlx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
   // NOTE: if eb == 0 (so 16b aligned) this equals new_value
   //       we could optimize this to prevent the other load/mask, in that case.
   Value* ea = CalculateEA_0(f, ra, rb);
-  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant((int8_t)0xF));
+  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantInt8(0xF));
   // ea &= ~0xF
-  ea = f.And(ea, f.LoadConstant(~0xFull));
+  ea = f.And(ea, f.LoadConstantUint64(~0xFull));
   // v = (old & ~mask) | ((new >> eb) & mask)
-  Value* new_value = f.Permute(f.LoadVectorShr(eb), f.LoadZero(VEC128_TYPE),
+  Value* new_value = f.Permute(f.LoadVectorShr(eb), f.LoadZeroVec128(),
                                f.LoadVR(vd), INT8_TYPE);
   Value* old_value = f.ByteSwap(f.Load(ea, VEC128_TYPE));
   // mask = FFFF... >> eb
-  Value* mask = f.Permute(f.LoadVectorShr(eb), f.LoadZero(VEC128_TYPE),
-                          f.Not(f.LoadZero(VEC128_TYPE)), INT8_TYPE);
+  Value* mask = f.Permute(f.LoadVectorShr(eb), f.LoadZeroVec128(),
+                          f.Not(f.LoadZeroVec128()), INT8_TYPE);
   Value* v = f.Or(f.And(old_value, f.Not(mask)), f.And(new_value, mask));
   // ea &= ~0xF (handled above)
   f.Store(ea, f.ByteSwap(v));
@@ -305,16 +307,16 @@ int InstrEmit_stvrx_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t ra,
   // NOTE: if eb == 0 (so 16b aligned) this equals new_value
   //       we could optimize this to prevent the other load/mask, in that case.
   Value* ea = CalculateEA_0(f, ra, rb);
-  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant((int8_t)0xF));
+  Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstantInt8(0xF));
   // ea &= ~0xF
-  ea = f.And(ea, f.LoadConstant(~0xFull));
+  ea = f.And(ea, f.LoadConstantUint64(~0xFull));
   // v = (old & ~mask) | ((new << eb) & mask)
   Value* new_value = f.Permute(f.LoadVectorShr(eb), f.LoadVR(vd),
-                               f.LoadZero(VEC128_TYPE), INT8_TYPE);
+                               f.LoadZeroVec128(), INT8_TYPE);
   Value* old_value = f.ByteSwap(f.Load(ea, VEC128_TYPE));
   // mask = ~FFFF... >> eb
-  Value* mask = f.Permute(f.LoadVectorShr(eb), f.Not(f.LoadZero(VEC128_TYPE)),
-                          f.LoadZero(VEC128_TYPE), INT8_TYPE);
+  Value* mask = f.Permute(f.LoadVectorShr(eb), f.Not(f.LoadZeroVec128()),
+                          f.LoadZeroVec128(), INT8_TYPE);
   Value* v = f.Or(f.And(old_value, f.Not(mask)), f.And(new_value, mask));
   // ea &= ~0xF (handled above)
   f.Store(ea, f.ByteSwap(v));
@@ -511,7 +513,7 @@ int InstrEmit_vcfsx_(PPCHIRBuilder& f, uint32_t vd, uint32_t vb,
   // (VD) <- float(VB as signed) / 2^uimm
   float fuimm = static_cast<float>(std::exp2(uimm));
   Value* v = f.Div(f.VectorConvertI2F(f.LoadVR(vb)),
-                   f.Splat(f.LoadConstant(fuimm), VEC128_TYPE));
+                   f.Splat(f.LoadConstantFloat32(fuimm), VEC128_TYPE));
   f.StoreVR(vd, v);
   return 0;
 }
@@ -528,7 +530,7 @@ int InstrEmit_vcfux_(PPCHIRBuilder& f, uint32_t vd, uint32_t vb,
   // (VD) <- float(VB as unsigned) / 2^uimm
   float fuimm = static_cast<float>(std::exp2(uimm));
   Value* v = f.Div(f.VectorConvertI2F(f.LoadVR(vb), ARITHMETIC_UNSIGNED),
-                   f.Splat(f.LoadConstant(fuimm), VEC128_TYPE));
+                   f.Splat(f.LoadConstantFloat32(fuimm), VEC128_TYPE));
   f.StoreVR(vd, v);
   return 0;
 }
@@ -544,7 +546,8 @@ int InstrEmit_vctsxs_(PPCHIRBuilder& f, uint32_t vd, uint32_t vb,
                      uint32_t uimm) {
   // (VD) <- int_sat(VB as signed * 2^uimm)
   float fuimm = static_cast<float>(std::exp2(uimm));
-  Value* v = f.Mul(f.LoadVR(vb), f.Splat(f.LoadConstant(fuimm), VEC128_TYPE));
+  Value* v =
+      f.Mul(f.LoadVR(vb), f.Splat(f.LoadConstantFloat32(fuimm), VEC128_TYPE));
   v = f.VectorConvertF2I(v, ARITHMETIC_SATURATE);
   f.StoreVR(vd, v);
   return 0;
@@ -561,7 +564,8 @@ int InstrEmit_vctuxs_(PPCHIRBuilder& f, uint32_t vd, uint32_t vb,
                      uint32_t uimm) {
   // (VD) <- int_sat(VB as unsigned * 2^uimm)
   float fuimm = static_cast<float>(std::exp2(uimm));
-  Value* v = f.Mul(f.LoadVR(vb), f.Splat(f.LoadConstant(fuimm), VEC128_TYPE));
+  Value* v =
+      f.Mul(f.LoadVR(vb), f.Splat(f.LoadConstantFloat32(fuimm), VEC128_TYPE));
   v = f.VectorConvertF2I(v, ARITHMETIC_UNSIGNED | ARITHMETIC_SATURATE);
   f.StoreVR(vd, v);
   return 0;
@@ -581,10 +585,11 @@ int InstrEmit_vcmpbfp_(PPCHIRBuilder& f, InstrData& i, uint32_t vd, uint32_t va,
   Value* gt = f.VectorCompareSGT(va_value, vb_value, FLOAT32_TYPE);
   Value* lt =
       f.Not(f.VectorCompareSGE(va_value, f.Neg(vb_value), FLOAT32_TYPE));
-  Value* v = f.Or(f.And(gt, f.LoadConstant(vec128i(0x80000000, 0x80000000,
-                                                   0x80000000, 0x80000000))),
-                  f.And(lt, f.LoadConstant(vec128i(0x40000000, 0x40000000,
-                                                   0x40000000, 0x40000000))));
+  Value* v =
+      f.Or(f.And(gt, f.LoadConstantVec128(vec128i(0x80000000, 0x80000000,
+                                                  0x80000000, 0x80000000))),
+           f.And(lt, f.LoadConstantVec128(vec128i(0x40000000, 0x40000000,
+                                                  0x40000000, 0x40000000))));
   f.StoreVR(vd, v);
   if (rc) {
     // CR0:4 = 0; CR0:5 = VT == 0; CR0:6 = CR0:7 = 0;
@@ -958,9 +963,10 @@ XEEMITTER(vmrghb, 0x1000000C, VX)(PPCHIRBuilder& f, InstrData& i) {
   // (VD.b[i]) = (VA.b[i])
   // (VD.b[i+1]) = (VB.b[i+1])
   // ...
-  Value* v = f.Permute(f.LoadConstant(vec128b(0, 16, 1, 17, 2, 18, 3, 19, 4, 20,
-                                              5, 21, 6, 22, 7, 23)),
-                       f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT8_TYPE);
+  Value* v =
+      f.Permute(f.LoadConstantVec128(vec128b(0, 16, 1, 17, 2, 18, 3, 19, 4, 20,
+                                             5, 21, 6, 22, 7, 23)),
+                f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT8_TYPE);
   f.StoreVR(i.VX.VD, v);
   return 0;
 }
@@ -969,7 +975,7 @@ XEEMITTER(vmrghh, 0x1000004C, VX)(PPCHIRBuilder& f, InstrData& i) {
   // (VD.w[i]) = (VA.w[i])
   // (VD.w[i+1]) = (VB.w[i+1])
   // ...
-  Value* v = f.Permute(f.LoadConstant(vec128s(0, 8, 1, 9, 2, 10, 3, 11)),
+  Value* v = f.Permute(f.LoadConstantVec128(vec128s(0, 8, 1, 9, 2, 10, 3, 11)),
                        f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT16_TYPE);
   f.StoreVR(i.VX.VD, v);
   return 0;
@@ -980,8 +986,9 @@ int InstrEmit_vmrghw_(PPCHIRBuilder& f, uint32_t vd, uint32_t va, uint32_t vb) {
   // (VD.y) = (VB.x)
   // (VD.z) = (VA.y)
   // (VD.w) = (VB.y)
-  Value* v = f.Permute(f.LoadConstant(PERMUTE_MASK(0, 0, 1, 0, 0, 1, 1, 1)),
-                       f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
+  Value* v =
+      f.Permute(f.LoadConstantUint32(PERMUTE_MASK(0, 0, 1, 0, 0, 1, 1, 1)),
+                f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
   f.StoreVR(vd, v);
   return 0;
 }
@@ -996,9 +1003,10 @@ XEEMITTER(vmrglb, 0x1000010C, VX)(PPCHIRBuilder& f, InstrData& i) {
   // (VD.b[i]) = (VA.b[i])
   // (VD.b[i+1]) = (VB.b[i+1])
   // ...
-  Value* v = f.Permute(f.LoadConstant(vec128b(8, 24, 9, 25, 10, 26, 11, 27, 12,
-                                              28, 13, 29, 14, 30, 15, 31)),
-                       f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT8_TYPE);
+  Value* v =
+      f.Permute(f.LoadConstantVec128(vec128b(8, 24, 9, 25, 10, 26, 11, 27, 12,
+                                             28, 13, 29, 14, 30, 15, 31)),
+                f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT8_TYPE);
   f.StoreVR(i.VX.VD, v);
   return 0;
 }
@@ -1007,8 +1015,9 @@ XEEMITTER(vmrglh, 0x1000014C, VX)(PPCHIRBuilder& f, InstrData& i) {
   // (VD.w[i]) = (VA.w[i])
   // (VD.w[i+1]) = (VB.w[i+1])
   // ...
-  Value* v = f.Permute(f.LoadConstant(vec128s(4, 12, 5, 13, 6, 14, 7, 15)),
-                       f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT16_TYPE);
+  Value* v =
+      f.Permute(f.LoadConstantVec128(vec128s(4, 12, 5, 13, 6, 14, 7, 15)),
+                f.LoadVR(i.VX.VA), f.LoadVR(i.VX.VB), INT16_TYPE);
   f.StoreVR(i.VX.VD, v);
   return 0;
 }
@@ -1018,8 +1027,9 @@ int InstrEmit_vmrglw_(PPCHIRBuilder& f, uint32_t vd, uint32_t va, uint32_t vb) {
   // (VD.y) = (VB.z)
   // (VD.z) = (VA.w)
   // (VD.w) = (VB.w)
-  Value* v = f.Permute(f.LoadConstant(PERMUTE_MASK(0, 2, 1, 2, 0, 3, 1, 3)),
-                       f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
+  Value* v =
+      f.Permute(f.LoadConstantUint32(PERMUTE_MASK(0, 2, 1, 2, 0, 3, 1, 3)),
+                f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
   f.StoreVR(vd, v);
   return 0;
 }
@@ -1205,7 +1215,7 @@ XEEMITTER(vpermwi128, VX128_P(6, 528), VX128_P)(PPCHIRBuilder& f,
 int InstrEmit_vrefp_(PPCHIRBuilder& f, uint32_t vd, uint32_t vb) {
   // (VD) <- 1/(VB)
   vec128_t one = vec128f(1.0f);
-  Value* v = f.Div(f.LoadConstant(one), f.LoadVR(vb));
+  Value* v = f.Div(f.LoadConstantVec128(one), f.LoadVR(vb));
   f.StoreVR(vd, v);
   return 0;
 }
@@ -1338,7 +1348,8 @@ XEEMITTER(vrlimi128, VX128_4(6, 1808), VX128_4)(PPCHIRBuilder& f,
     v = f.LoadVR(vb);
   }
   if (blend_mask != PERMUTE_IDENTITY) {
-    v = f.Permute(f.LoadConstant(blend_mask), v, f.LoadVR(vd), INT32_TYPE);
+    v = f.Permute(f.LoadConstantUint32(blend_mask), v, f.LoadVR(vd),
+                  INT32_TYPE);
   }
   f.StoreVR(vd, v);
   return 0;
@@ -1379,7 +1390,7 @@ XEEMITTER(vsel128, VX128(5, 848), VX128)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(vsl, 0x100001C4, VX)(PPCHIRBuilder& f, InstrData& i) {
   Value* v = f.Shl(f.LoadVR(i.VX.VA),
                    f.And(f.Extract(f.LoadVR(i.VX.VB), 15, INT8_TYPE),
-                         f.LoadConstant(int8_t(0x7F))));
+                         f.LoadConstantInt8(0x7F)));
   f.StoreVR(i.VX.VD, v);
   return 0;
 }
@@ -1443,7 +1454,7 @@ int InstrEmit_vsldoi_(PPCHIRBuilder& f, uint32_t vd, uint32_t va, uint32_t vb,
   //   vsldoi128 vr63,vr63,vr63,4
   //   (ABCD ABCD) << 4b = (BCDA)
   // (VA << SH) OR (VB >> (16 - SH))
-  Value* control = f.LoadConstant(__vsldoi_table[sh]);
+  Value* control = f.LoadConstantVec128(__vsldoi_table[sh]);
   Value* v = f.Permute(control, f.LoadVR(va), f.LoadVR(vb), INT8_TYPE);
   f.StoreVR(vd, v);
   return 0;
@@ -1459,10 +1470,10 @@ XEEMITTER(vsldoi128, VX128_5(4, 16), VX128_5)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_vslo_(PPCHIRBuilder& f, uint32_t vd, uint32_t va, uint32_t vb) {
   // (VD) <- (VA) << (VB.b[F] & 0x78) (by octet)
   // TODO(benvanik): flag for shift-by-octet as optimization.
-  Value* sh = f.And(f.Extract(f.LoadVR(vb), 15, INT8_TYPE),
-                    f.LoadConstant(int8_t(0x78)));
-  Value* v = f.Permute(f.LoadVectorShl(sh), f.LoadVR(va),
-                       f.LoadZero(VEC128_TYPE), INT8_TYPE);
+  Value* sh =
+      f.And(f.Extract(f.LoadVR(vb), 15, INT8_TYPE), f.LoadConstantInt8(0x78));
+  Value* v = f.Permute(f.LoadVectorShl(sh), f.LoadVR(va), f.LoadZeroVec128(),
+                       INT8_TYPE);
   f.StoreVR(vd, v);
   return 0;
 }
@@ -1513,10 +1524,10 @@ XEEMITTER(vspltisb, 0x1000030C, VX)(PPCHIRBuilder& f, InstrData& i) {
   if (i.VX.VA) {
     // Sign extend from 5bits -> 8 and load.
     int8_t simm = (i.VX.VA & 0x10) ? (i.VX.VA | 0xF0) : i.VX.VA;
-    v = f.Splat(f.LoadConstant(simm), VEC128_TYPE);
+    v = f.Splat(f.LoadConstantInt8(simm), VEC128_TYPE);
   } else {
     // Zero out the register.
-    v = f.LoadZero(VEC128_TYPE);
+    v = f.LoadZeroVec128();
   }
   f.StoreVR(i.VX.VD, v);
   return 0;
@@ -1528,10 +1539,10 @@ XEEMITTER(vspltish, 0x1000034C, VX)(PPCHIRBuilder& f, InstrData& i) {
   if (i.VX.VA) {
     // Sign extend from 5bits -> 16 and load.
     int16_t simm = (i.VX.VA & 0x10) ? (i.VX.VA | 0xFFF0) : i.VX.VA;
-    v = f.Splat(f.LoadConstant(simm), VEC128_TYPE);
+    v = f.Splat(f.LoadConstantInt16(simm), VEC128_TYPE);
   } else {
     // Zero out the register.
-    v = f.LoadZero(VEC128_TYPE);
+    v = f.LoadZeroVec128();
   }
   f.StoreVR(i.VX.VD, v);
   return 0;
@@ -1543,10 +1554,10 @@ int InstrEmit_vspltisw_(PPCHIRBuilder& f, uint32_t vd, uint32_t uimm) {
   if (uimm) {
     // Sign extend from 5bits -> 32 and load.
     int32_t simm = (uimm & 0x10) ? (uimm | 0xFFFFFFF0) : uimm;
-    v = f.Splat(f.LoadConstant(simm), VEC128_TYPE);
+    v = f.Splat(f.LoadConstantInt32(simm), VEC128_TYPE);
   } else {
     // Zero out the register.
-    v = f.LoadZero(VEC128_TYPE);
+    v = f.LoadZeroVec128();
   }
   f.StoreVR(vd, v);
   return 0;
@@ -1562,7 +1573,7 @@ XEEMITTER(vspltisw128, VX128_3(6, 1904), VX128_3)(PPCHIRBuilder& f,
 XEEMITTER(vsr, 0x100002C4, VX)(PPCHIRBuilder& f, InstrData& i) {
   Value* v = f.Shr(f.LoadVR(i.VX.VA),
                    f.And(f.Extract(f.LoadVR(i.VX.VB), 15, INT8_TYPE),
-                         f.LoadConstant(int8_t(0x7F))));
+                         f.LoadConstantInt8(0x7F)));
   f.StoreVR(i.VX.VD, v);
   return 0;
 }
@@ -1611,10 +1622,10 @@ XEEMITTER(vsrh, 0x10000244, VX)(PPCHIRBuilder& f, InstrData& i) {
 int InstrEmit_vsro_(PPCHIRBuilder& f, uint32_t vd, uint32_t va, uint32_t vb) {
   // (VD) <- (VA) >> (VB.b[F] & 0x78) (by octet)
   // TODO(benvanik): flag for shift-by-octet as optimization.
-  Value* sh = f.And(f.Extract(f.LoadVR(vb), 15, INT8_TYPE),
-                    f.LoadConstant(uint8_t(0x78)));
-  Value* v = f.Permute(f.LoadVectorShr(sh), f.LoadVR(va),
-                       f.LoadZero(VEC128_TYPE), INT8_TYPE);
+  Value* sh =
+      f.And(f.Extract(f.LoadVR(vb), 15, INT8_TYPE), f.LoadConstantInt8(0x78));
+  Value* v = f.Permute(f.LoadVectorShr(sh), f.LoadVR(va), f.LoadZeroVec128(),
+                       INT8_TYPE);
   f.StoreVR(vd, v);
   return 0;
 }
@@ -2106,7 +2117,7 @@ XEEMITTER(vpkd3d128, VX128_4(6, 1552), VX128_4)(PPCHIRBuilder& f, InstrData& i) {
       assert_unhandled_case(pack);
       return 1;
   }
-  v = f.Permute(f.LoadConstant(control), f.LoadVR(vd), v, INT32_TYPE);
+  v = f.Permute(f.LoadConstantUint32(control), f.LoadVR(vd), v, INT32_TYPE);
   f.StoreVR(vd, v);
   return 0;
 }
@@ -2150,7 +2161,7 @@ int InstrEmit_vxor_(PPCHIRBuilder& f, uint32_t vd, uint32_t va, uint32_t vb) {
   Value* v;
   if (va == vb) {
     // Fast clear.
-    v = f.LoadZero(VEC128_TYPE);
+    v = f.LoadZeroVec128();
   } else {
     v = f.Xor(f.LoadVR(va), f.LoadVR(vb));
   }
diff --git a/src/xenia/cpu/frontend/ppc_emit_alu.cc b/src/xenia/cpu/frontend/ppc_emit_alu.cc
index 320003315..88cc89db8 100644
--- a/src/xenia/cpu/frontend/ppc_emit_alu.cc
+++ b/src/xenia/cpu/frontend/ppc_emit_alu.cc
@@ -102,7 +102,7 @@ XEEMITTER(addi, 0x38000000, D)(PPCHIRBuilder& f, InstrData& i) {
   //   RT <- EXTS(SI)
   // else
   //   RT <- (RA) + EXTS(SI)
-  Value* si = f.LoadConstant(XEEXTS16(i.D.DS));
+  Value* si = f.LoadConstantInt64(XEEXTS16(i.D.DS));
   Value* v = si;
   if (i.D.RA) {
     v = f.Add(f.LoadGPR(i.D.RA), si);
@@ -115,9 +115,9 @@ XEEMITTER(addic, 0x30000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- (RA) + EXTS(SI)
   // CA <- carry bit
   Value* ra = f.LoadGPR(i.D.RA);
-  Value* v = f.Add(ra, f.LoadConstant(XEEXTS16(i.D.DS)));
+  Value* v = f.Add(ra, f.LoadConstantInt64(XEEXTS16(i.D.DS)));
   f.StoreGPR(i.D.RT, v);
-  f.StoreCA(AddDidCarry(f, ra, f.LoadConstant(XEEXTS16(i.D.DS))));
+  f.StoreCA(AddDidCarry(f, ra, f.LoadConstantInt64(XEEXTS16(i.D.DS))));
   return 0;
 }
 
@@ -125,9 +125,9 @@ XEEMITTER(addicx, 0x34000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- (RA) + EXTS(SI)
   // CA <- carry bit
   Value* ra = f.LoadGPR(i.D.RA);
-  Value* v = f.Add(f.LoadGPR(i.D.RA), f.LoadConstant(XEEXTS16(i.D.DS)));
+  Value* v = f.Add(f.LoadGPR(i.D.RA), f.LoadConstantInt64(XEEXTS16(i.D.DS)));
   f.StoreGPR(i.D.RT, v);
-  f.StoreCA(AddDidCarry(f, ra, f.LoadConstant(XEEXTS16(i.D.DS))));
+  f.StoreCA(AddDidCarry(f, ra, f.LoadConstantInt64(XEEXTS16(i.D.DS))));
   f.UpdateCR(0, v);
   return 0;
 }
@@ -137,7 +137,7 @@ XEEMITTER(addis, 0x3C000000, D)(PPCHIRBuilder& f, InstrData& i) {
   //   RT <- EXTS(SI) || i16.0
   // else
   //   RT <- (RA) + EXTS(SI) || i16.0
-  Value* si = f.LoadConstant(XEEXTS16(i.D.DS) << 16);
+  Value* si = f.LoadConstantInt64(XEEXTS16(i.D.DS) << 16);
   Value* v = si;
   if (i.D.RA) {
     v = f.Add(f.LoadGPR(i.D.RA), si);
@@ -150,7 +150,7 @@ XEEMITTER(addmex, 0x7C0001D4, XO)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- (RA) + CA - 1
   // CA <- carry bit
   Value* ra = f.LoadGPR(i.XO.RA);
-  Value* v = f.AddWithCarry(ra, f.LoadConstant((int64_t)-1), f.LoadCA());
+  Value* v = f.AddWithCarry(ra, f.LoadConstantInt64(-1), f.LoadCA());
   f.StoreGPR(i.XO.RT, v);
   if (i.XO.OE) {
     // With XER[SO] update too.
@@ -158,8 +158,7 @@ XEEMITTER(addmex, 0x7C0001D4, XO)(PPCHIRBuilder& f, InstrData& i) {
     assert_always();
   } else {
     // Just CA update.
-    f.StoreCA(
-        AddWithCarryDidCarry(f, ra, f.LoadConstant((int64_t)-1), f.LoadCA()));
+    f.StoreCA(AddWithCarryDidCarry(f, ra, f.LoadConstantInt64(-1), f.LoadCA()));
   }
   if (i.XO.Rc) {
     f.UpdateCR(0, v);
@@ -171,7 +170,7 @@ XEEMITTER(addzex, 0x7C000194, XO)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- (RA) + CA
   // CA <- carry bit
   Value* ra = f.LoadGPR(i.XO.RA);
-  Value* v = f.AddWithCarry(ra, f.LoadZero(INT64_TYPE), f.LoadCA());
+  Value* v = f.AddWithCarry(ra, f.LoadZeroInt64(), f.LoadCA());
   f.StoreGPR(i.XO.RT, v);
   if (i.XO.OE) {
     // With XER[SO] update too.
@@ -179,7 +178,7 @@ XEEMITTER(addzex, 0x7C000194, XO)(PPCHIRBuilder& f, InstrData& i) {
     assert_always();
   } else {
     // Just CA update.
-    f.StoreCA(AddWithCarryDidCarry(f, ra, f.LoadZero(INT64_TYPE), f.LoadCA()));
+    f.StoreCA(AddWithCarryDidCarry(f, ra, f.LoadZeroInt64(), f.LoadCA()));
   }
   if (i.XO.Rc) {
     f.UpdateCR(0, v);
@@ -380,7 +379,7 @@ XEEMITTER(mulldx, 0x7C0001D2, XO)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(mulli, 0x1C000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // prod[0:127] <- (RA) × EXTS(SI)
   // RT <- prod[64:127]
-  Value* v = f.Mul(f.LoadGPR(i.D.RA), f.LoadConstant(XEEXTS16(i.D.DS)));
+  Value* v = f.Mul(f.LoadGPR(i.D.RA), f.LoadConstantInt64(XEEXTS16(i.D.DS)));
   f.StoreGPR(i.D.RT, v);
   return 0;
 }
@@ -470,9 +469,9 @@ XEEMITTER(subfcx, 0x7C000010, XO)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(subficx, 0x20000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- ¬(RA) + EXTS(SI) + 1
   Value* ra = f.LoadGPR(i.D.RA);
-  Value* v = f.Sub(f.LoadConstant(XEEXTS16(i.D.DS)), ra);
+  Value* v = f.Sub(f.LoadConstantInt64(XEEXTS16(i.D.DS)), ra);
   f.StoreGPR(i.D.RT, v);
-  f.StoreCA(SubDidCarry(f, f.LoadConstant(XEEXTS16(i.D.DS)), ra));
+  f.StoreCA(SubDidCarry(f, f.LoadConstantInt64(XEEXTS16(i.D.DS)), ra));
   return 0;
 }
 
@@ -497,14 +496,14 @@ XEEMITTER(subfex, 0x7C000110, XO)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(subfmex, 0x7C0001D0, XO)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- ¬(RA) + CA - 1
   Value* not_ra = f.Not(f.LoadGPR(i.XO.RA));
-  Value* v = f.AddWithCarry(not_ra, f.LoadConstant((int64_t)-1), f.LoadCA());
+  Value* v = f.AddWithCarry(not_ra, f.LoadConstantInt64(-1), f.LoadCA());
   f.StoreGPR(i.XO.RT, v);
   if (i.XO.OE) {
     assert_always();
     // e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
   } else {
-    f.StoreCA(AddWithCarryDidCarry(f, not_ra, f.LoadConstant((int64_t)-1),
-                                   f.LoadCA()));
+    f.StoreCA(
+        AddWithCarryDidCarry(f, not_ra, f.LoadConstantInt64(-1), f.LoadCA()));
   }
   if (i.XO.Rc) {
     f.UpdateCR(0, v);
@@ -515,14 +514,13 @@ XEEMITTER(subfmex, 0x7C0001D0, XO)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(subfzex, 0x7C000190, XO)(PPCHIRBuilder& f, InstrData& i) {
   // RT <- ¬(RA) + CA
   Value* not_ra = f.Not(f.LoadGPR(i.XO.RA));
-  Value* v = f.AddWithCarry(not_ra, f.LoadZero(INT64_TYPE), f.LoadCA());
+  Value* v = f.AddWithCarry(not_ra, f.LoadZeroInt64(), f.LoadCA());
   f.StoreGPR(i.XO.RT, v);
   if (i.XO.OE) {
     assert_always();
     // e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
   } else {
-    f.StoreCA(
-        AddWithCarryDidCarry(f, not_ra, f.LoadZero(INT64_TYPE), f.LoadCA()));
+    f.StoreCA(AddWithCarryDidCarry(f, not_ra, f.LoadZeroInt64(), f.LoadCA()));
   }
   if (i.XO.Rc) {
     f.UpdateCR(0, v);
@@ -579,10 +577,10 @@ XEEMITTER(cmpi, 0x2C000000, D)(PPCHIRBuilder& f, InstrData& i) {
   Value* rhs;
   if (L) {
     lhs = f.LoadGPR(i.D.RA);
-    rhs = f.LoadConstant(XEEXTS16(i.D.DS));
+    rhs = f.LoadConstantInt64(XEEXTS16(i.D.DS));
   } else {
     lhs = f.Truncate(f.LoadGPR(i.D.RA), INT32_TYPE);
-    rhs = f.LoadConstant((int32_t)XEEXTS16(i.D.DS));
+    rhs = f.LoadConstantInt32(int32_t(XEEXTS16(i.D.DS)));
   }
   f.UpdateCR(BF, lhs, rhs);
   return 0;
@@ -635,10 +633,10 @@ XEEMITTER(cmpli, 0x28000000, D)(PPCHIRBuilder& f, InstrData& i) {
   Value* rhs;
   if (L) {
     lhs = f.LoadGPR(i.D.RA);
-    rhs = f.LoadConstant((uint64_t)i.D.DS);
+    rhs = f.LoadConstantUint64(i.D.DS);
   } else {
     lhs = f.Truncate(f.LoadGPR(i.D.RA), INT32_TYPE);
-    rhs = f.LoadConstant((uint32_t)i.D.DS);
+    rhs = f.LoadConstantUint32(i.D.DS);
   }
   f.UpdateCR(BF, lhs, rhs, false);
   return 0;
@@ -668,7 +666,7 @@ XEEMITTER(andcx, 0x7C000078, X)(PPCHIRBuilder& f, InstrData& i) {
 
 XEEMITTER(andix, 0x70000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- (RS) & (i48.0 || UI)
-  Value* ra = f.And(f.LoadGPR(i.D.RT), f.LoadConstant((uint64_t)i.D.DS));
+  Value* ra = f.And(f.LoadGPR(i.D.RT), f.LoadConstantUint64(i.D.DS));
   f.StoreGPR(i.D.RA, ra);
   f.UpdateCR(0, ra);
   return 0;
@@ -677,7 +675,7 @@ XEEMITTER(andix, 0x70000000, D)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(andisx, 0x74000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- (RS) & (i32.0 || UI || i16.0)
   Value* ra =
-      f.And(f.LoadGPR(i.D.RT), f.LoadConstant((uint64_t(i.D.DS) << 16)));
+      f.And(f.LoadGPR(i.D.RT), f.LoadConstantUint64(uint64_t(i.D.DS) << 16));
   f.StoreGPR(i.D.RA, ra);
   f.UpdateCR(0, ra);
   return 0;
@@ -819,14 +817,15 @@ XEEMITTER(ori, 0x60000000, D)(PPCHIRBuilder& f, InstrData& i) {
     f.Nop();
     return 0;
   }
-  Value* ra = f.Or(f.LoadGPR(i.D.RT), f.LoadConstant((uint64_t)i.D.DS));
+  Value* ra = f.Or(f.LoadGPR(i.D.RT), f.LoadConstantUint64(i.D.DS));
   f.StoreGPR(i.D.RA, ra);
   return 0;
 }
 
 XEEMITTER(oris, 0x64000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- (RS) | (i32.0 || UI || i16.0)
-  Value* ra = f.Or(f.LoadGPR(i.D.RT), f.LoadConstant((uint64_t(i.D.DS) << 16)));
+  Value* ra =
+      f.Or(f.LoadGPR(i.D.RT), f.LoadConstantUint64(uint64_t(i.D.DS) << 16));
   f.StoreGPR(i.D.RA, ra);
   return 0;
 }
@@ -843,7 +842,7 @@ XEEMITTER(xorx, 0x7C000278, X)(PPCHIRBuilder& f, InstrData& i) {
 
 XEEMITTER(xori, 0x68000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- (RS) XOR (i48.0 || UI)
-  Value* ra = f.Xor(f.LoadGPR(i.D.RT), f.LoadConstant((uint64_t)i.D.DS));
+  Value* ra = f.Xor(f.LoadGPR(i.D.RT), f.LoadConstantUint64(i.D.DS));
   f.StoreGPR(i.D.RA, ra);
   return 0;
 }
@@ -851,7 +850,7 @@ XEEMITTER(xori, 0x68000000, D)(PPCHIRBuilder& f, InstrData& i) {
 XEEMITTER(xoris, 0x6C000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- (RS) XOR (i32.0 || UI || i16.0)
   Value* ra =
-      f.Xor(f.LoadGPR(i.D.RT), f.LoadConstant((uint64_t(i.D.DS) << 16)));
+      f.Xor(f.LoadGPR(i.D.RT), f.LoadConstantUint64(uint64_t(i.D.DS) << 16));
   f.StoreGPR(i.D.RA, ra);
   return 0;
 }
@@ -875,10 +874,10 @@ XEEMITTER(rld, 0x78000000, MDS)(PPCHIRBuilder& f, InstrData& i) {
       v = f.Shr(v, int8_t(mb));
     } else {
       if (sh) {
-        v = f.RotateLeft(v, f.LoadConstant((int8_t)sh));
+        v = f.RotateLeft(v, f.LoadConstantInt8(sh));
       }
       if (m != 0xFFFFFFFFFFFFFFFF) {
-        v = f.And(v, f.LoadConstant(m));
+        v = f.And(v, f.LoadConstantUint64(m));
       }
     }
     f.StoreGPR(i.MD.RA, v);
@@ -902,10 +901,10 @@ XEEMITTER(rld, 0x78000000, MDS)(PPCHIRBuilder& f, InstrData& i) {
      v = f.Shl(v, int8_t(sh));
     } else {
      if (sh) {
-        v = f.RotateLeft(v, f.LoadConstant((int8_t)sh));
+        v = f.RotateLeft(v, f.LoadConstantInt8(sh));
      }
      if (m != 0xFFFFFFFFFFFFFFFF) {
-        v = f.And(v, f.LoadConstant(m));
+        v = f.And(v, f.LoadConstantUint64(m));
      }
     }
     f.StoreGPR(i.MD.RA, v);
@@ -937,11 +936,12 @@ XEEMITTER(rld, 0x78000000, MDS)(PPCHIRBuilder& f, InstrData& i) {
     uint64_t m = XEMASK(mb, ~sh);
     Value* v = f.LoadGPR(i.MD.RT);
     if (sh) {
-      v = f.RotateLeft(v, f.LoadConstant((int8_t)sh));
+      v = f.RotateLeft(v, f.LoadConstantInt8(sh));
    }
     if (m != 0xFFFFFFFFFFFFFFFF) {
       Value* ra = f.LoadGPR(i.MD.RA);
-      v = f.Or(f.And(v, f.LoadConstant(m)), f.And(ra, f.LoadConstant(~m)));
+      v = f.Or(f.And(v, f.LoadConstantUint64(m)),
+               f.And(ra, f.LoadConstantUint64(~m)));
     }
     f.StoreGPR(i.MD.RA, v);
     if (i.MD.Rc) {
@@ -961,16 +961,16 @@ XEEMITTER(rlwimix, 0x50000000, M)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- r&m | (RA)&¬m
   Value* v = f.Truncate(f.LoadGPR(i.M.RT), INT32_TYPE);
   if (i.M.SH) {
-    v = f.RotateLeft(v, f.LoadConstant(i.M.SH));
+    v = f.RotateLeft(v, f.LoadConstantUint32(i.M.SH));
   }
   // Compiler sometimes masks with 0xFFFFFFFF (identity) - avoid the work here
   // as our truncation/zero-extend does it for us.
   uint32_t m = (uint32_t)XEMASK(i.M.MB + 32, i.M.ME + 32);
   if (!(i.M.MB == 0 && i.M.ME == 31)) {
-    v = f.And(v, f.LoadConstant(m));
+    v = f.And(v, f.LoadConstantUint32(m));
   }
   v = f.ZeroExtend(v, INT64_TYPE);
-  v = f.Or(v, f.And(f.LoadGPR(i.M.RA), f.LoadConstant((~(uint64_t)m))));
+  v = f.Or(v, f.And(f.LoadGPR(i.M.RA), f.LoadConstantUint64(~(uint64_t)m)));
   f.StoreGPR(i.M.RA, v);
   if (i.M.Rc) {
     f.UpdateCR(0, v);
@@ -990,12 +990,13 @@ XEEMITTER(rlwinmx, 0x54000000, M)(PPCHIRBuilder& f, InstrData& i) {
   // Which seems to just select some bits and set cr0 for use with a branch.
   // We can detect this and do less work.
   if (i.M.SH) {
-    v = f.RotateLeft(v, f.LoadConstant(i.M.SH));
+    v = f.RotateLeft(v, f.LoadConstantUint32(i.M.SH));
   }
   // Compiler sometimes masks with 0xFFFFFFFF (identity) - avoid the work here
   // as our truncation/zero-extend does it for us.
   if (!(i.M.MB == 0 && i.M.ME == 31)) {
-    v = f.And(v, f.LoadConstant((uint32_t)XEMASK(i.M.MB + 32, i.M.ME + 32)));
+    v = f.And(v,
+              f.LoadConstantUint32(uint32_t(XEMASK(i.M.MB + 32, i.M.ME + 32))));
   }
   v = f.ZeroExtend(v, INT64_TYPE);
   f.StoreGPR(i.M.RA, v);
@@ -1011,13 +1012,14 @@ XEEMITTER(rlwnmx, 0x5C000000, M)(PPCHIRBuilder& f, InstrData& i) {
   // m <- MASK(MB+32, ME+32)
   // RA <- r & m
   Value* v = f.Truncate(f.LoadGPR(i.M.RT), INT32_TYPE);
-  Value* sh =
-      f.And(f.Truncate(f.LoadGPR(i.M.SH), INT32_TYPE), f.LoadConstant(0x1F));
+  Value* sh = f.And(f.Truncate(f.LoadGPR(i.M.SH), INT32_TYPE),
+                    f.LoadConstantUint32(0x1F));
   v = f.RotateLeft(v, sh);
   // Compiler sometimes masks with 0xFFFFFFFF (identity) - avoid the work here
   // as our truncation/zero-extend does it for us.
   if (!(i.M.MB == 0 && i.M.ME == 31)) {
-    v = f.And(v, f.LoadConstant((uint32_t)XEMASK(i.M.MB + 32, i.M.ME + 32)));
+    v = f.And(v,
+              f.LoadConstantUint32(uint32_t(XEMASK(i.M.MB + 32, i.M.ME + 32))));
   }
   v = f.ZeroExtend(v, INT64_TYPE);
   f.StoreGPR(i.M.RA, v);
@@ -1037,9 +1039,9 @@ XEEMITTER(sldx, 0x7C000036, X)(PPCHIRBuilder& f, InstrData& i) {
   //   else
   //     m <- i64.0
   // RA <- r & m
-  Value* sh = f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE),
-                    f.LoadConstant(int8_t(0x7F)));
-  Value* v = f.Select(f.IsTrue(f.Shr(sh, 6)), f.LoadConstant(int64_t(0)),
+  Value* sh =
+      f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE), f.LoadConstantInt8(0x7F));
+  Value* v = f.Select(f.IsTrue(f.Shr(sh, 6)), f.LoadZeroInt64(),
                       f.Shl(f.LoadGPR(i.X.RT), sh));
   f.StoreGPR(i.X.RA, v);
   if (i.X.Rc) {
@@ -1056,9 +1058,9 @@ XEEMITTER(slwx, 0x7C000030, X)(PPCHIRBuilder& f, InstrData& i) {
   //   else
   //     m <- i64.0
   // RA <- r & m
-  Value* sh = f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE),
-                    f.LoadConstant(int8_t(0x3F)));
-  Value* v = f.Select(f.IsTrue(f.Shr(sh, 5)), f.LoadConstant(int32_t(0)),
+  Value* sh =
+      f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE), f.LoadConstantInt8(0x3F));
+  Value* v = f.Select(f.IsTrue(f.Shr(sh, 5)), f.LoadZeroInt32(),
                       f.Shl(f.Truncate(f.LoadGPR(i.X.RT), INT32_TYPE), sh));
   v = f.ZeroExtend(v, INT64_TYPE);
   f.StoreGPR(i.X.RA, v);
@@ -1078,8 +1080,8 @@ XEEMITTER(srdx, 0x7C000436, X)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- r & m
   // TODO(benvanik): if >3F, zero out the result.
   Value* sh = f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE);
-  Value* v = f.Select(f.IsTrue(f.And(sh, f.LoadConstant(int8_t(0x40)))),
-                      f.LoadConstant(int64_t(0)), f.Shr(f.LoadGPR(i.X.RT), sh));
+  Value* v = f.Select(f.IsTrue(f.And(sh, f.LoadConstantInt8(0x40))),
+                      f.LoadZeroInt64(), f.Shr(f.LoadGPR(i.X.RT), sh));
   f.StoreGPR(i.X.RA, v);
   if (i.X.Rc) {
     f.UpdateCR(0, v);
@@ -1097,9 +1099,9 @@ XEEMITTER(srwx, 0x7C000430, X)(PPCHIRBuilder& f, InstrData& i) {
   // RA <- r & m
   // TODO(benvanik): if >1F, zero out the result.
   Value* sh = f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE);
-  Value* v = f.Select(f.IsTrue(f.And(sh, f.LoadConstant(int8_t(0x20)))),
-                      f.LoadConstant(int32_t(0)),
-                      f.Shr(f.Truncate(f.LoadGPR(i.X.RT), INT32_TYPE), sh));
+  Value* v =
+      f.Select(f.IsTrue(f.And(sh, f.LoadConstantInt8(0x20))), f.LoadZeroInt32(),
+               f.Shr(f.Truncate(f.LoadGPR(i.X.RT), INT32_TYPE), sh));
   v = f.ZeroExtend(v, INT64_TYPE);
   f.StoreGPR(i.X.RA, v);
   if (i.X.Rc) {
@@ -1119,9 +1121,9 @@ XEEMITTER(sradx, 0x7C000634, X)(PPCHIRBuilder& f, InstrData& i) {
   // if n == 0: rA <- rS, XER[CA] = 0
   // if n >= 64: rA <- 64 sign bits of rS, XER[CA] = sign bit of rS
   Value* rt = f.LoadGPR(i.X.RT);
-  Value* sh = f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE),
-                    f.LoadConstant(int8_t(0x7F)));
-  Value* clamp_sh = f.Min(sh, f.LoadConstant(int8_t(0x3F)));
+  Value* sh =
+      f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE), f.LoadConstantInt8(0x7F));
+  Value* clamp_sh = f.Min(sh, f.LoadConstantInt8(0x3F));
   Value* v = f.Sha(rt, clamp_sh);
 
   // CA is set if any bits are shifted out of the right and if the result
@@ -1154,12 +1156,12 @@ XEEMITTER(sradix, 0x7C000674, XS)(PPCHIRBuilder& f, InstrData& i) {
   if (sh) {
     uint64_t mask = XEMASK(64 - sh, 63);
     Value* ca = f.And(f.Truncate(f.Shr(v, 63), INT8_TYPE),
-                      f.IsTrue(f.And(v, f.LoadConstant(mask))));
+                      f.IsTrue(f.And(v, f.LoadConstantUint64(mask))));
     f.StoreCA(ca);
 
     v = f.Sha(v, sh);
   } else {
-    f.StoreCA(f.LoadZero(INT8_TYPE));
+    f.StoreCA(f.LoadZeroInt8());
   }
   f.StoreGPR(i.XS.RA, v);
 
@@ -1179,9 +1181,9 @@ XEEMITTER(srawx, 0x7C000630, X)(PPCHIRBuilder& f, InstrData& i) {
   // if n == 0: rA <- sign_extend(rS), XER[CA] = 0
   // if n >= 32: rA <- 64 sign bits of rS, XER[CA] = sign bit of lo_32(rS)
   Value* rt = f.Truncate(f.LoadGPR(i.X.RT), INT32_TYPE);
-  Value* sh = f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE),
-                    f.LoadConstant(int8_t(0x3F)));
-  Value* clamp_sh = f.Min(sh, f.LoadConstant(int8_t(0x1F)));
+  Value* sh =
+      f.And(f.Truncate(f.LoadGPR(i.X.RB), INT8_TYPE), f.LoadConstantInt8(0x3F));
+  Value* clamp_sh = f.Min(sh, f.LoadConstantInt8(0x1F));
   Value* v = f.Sha(rt, f.Min(sh, clamp_sh));
 
   // CA is set if any bits are shifted out of the right and if the result
@@ -1212,13 +1214,13 @@ XEEMITTER(srawix, 0x7C000670, X)(PPCHIRBuilder& f, InstrData& i) {
   if (!i.X.RB) {
     // No shift, just a fancy sign extend and CA clearer.
     v = f.SignExtend(v, INT64_TYPE);
-    ca = f.LoadZero(INT8_TYPE);
+    ca = f.LoadZeroInt8();
   } else {
     // CA is set if any bits are shifted out of the right and if the result
    // is negative.
    uint32_t mask = (uint32_t)XEMASK(64 - i.X.RB, 63);
     ca = f.And(f.Truncate(f.Shr(v, 31), INT8_TYPE),
-               f.IsTrue(f.And(v, f.LoadConstant(mask))));
+               f.IsTrue(f.And(v, f.LoadConstantUint32(mask))));
 
     v = f.Sha(v, (int8_t)i.X.RB), v = f.SignExtend(v, INT64_TYPE);
   }
diff --git a/src/xenia/cpu/frontend/ppc_emit_control.cc b/src/xenia/cpu/frontend/ppc_emit_control.cc
index 597acabd5..633844987 100644
--- a/src/xenia/cpu/frontend/ppc_emit_control.cc
+++ b/src/xenia/cpu/frontend/ppc_emit_control.cc
@@ -33,7 +33,7 @@ int InstrEmit_branch(PPCHIRBuilder& f, const char* src, uint64_t cia,
   // Note that we do the update before we branch/call as we need it to
   // be correct for returns.
   if (lk) {
-    Value* return_address = f.LoadConstant(cia + 4);
+    Value* return_address = f.LoadConstantUint64(cia + 4);
     f.SetReturnAddress(return_address);
     f.StoreLR(return_address);
   }
@@ -156,7 +156,8 @@ XEEMITTER(bx, 0x48000000, I)(PPCHIRBuilder& f, InstrData& i) {
     nia = (uint32_t)(i.address + XEEXTS26(i.I.LI << 2));
   }
 
-  return InstrEmit_branch(f, "bx", i.address, f.LoadConstant(nia), i.I.LK);
+  return InstrEmit_branch(f, "bx", i.address, f.LoadConstantUint32(nia),
+                          i.I.LK);
 }
 
 XEEMITTER(bcx, 0x40000000, B)(PPCHIRBuilder& f, InstrData& i) {
@@ -182,7 +183,7 @@ XEEMITTER(bcx, 0x40000000, B)(PPCHIRBuilder& f, InstrData& i) {
   } else {
     // Decrement counter.
     Value* ctr = f.LoadCTR();
-    ctr = f.Sub(ctr, f.LoadConstant((int64_t)1));
+    ctr = f.Sub(ctr, f.LoadConstantUint64(1));
     f.StoreCTR(ctr);
     // Ctr check.
     ctr = f.Truncate(ctr, INT32_TYPE);
@@ -232,8 +233,8 @@ XEEMITTER(bcx, 0x40000000, B)(PPCHIRBuilder& f, InstrData& i) {
   } else {
     nia = (uint32_t)(i.address + XEEXTS16(i.B.BD << 2));
   }
-  return InstrEmit_branch(f, "bcx", i.address, f.LoadConstant(nia), i.B.LK, ok,
-                          expect_true);
+  return InstrEmit_branch(f, "bcx", i.address, f.LoadConstantUint32(nia),
+                          i.B.LK, ok, expect_true);
 }
 
 XEEMITTER(bcctrx, 0x4C000420, XL)(PPCHIRBuilder& f, InstrData& i) {
@@ -288,7 +289,7 @@ XEEMITTER(bclrx, 0x4C000020, XL)(PPCHIRBuilder& f, InstrData& i) {
   } else {
     // Decrement counter.
     Value* ctr = f.LoadCTR();
-    ctr = f.Sub(ctr, f.LoadConstant((int64_t)1));
+    ctr = f.Sub(ctr, f.LoadConstantUint64(1));
     f.StoreCTR(ctr);
     // Ctr check.
     ctr = f.Truncate(ctr, INT32_TYPE);
@@ -490,7 +491,7 @@ XEEMITTER(tdi, 0x08000000, D)(PPCHIRBuilder& f, InstrData& i) {
   // if (a <u EXTS(SI)) & TO[3] then TRAP
   // if (a >u EXTS(SI)) & TO[4] then TRAP
   Value* ra = f.LoadGPR(i.D.RA);
-  Value* rb = f.LoadConstant(XEEXTS16(i.D.DS));
+  Value* rb = f.LoadConstantInt64(XEEXTS16(i.D.DS));
   return InstrEmit_trap(f, i, ra, rb, i.D.RT);
 }
 
@@ -524,7 +525,7 @@ XEEMITTER(twi, 0x0C000000, D)(PPCHIRBuilder& f, InstrData& i) {
   }
   Value* ra =
       f.SignExtend(f.Truncate(f.LoadGPR(i.D.RA), INT32_TYPE), INT64_TYPE);
-  Value* rb = f.LoadConstant(XEEXTS16(i.D.DS));
+  Value* rb = f.LoadConstantInt64(XEEXTS16(i.D.DS));
   return InstrEmit_trap(f, i, ra, rb, i.D.RT);
 }
 
@@ -562,7 +563,7 @@ XEEMITTER(mfcr, 0x7C000026, XFX)(PPCHIRBuilder& f, InstrData& i) {
     if (count == 1) {
       v = f.LoadCR(cri);
     } else {
-      v = f.LoadZero(INT64_TYPE);
+      v = f.LoadZeroInt64();
    }
   } else {
     v = f.LoadCR();
   }
@@ -646,7 +647,7 @@ XEEMITTER(mtcrf, 0x7C000120, XFX)(PPCHIRBuilder& f, InstrData& i) {
       f.StoreCR(cri, v);
     } else {
       // Invalid; store zero to CR.
-      f.StoreCR(f.LoadZero(INT64_TYPE));
+      f.StoreCR(f.LoadZeroInt64());
    }
   } else {
     f.StoreCR(v);
diff --git a/src/xenia/cpu/frontend/ppc_emit_fpu.cc b/src/xenia/cpu/frontend/ppc_emit_fpu.cc
index e7a1e9d61..13ea7fcbf 100644
--- a/src/xenia/cpu/frontend/ppc_emit_fpu.cc
+++ b/src/xenia/cpu/frontend/ppc_emit_fpu.cc
@@ -116,9 +116,9 @@ XEEMITTER(fmulsx, 0xEC000032, A)(PPCHIRBuilder& f, InstrData& i) {
 
 XEEMITTER(fresx, 0xEC000030, A)(PPCHIRBuilder& f, InstrData& i) {
   // frD <- 1.0 / (frB)
-  Value* v = f.Convert(
-      f.Div(f.LoadConstant(1.0f), f.Convert(f.LoadFPR(i.A.FRB), FLOAT32_TYPE)),
-      FLOAT64_TYPE);
+  Value* v = f.Convert(f.Div(f.LoadConstantFloat32(1.0f),
+                             f.Convert(f.LoadFPR(i.A.FRB), FLOAT32_TYPE)),
+                       FLOAT64_TYPE);
   f.StoreFPR(i.A.FRT, v);
   // f.UpdateFPRF(v);
   if (i.A.Rc) {
@@ -174,7 +174,7 @@ XEEMITTER(fselx, 0xFC00002E, A)(PPCHIRBuilder& f, InstrData& i) {
   // if (frA) >= 0.0
   // then frD <- (frC)
   // else frD <- (frB)
-  Value* ge = f.CompareSGE(f.LoadFPR(i.A.FRA), f.LoadConstant(0.0));
+  Value* ge = f.CompareSGE(f.LoadFPR(i.A.FRA), f.LoadZeroFloat64());
   Value* v = f.Select(ge, f.LoadFPR(i.A.FRC), f.LoadFPR(i.A.FRB));
   f.StoreFPR(i.A.FRT, v);
   if (i.A.Rc) {
diff --git a/src/xenia/cpu/frontend/ppc_emit_memory.cc b/src/xenia/cpu/frontend/ppc_emit_memory.cc
index 38ef567e3..f66ee34b6 100644
--- a/src/xenia/cpu/frontend/ppc_emit_memory.cc
+++ b/src/xenia/cpu/frontend/ppc_emit_memory.cc
@@ -58,7 +58,7 @@ Value* CalculateEA_i(PPCHIRBuilder& f, uint32_t ra, uint64_t imm) {
                        f.LoadConstant((int32_t)imm)),
                 INT64_TYPE);
 #else
-  return f.Add(f.LoadGPR(ra), f.LoadConstant(imm));
+  return f.Add(f.LoadGPR(ra), f.LoadConstantUint64(imm));
 #endif  // TRUNCATE_ADDRESSES
 }
 
@@ -73,9 +73,9 @@ Value* CalculateEA_0_i(PPCHIRBuilder& f, uint32_t ra, uint64_t imm) {
   }
 #else
   if (ra) {
-    return f.Add(f.LoadGPR(ra), f.LoadConstant(imm));
+    return f.Add(f.LoadGPR(ra), f.LoadConstantUint64(imm));
   } else {
-    return f.LoadConstant(imm);
+    return f.LoadConstantUint64(imm);
   }
 #endif  // TRUNCATE_ADDRESSES
 }
@@ -1000,8 +1000,8 @@ XEEMITTER(dcbz, 0x7C0007EC, X)(PPCHIRBuilder& f, InstrData& i) {
     block_size = 32;
     address_mask = ~31;
   }
-  f.Memset(f.And(ea, f.LoadConstant(int64_t(address_mask))),
-           f.LoadZero(INT8_TYPE), f.LoadConstant(int64_t(block_size)));
+  f.Memset(f.And(ea, f.LoadConstantInt64(address_mask)), f.LoadZeroInt8(),
+           f.LoadConstantInt64(block_size));
   return 0;
 }
 
diff --git a/src/xenia/cpu/frontend/ppc_hir_builder.cc b/src/xenia/cpu/frontend/ppc_hir_builder.cc
index 326d62cac..757fb2833 100644
--- a/src/xenia/cpu/frontend/ppc_hir_builder.cc
+++ b/src/xenia/cpu/frontend/ppc_hir_builder.cc
@@ -135,7 +135,7 @@ bool PPCHIRBuilder::Emit(FunctionInfo* symbol_info, uint32_t flags) {
       DebugBreak();
     } else {
       auto left = LoadGPR(FLAGS_break_condition_gpr);
-      auto right = LoadConstant(FLAGS_break_condition_value);
+      auto right = LoadConstantUint64(FLAGS_break_condition_value);
       if (FLAGS_break_condition_truncate) {
         left = Truncate(left, INT32_TYPE);
         right = Truncate(right, INT32_TYPE);
@@ -289,16 +289,16 @@ void PPCHIRBuilder::StoreCR(uint32_t n, Value* value) {
   // Optimization passes will kill any unneeded stores (mostly).
   StoreContext(offsetof(PPCContext, cr0) + (4 * n) + 0,
                And(Truncate(Shr(value, 4 * (7 - n) + 3), INT8_TYPE),
-                   LoadConstant(uint8_t(1))));
+                   LoadConstantUint8(1)));
   StoreContext(offsetof(PPCContext, cr0) + (4 * n) + 1,
                And(Truncate(Shr(value, 4 * (7 - n) + 2), INT8_TYPE),
-                   LoadConstant(uint8_t(1))));
+                   LoadConstantUint8(1)));
   StoreContext(offsetof(PPCContext, cr0) + (4 * n) + 2,
                And(Truncate(Shr(value, 4 * (7 - n) + 1), INT8_TYPE),
-                   LoadConstant(uint8_t(1))));
+                   LoadConstantUint8(1)));
   StoreContext(offsetof(PPCContext, cr0) + (4 * n) + 3,
                And(Truncate(Shr(value, 4 * (7 - n) + 0), INT8_TYPE),
-                   LoadConstant(uint8_t(1))));
+                   LoadConstantUint8(1)));
 }
 
 void PPCHIRBuilder::StoreCRField(uint32_t n, uint32_t bit, Value* value) {
@@ -308,7 +308,7 @@ void PPCHIRBuilder::StoreCRField(uint32_t n, uint32_t bit, Value* value) {
 }
 
 void PPCHIRBuilder::UpdateCR(uint32_t n, Value* lhs, bool is_signed) {
-  UpdateCR(n, Truncate(lhs, INT32_TYPE), LoadZero(INT32_TYPE), is_signed);
+  UpdateCR(n, Truncate(lhs, INT32_TYPE), LoadZeroInt32(), is_signed);
 }
 
 void PPCHIRBuilder::UpdateCR(uint32_t n, Value* lhs, Value* rhs,
@@ -337,8 +337,8 @@ void PPCHIRBuilder::UpdateCR6(Value* src_value) {
   // Testing for all 1's and all 0's.
   // if (Rc) CR6 = all_equal | 0 | none_equal | 0
   // TODO(benvanik): efficient instruction?
-  StoreContext(offsetof(PPCContext, cr6.cr6_1), LoadZero(INT8_TYPE));
-  StoreContext(offsetof(PPCContext, cr6.cr6_3), LoadZero(INT8_TYPE));
+  StoreContext(offsetof(PPCContext, cr6.cr6_1), LoadZeroInt8());
+  StoreContext(offsetof(PPCContext, cr6.cr6_3), LoadZeroInt8());
   StoreContext(offsetof(PPCContext, cr6.cr6_all_equal),
                IsFalse(Not(src_value)));
   StoreContext(offsetof(PPCContext, cr6.cr6_none_equal), IsFalse(src_value));
@@ -456,12 +456,12 @@ Value* PPCHIRBuilder::StoreRelease(Value* address, Value* value,
                                    uint32_t store_flags) {
   Value* old_address = AtomicExchange(
       LoadContext(offsetof(PPCContext, reserve_address), INT64_TYPE),
-      LoadZero(INT32_TYPE));
+      LoadZeroInt32());
   // Ensure the reservation addresses match.
   Value* eq = CompareEQ(Truncate(address, INT32_TYPE), old_address);
   StoreContext(offsetof(PPCContext, cr0.cr0_eq), eq);
-  StoreContext(offsetof(PPCContext, cr0.cr0_lt), LoadZero(INT8_TYPE));
-  StoreContext(offsetof(PPCContext, cr0.cr0_gt), LoadZero(INT8_TYPE));
+  StoreContext(offsetof(PPCContext, cr0.cr0_lt), LoadZeroInt8());
+  StoreContext(offsetof(PPCContext, cr0.cr0_gt), LoadZeroInt8());
   auto skip_label = NewLabel();
   BranchFalse(eq, skip_label, BRANCH_UNLIKELY);
   Store(address, value, store_flags);
diff --git a/src/xenia/cpu/hir/hir_builder.cc b/src/xenia/cpu/hir/hir_builder.cc
index d77e1f634..fbbf3b226 100644
--- a/src/xenia/cpu/hir/hir_builder.cc
+++ b/src/xenia/cpu/hir/hir_builder.cc
@@ -1083,67 +1083,67 @@ Value* HIRBuilder::LoadZero(TypeName type) {
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(int8_t value) {
+Value* HIRBuilder::LoadConstantInt8(int8_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(uint8_t value) {
+Value* HIRBuilder::LoadConstantUint8(uint8_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(int16_t value) {
+Value* HIRBuilder::LoadConstantInt16(int16_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(uint16_t value) {
+Value* HIRBuilder::LoadConstantUint16(uint16_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(int32_t value) {
+Value* HIRBuilder::LoadConstantInt32(int32_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(uint32_t value) {
+Value* HIRBuilder::LoadConstantUint32(uint32_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(int64_t value) {
+Value* HIRBuilder::LoadConstantInt64(int64_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(uint64_t value) {
+Value* HIRBuilder::LoadConstantUint64(uint64_t value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(float value) {
+Value* HIRBuilder::LoadConstantFloat32(float value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(double value) {
+Value* HIRBuilder::LoadConstantFloat64(double value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
 }
 
-Value* HIRBuilder::LoadConstant(const vec128_t& value) {
+Value* HIRBuilder::LoadConstantVec128(const vec128_t& value) {
   Value* dest = AllocValue();
   dest->set_constant(value);
   return dest;
@@ -1332,7 +1332,7 @@ Value* HIRBuilder::Select(Value* cond, Value* value1, Value* value2) {
 
 Value* HIRBuilder::IsTrue(Value* value) {
   if (value->IsConstant()) {
-    return LoadConstant(value->IsConstantTrue() ? 1 : 0);
+    return LoadConstantInt8(value->IsConstantTrue() ? 1 : 0);
   }
 
   Instr* i = AppendInstr(OPCODE_IS_TRUE_info, 0, AllocValue(INT8_TYPE));
@@ -1343,7 +1343,7 @@ Value* HIRBuilder::IsTrue(Value* value) {
 
 Value* HIRBuilder::IsFalse(Value* value) {
   if (value->IsConstant()) {
-    return LoadConstant(value->IsConstantFalse() ? 1 : 0);
+    return LoadConstantInt8(value->IsConstantFalse() ? 1 : 0);
   }
 
   Instr* i = AppendInstr(OPCODE_IS_FALSE_info, 0, AllocValue(INT8_TYPE));
@@ -1356,8 +1356,7 @@ Value* HIRBuilder::CompareXX(const OpcodeInfo& opcode, Value* value1,
                              Value* value2) {
   ASSERT_TYPES_EQUAL(value1, value2);
   if (value1->IsConstant() && value2->IsConstant()) {
-    return LoadConstant(value1->Compare(opcode.num, value2) ? int8_t(1)
-                                                            : int8_t(0));
+    return LoadConstantInt8(value1->Compare(opcode.num, value2) ? 1 : 0);
   }
 
   Instr* i = AppendInstr(opcode, 0, AllocValue(INT8_TYPE));
@@ -1785,7 +1784,7 @@ Value* HIRBuilder::Shl(Value* value1, Value* value2) {
   return i->dest;
 }
 Value* HIRBuilder::Shl(Value* value1, int8_t value2) {
-  return Shl(value1, LoadConstant(value2));
+  return Shl(value1, LoadConstantInt8(value2));
 }
 
 Value* HIRBuilder::VectorShl(Value* value1, Value* value2, TypeName part_type) {
@@ -1818,7 +1817,7 @@ Value* HIRBuilder::Shr(Value* value1, Value* value2) {
   return i->dest;
 }
 Value* HIRBuilder::Shr(Value* value1, int8_t value2) {
-  return Shr(value1, LoadConstant(value2));
+  return Shr(value1, LoadConstantInt8(value2));
 }
 
 Value* HIRBuilder::VectorShr(Value* value1, Value* value2, TypeName part_type) {
@@ -1851,7 +1850,7 @@ Value* HIRBuilder::Sha(Value* value1, Value* value2) {
   return i->dest;
 }
 Value* HIRBuilder::Sha(Value* value1, int8_t value2) {
-  return Sha(value1, LoadConstant(value2));
+  return Sha(value1, LoadConstantInt8(value2));
 }
 
 Value* HIRBuilder::VectorSha(Value* value1, Value* value2, TypeName part_type) {
@@ -1935,7 +1934,7 @@ Value* HIRBuilder::CountLeadingZeros(Value* value) {
         8, 16, 32, 64,
     };
     assert_true(value->type <= INT64_TYPE);
-    return LoadConstant(zeros[value->type]);
+    return LoadConstantUint8(zeros[value->type]);
   }
 
   Instr* i = AppendInstr(OPCODE_CNTLZ_info, 0, AllocValue(INT8_TYPE));
@@ -1958,7 +1957,7 @@ Value* HIRBuilder::Insert(Value* value, Value* index, Value* part) {
 }
 
 Value* HIRBuilder::Insert(Value* value, uint64_t index, Value* part) {
-  return Insert(value, LoadConstant(index), part);
+  return Insert(value, LoadConstantUint64(index), part);
 }
 
 Value* HIRBuilder::Extract(Value* value, Value* index, TypeName target_type) {
@@ -1975,7 +1974,7 @@ Value* HIRBuilder::Extract(Value* value, Value* index, TypeName target_type) {
 }
 
 Value* HIRBuilder::Extract(Value* value, uint8_t index, TypeName target_type) {
-  return Extract(value, LoadConstant(index), target_type);
+  return Extract(value, LoadConstantUint8(index), target_type);
 }
 
 Value* HIRBuilder::Splat(Value* value, TypeName target_type) {
@@ -2022,7 +2021,7 @@ Value* HIRBuilder::Swizzle(Value* value, TypeName part_type,
 }
 
 Value* HIRBuilder::Pack(Value* value, uint32_t pack_flags) {
-  return Pack(value, LoadZero(VEC128_TYPE), pack_flags);
+  return Pack(value, LoadZeroVec128(), pack_flags);
 }
 
 Value* HIRBuilder::Pack(Value* value1, Value* value2, uint32_t pack_flags) {
diff --git a/src/xenia/cpu/hir/hir_builder.h b/src/xenia/cpu/hir/hir_builder.h
index 89e8d0396..faa10bc12 100644
--- a/src/xenia/cpu/hir/hir_builder.h
+++ b/src/xenia/cpu/hir/hir_builder.h
@@ -112,17 +112,25 @@ class HIRBuilder {
   Value* VectorConvertF2I(Value* value, uint32_t arithmetic_flags = 0);
 
   Value* LoadZero(TypeName type);
-  Value* LoadConstant(int8_t value);
-  Value* LoadConstant(uint8_t value);
-  Value* LoadConstant(int16_t value);
-  Value* LoadConstant(uint16_t value);
-  Value* LoadConstant(int32_t value);
-  Value* LoadConstant(uint32_t value);
-  Value* LoadConstant(int64_t value);
-  Value* LoadConstant(uint64_t value);
-  Value* LoadConstant(float value);
-  Value* LoadConstant(double value);
-  Value* LoadConstant(const vec128_t& value);
+  Value* LoadZeroInt8() { return LoadZero(INT8_TYPE); }
+  Value* LoadZeroInt16() { return LoadZero(INT16_TYPE); }
+  Value* LoadZeroInt32() { return LoadZero(INT32_TYPE); }
+  Value* LoadZeroInt64() { return LoadZero(INT64_TYPE); }
+  Value* LoadZeroFloat32() { return LoadZero(FLOAT32_TYPE); }
+  Value* LoadZeroFloat64() { return LoadZero(FLOAT64_TYPE); }
+  Value* LoadZeroVec128() { return LoadZero(VEC128_TYPE); }
+
+  Value* LoadConstantInt8(int8_t value);
+  Value* LoadConstantUint8(uint8_t value);
+  Value* LoadConstantInt16(int16_t value);
+  Value* LoadConstantUint16(uint16_t value);
+  Value* LoadConstantInt32(int32_t value);
+  Value* LoadConstantUint32(uint32_t value);
+  Value* LoadConstantInt64(int64_t value);
+  Value* LoadConstantUint64(uint64_t value);
+  Value* LoadConstantFloat32(float value);
+  Value* LoadConstantFloat64(double value);
+  Value* LoadConstantVec128(const vec128_t& value);
 
   Value* LoadVectorShl(Value* sh);
   Value* LoadVectorShr(Value* sh);
diff --git a/src/xenia/cpu/test/test_extract.cc b/src/xenia/cpu/test/test_extract.cc
index 7b90c97f7..d58399d9d 100644
--- a/src/xenia/cpu/test/test_extract.cc
+++ b/src/xenia/cpu/test/test_extract.cc
@@ -43,8 +43,8 @@ TEST_CASE("EXTRACT_INT8_CONSTANT", "[instr]") {
     TestFunction(
         [i](HIRBuilder& b) {
           StoreGPR(b, 3,
-                   b.ZeroExtend(b.Extract(LoadVR(b, 4),
-                                          b.LoadConstant(int8_t(i)), INT8_TYPE),
+                   b.ZeroExtend(b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i),
+                                          INT8_TYPE),
                                 INT64_TYPE));
           b.Return();
         }).Run([i](PPCContext* ctx) {
@@ -83,11 +83,10 @@ TEST_CASE("EXTRACT_INT16", "[instr]") {
 TEST_CASE("EXTRACT_INT16_CONSTANT", "[instr]") {
   for (int i = 0; i < 8; ++i) {
     TestFunction([i](HIRBuilder& b) {
-      StoreGPR(b, 3,
-               b.ZeroExtend(b.Extract(LoadVR(b, 4),
-                                      b.LoadConstant(int8_t(i)),
-                                      INT16_TYPE),
-                            INT64_TYPE));
+      StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4),
+                                            b.LoadConstantInt8(i),
+                                            INT16_TYPE),
+                                  INT64_TYPE));
       b.Return();
     }).Run([i](PPCContext* ctx) {
       ctx->r[4] = i;
@@ -123,11 +122,10 @@ TEST_CASE("EXTRACT_INT32", "[instr]") {
 TEST_CASE("EXTRACT_INT32_CONSTANT", "[instr]") {
   for (int i = 0; i < 4; ++i) {
     TestFunction([i](HIRBuilder& b) {
-      StoreGPR(b, 3,
-               b.ZeroExtend(b.Extract(LoadVR(b, 4),
-                                      b.LoadConstant(int8_t(i)),
-                                      INT32_TYPE),
-                            INT64_TYPE));
+      StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4),
+                                            b.LoadConstantInt8(i),
+                                            INT32_TYPE),
+                                  INT64_TYPE));
       b.Return();
     }).Run([i](PPCContext* ctx) {
      ctx->r[4] = i;
diff --git a/src/xenia/cpu/test/test_insert.cc b/src/xenia/cpu/test/test_insert.cc
index 882ed1488..423c97bef 100644
--- a/src/xenia/cpu/test/test_insert.cc
+++ b/src/xenia/cpu/test/test_insert.cc
@@ -20,7 +20,7 @@ using xe::cpu::frontend::PPCContext;
 TEST_CASE("INSERT_INT8", "[instr]") {
   for (int i = 0; i < 16; ++i) {
     TestFunction test([i](HIRBuilder& b) {
-      StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstant(i),
+      StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
                              b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
       b.Return();
     });
@@ -43,7 +43,7 @@ TEST_CASE("INSERT_INT8", "[instr]") {
 TEST_CASE("INSERT_INT16", "[instr]") {
   for (int i = 0; i < 8; ++i) {
     TestFunction test([i](HIRBuilder& b) {
-      StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstant(i),
+      StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
                              b.Truncate(LoadGPR(b, 5), INT16_TYPE)));
       b.Return();
     });
@@ -64,7 +64,7 @@ TEST_CASE("INSERT_INT16", "[instr]") {
 TEST_CASE("INSERT_INT32", "[instr]") {
   for (int i = 0; i < 4; ++i) {
     TestFunction test([i](HIRBuilder& b) {
-      StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstant(i),
+      StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
                              b.Truncate(LoadGPR(b, 5), INT32_TYPE)));
       b.Return();
     });
diff --git a/src/xenia/cpu/test/test_permute.cc b/src/xenia/cpu/test/test_permute.cc
index 4530eb562..a4be8fbe3 100644
--- a/src/xenia/cpu/test/test_permute.cc
+++ b/src/xenia/cpu/test/test_permute.cc
@@ -19,8 +19,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
   {
     uint32_t mask = PERMUTE_MASK(0, 0, 0, 1, 0, 2, 0, 3);
     TestFunction([mask](HIRBuilder& b) {
-      StoreVR(b, 3, b.Permute(b.LoadConstant(mask), LoadVR(b, 4),
-                              LoadVR(b, 5), INT32_TYPE));
+      StoreVR(b, 3,
+              b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
+                        LoadVR(b, 5), INT32_TYPE));
       b.Return();
     }).Run([](PPCContext* ctx) {
       ctx->v[4] = vec128i(0, 1, 2, 3);
@@ -34,8 +35,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
   {
     uint32_t mask = PERMUTE_MASK(1, 0, 1, 1, 1, 2, 1, 3);
     TestFunction([mask](HIRBuilder& b) {
-      StoreVR(b, 3, b.Permute(b.LoadConstant(mask), LoadVR(b, 4),
-                              LoadVR(b, 5), INT32_TYPE));
+      StoreVR(b, 3,
+              b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
+                        LoadVR(b, 5), INT32_TYPE));
       b.Return();
     }).Run([](PPCContext* ctx) {
       ctx->v[4] = vec128i(0, 1, 2, 3);
@@ -49,8 +51,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
   {
     uint32_t mask = PERMUTE_MASK(0, 3, 0, 2, 0, 1, 0, 0);
     TestFunction([mask](HIRBuilder& b) {
-      StoreVR(b, 3, b.Permute(b.LoadConstant(mask), LoadVR(b, 4),
-                              LoadVR(b, 5), INT32_TYPE));
+      StoreVR(b, 3,
+              b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
+                        LoadVR(b, 5), INT32_TYPE));
       b.Return();
     }).Run([](PPCContext* ctx) {
       ctx->v[4] = vec128i(0, 1, 2, 3);
@@ -64,8 +67,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
   {
     uint32_t mask = PERMUTE_MASK(1, 3, 1, 2, 1, 1, 1, 0);
     TestFunction([mask](HIRBuilder& b) {
-      StoreVR(b, 3, b.Permute(b.LoadConstant(mask), LoadVR(b, 4),
-                              LoadVR(b, 5), INT32_TYPE));
+      StoreVR(b, 3,
+              b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
+                        LoadVR(b, 5), INT32_TYPE));
       b.Return();
     }).Run([](PPCContext* ctx) {
       ctx->v[4] = vec128i(0, 1, 2, 3);
diff --git a/src/xenia/cpu/test/test_vector_sha.cc b/src/xenia/cpu/test/test_vector_sha.cc
index c6d33c91b..81cc4cc75 100644
--- a/src/xenia/cpu/test/test_vector_sha.cc
+++ b/src/xenia/cpu/test/test_vector_sha.cc
@@ -37,7 +37,7 @@ TEST_CASE("VECTOR_SHA_I8", "[instr]") {
 
 TEST_CASE("VECTOR_SHA_I8_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3, b.VectorSha(LoadVR(b, 4), b.LoadConstant(vec128b(
+    StoreVR(b, 3, b.VectorSha(LoadVR(b, 4), b.LoadConstantVec128(vec128b(
                                                 0, 1, 2, 8, 4, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)),
                               INT8_TYPE));
     b.Return();
@@ -75,7 +75,7 @@ TEST_CASE("VECTOR_SHA_I16", "[instr]") {
 
 TEST_CASE("VECTOR_SHA_I16_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3, b.VectorSha(LoadVR(b, 4), b.LoadConstant(vec128s(
+    StoreVR(b, 3, b.VectorSha(LoadVR(b, 4), b.LoadConstantVec128(vec128s(
                                                 0, 1, 8, 15, 15, 8, 1, 16)),
                               INT16_TYPE));
     b.Return();
@@ -120,12 +120,12 @@ TEST_CASE("VECTOR_SHA_I32", "[instr]") {
 
 TEST_CASE("VECTOR_SHA_I32_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3,
-            b.VectorSha(LoadVR(b, 4), b.LoadConstant(vec128i(0, 1, 16, 31)),
-                        INT32_TYPE));
-    StoreVR(b, 4,
-            b.VectorSha(LoadVR(b, 5), b.LoadConstant(vec128i(31, 16, 1, 32)),
-                        INT32_TYPE));
+    StoreVR(b, 3, b.VectorSha(LoadVR(b, 4),
+                              b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
+                              INT32_TYPE));
+    StoreVR(b, 4, b.VectorSha(LoadVR(b, 5),
+                              b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
+                              INT32_TYPE));
     b.Return();
   });
   test.Run(
diff --git a/src/xenia/cpu/test/test_vector_shl.cc b/src/xenia/cpu/test/test_vector_shl.cc
index 506c6b1bd..fdbb3db35 100644
--- a/src/xenia/cpu/test/test_vector_shl.cc
+++ b/src/xenia/cpu/test/test_vector_shl.cc
@@ -37,7 +37,7 @@ TEST_CASE("VECTOR_SHL_I8", "[instr]") {
 
 TEST_CASE("VECTOR_SHL_I8_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3, b.VectorShl(LoadVR(b, 4), b.LoadConstant(vec128b(
+    StoreVR(b, 3, b.VectorShl(LoadVR(b, 4), b.LoadConstantVec128(vec128b(
                                                 0, 1, 2, 8, 4, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)),
                               INT8_TYPE));
     b.Return();
@@ -75,7 +75,7 @@ TEST_CASE("VECTOR_SHL_I16", "[instr]") {
 
 TEST_CASE("VECTOR_SHL_I16_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3, b.VectorShl(LoadVR(b, 4), b.LoadConstant(vec128s(
+    StoreVR(b, 3, b.VectorShl(LoadVR(b, 4), b.LoadConstantVec128(vec128s(
                                                 0, 1, 8, 15, 15, 8, 1, 16)),
                               INT16_TYPE));
     b.Return();
@@ -120,12 +120,12 @@ TEST_CASE("VECTOR_SHL_I32", "[instr]") {
 
 TEST_CASE("VECTOR_SHL_I32_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3,
-            b.VectorShl(LoadVR(b, 4), b.LoadConstant(vec128i(0, 1, 16, 31)),
-                        INT32_TYPE));
-    StoreVR(b, 4,
-            b.VectorShl(LoadVR(b, 5), b.LoadConstant(vec128i(31, 16, 1, 32)),
-                        INT32_TYPE));
+    StoreVR(b, 3, b.VectorShl(LoadVR(b, 4),
+                              b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
+                              INT32_TYPE));
+    StoreVR(b, 4, b.VectorShl(LoadVR(b, 5),
+                              b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
+                              INT32_TYPE));
     b.Return();
   });
   test.Run(
diff --git a/src/xenia/cpu/test/test_vector_shr.cc b/src/xenia/cpu/test/test_vector_shr.cc
index 55bbe989f..437bcdf71 100644
--- a/src/xenia/cpu/test/test_vector_shr.cc
+++ b/src/xenia/cpu/test/test_vector_shr.cc
@@ -37,7 +37,7 @@ TEST_CASE("VECTOR_SHR_I8", "[instr]") {
 
 TEST_CASE("VECTOR_SHR_I8_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3, b.VectorShr(LoadVR(b, 4), b.LoadConstant(vec128b(
+    StoreVR(b, 3, b.VectorShr(LoadVR(b, 4), b.LoadConstantVec128(vec128b(
                                                 0, 1, 2, 8, 4, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)),
                               INT8_TYPE));
    b.Return();
@@ -75,7 +75,7 @@ TEST_CASE("VECTOR_SHR_I16", "[instr]") {
 
 TEST_CASE("VECTOR_SHR_I16_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3, b.VectorShr(LoadVR(b, 4), b.LoadConstant(vec128s(
+    StoreVR(b, 3, b.VectorShr(LoadVR(b, 4), b.LoadConstantVec128(vec128s(
                                                 0, 1, 8, 15, 15, 8, 1, 16)),
                               INT16_TYPE));
     b.Return();
@@ -120,12 +120,12 @@ TEST_CASE("VECTOR_SHR_I32", "[instr]") {
 
 TEST_CASE("VECTOR_SHR_I32_CONSTANT", "[instr]") {
   TestFunction test([](HIRBuilder& b) {
-    StoreVR(b, 3,
-            b.VectorShr(LoadVR(b, 4), b.LoadConstant(vec128i(0, 1, 16, 31)),
-                        INT32_TYPE));
-    StoreVR(b, 4,
-            b.VectorShr(LoadVR(b, 5), b.LoadConstant(vec128i(31, 16, 1, 32)),
-                        INT32_TYPE));
+    StoreVR(b, 3, b.VectorShr(LoadVR(b, 4),
+                              b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
+                              INT32_TYPE));
+    StoreVR(b, 4, b.VectorShr(LoadVR(b, 5),
+                              b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
+                              INT32_TYPE));
     b.Return();
   });
   test.Run(
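Aside (not part of the patch): the motivation for trading the `LoadConstant` overload set for explicitly named `LoadConstantXxx` functions is that with overloads, the width and signedness of the emitted HIR constant is decided by C++ overload resolution on the argument's type, so a literal suffix or an implicit promotion can silently change which constant gets built. The minimal sketch below illustrates that hazard with standalone free functions; `LoadConstant`, `LoadConstantInt32`, and `LoadConstantUint64` here are stand-ins that just print, not the real `HIRBuilder` API.

```cpp
#include <cstdint>
#include <iostream>

// Overload set: which constant is built depends on the argument's type.
void LoadConstant(int value) {
  std::cout << "int32 constant: " << value << "\n";
}
void LoadConstant(unsigned long long value) {
  std::cout << "uint64 constant: " << value << "\n";
}

// Explicitly named variants: the width is stated at the call site.
void LoadConstantInt32(int32_t value) {
  std::cout << "int32 constant: " << value << "\n";
}
void LoadConstantUint64(uint64_t value) {
  std::cout << "uint64 constant: " << value << "\n";
}

int main() {
  LoadConstant(~0xF);     // ~0xF is an int: selects the int overload (-16).
  LoadConstant(~0xFull);  // One 'ull' suffix silently picks the uint64 overload.

  // With named functions the chosen width no longer hinges on the suffix,
  // and a mismatch between name and literal stands out in review.
  LoadConstantInt32(~0xF);
  LoadConstantUint64(~0xFull);
  return 0;
}
```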