[JIT] New opcodes: OPCODE_LOAD_OFFSET and OPCODE_STORE_OFFSET

These take full advantage of x86 addressing, and eliminate extra add operations.
This commit is contained in:
DrChat 2018-02-14 16:26:49 -06:00
parent 1de598e4ce
commit e54c24e150
8 changed files with 352 additions and 35 deletions

View File

@ -2127,6 +2127,176 @@ struct STORE_MMIO_I32
};
EMITTER_OPCODE_TABLE(OPCODE_STORE_MMIO, STORE_MMIO_I32);
// ============================================================================
// OPCODE_LOAD_OFFSET
// ============================================================================
template <typename T>
// Computes the host effective address for a guest access as a single x64
// addressing expression: membase + guest + offset. This lets the callers
// fold the HIR-level offset into the instruction's addressing mode instead
// of emitting a separate add.
// Requires a constant offset operand; asserts if it is not.
template <typename T>
RegExp ComputeMemoryAddressOffset(X64Emitter& e, const T& guest,
                                  const T& offset) {
  // Only a constant offset can be folded into a displacement; a non-constant
  // offset operand would make offset.constant() below meaningless.
  assert_true(offset.is_constant);
  int32_t offset_const = static_cast<int32_t>(offset.constant());
  if (guest.is_constant) {
    uint32_t address = static_cast<uint32_t>(guest.constant());
    address += offset_const;
    if (address < 0x80000000) {
      // Fits in a signed 32-bit displacement off the membase register.
      return e.GetMembaseReg() + address;
    } else {
      // Displacement would sign-extend negative; materialize it in rax.
      e.mov(e.eax, address);
      return e.GetMembaseReg() + e.rax;
    }
  } else {
    // Clear the top 32 bits, as they are likely garbage.
    // TODO(benvanik): find a way to avoid doing this.
    e.mov(e.eax, guest.reg().cvt32());
    return e.GetMembaseReg() + e.rax + offset_const;
  }
}
struct LOAD_OFFSET_I8
    : Sequence<LOAD_OFFSET_I8, I<OPCODE_LOAD_OFFSET, I8Op, I64Op, I64Op>> {
  // 8-bit load: a single byte has no endianness, so no swap handling here.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    e.mov(i.dest, e.byte[ComputeMemoryAddressOffset(e, i.src1, i.src2)]);
  }
};
struct LOAD_OFFSET_I16
    : Sequence<LOAD_OFFSET_I16, I<OPCODE_LOAD_OFFSET, I16Op, I64Op, I64Op>> {
  // 16-bit load, optionally byte-swapped per the instruction flags.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!(i.instr->flags & LoadStoreFlags::LOAD_STORE_BYTE_SWAP)) {
      // Plain host-order load.
      e.mov(i.dest, e.word[addr]);
    } else if (e.IsFeatureEnabled(kX64EmitMovbe)) {
      // movbe loads and swaps in one instruction.
      e.movbe(i.dest, e.word[addr]);
    } else {
      // Fallback: load then rotate the two bytes into place.
      e.mov(i.dest, e.word[addr]);
      e.ror(i.dest, 8);
    }
  }
};
struct LOAD_OFFSET_I32
    : Sequence<LOAD_OFFSET_I32, I<OPCODE_LOAD_OFFSET, I32Op, I64Op, I64Op>> {
  // 32-bit load, optionally byte-swapped per the instruction flags.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!(i.instr->flags & LoadStoreFlags::LOAD_STORE_BYTE_SWAP)) {
      // Plain host-order load.
      e.mov(i.dest, e.dword[addr]);
    } else if (e.IsFeatureEnabled(kX64EmitMovbe)) {
      // movbe loads and swaps in one instruction.
      e.movbe(i.dest, e.dword[addr]);
    } else {
      // Fallback: load then reverse the bytes.
      e.mov(i.dest, e.dword[addr]);
      e.bswap(i.dest);
    }
  }
};
struct LOAD_OFFSET_I64
    : Sequence<LOAD_OFFSET_I64, I<OPCODE_LOAD_OFFSET, I64Op, I64Op, I64Op>> {
  // 64-bit load, optionally byte-swapped per the instruction flags.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!(i.instr->flags & LoadStoreFlags::LOAD_STORE_BYTE_SWAP)) {
      // Plain host-order load.
      e.mov(i.dest, e.qword[addr]);
    } else if (e.IsFeatureEnabled(kX64EmitMovbe)) {
      // movbe loads and swaps in one instruction.
      e.movbe(i.dest, e.qword[addr]);
    } else {
      // Fallback: load then reverse the bytes.
      e.mov(i.dest, e.qword[addr]);
      e.bswap(i.dest);
    }
  }
};
EMITTER_OPCODE_TABLE(OPCODE_LOAD_OFFSET, LOAD_OFFSET_I8, LOAD_OFFSET_I16,
LOAD_OFFSET_I32, LOAD_OFFSET_I64);
// ============================================================================
// OPCODE_STORE_OFFSET
// ============================================================================
struct STORE_OFFSET_I8
    : Sequence<STORE_OFFSET_I8,
               I<OPCODE_STORE_OFFSET, VoidOp, I64Op, I64Op, I8Op>> {
  // 8-bit store: a single byte has no endianness, so no swap handling here.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!i.src3.is_constant) {
      e.mov(e.byte[addr], i.src3);
    } else {
      // Immediate store of the constant value.
      e.mov(e.byte[addr], i.src3.constant());
    }
  }
};
struct STORE_OFFSET_I16
    : Sequence<STORE_OFFSET_I16,
               I<OPCODE_STORE_OFFSET, VoidOp, I64Op, I64Op, I16Op>> {
  // 16-bit store, optionally byte-swapped per the instruction flags.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!(i.instr->flags & LoadStoreFlags::LOAD_STORE_BYTE_SWAP)) {
      // Plain host-order store, from either an immediate or a register.
      if (i.src3.is_constant) {
        e.mov(e.word[addr], i.src3.constant());
      } else {
        e.mov(e.word[addr], i.src3);
      }
    } else {
      // Swapped stores are only handled for register sources via movbe.
      assert_false(i.src3.is_constant);
      if (e.IsFeatureEnabled(kX64EmitMovbe)) {
        e.movbe(e.word[addr], i.src3);
      } else {
        assert_always("not implemented");
      }
    }
  }
};
struct STORE_OFFSET_I32
    : Sequence<STORE_OFFSET_I32,
               I<OPCODE_STORE_OFFSET, VoidOp, I64Op, I64Op, I32Op>> {
  // 32-bit store, optionally byte-swapped per the instruction flags.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!(i.instr->flags & LoadStoreFlags::LOAD_STORE_BYTE_SWAP)) {
      // Plain host-order store, from either an immediate or a register.
      if (i.src3.is_constant) {
        e.mov(e.dword[addr], i.src3.constant());
      } else {
        e.mov(e.dword[addr], i.src3);
      }
    } else {
      // Swapped stores are only handled for register sources via movbe.
      assert_false(i.src3.is_constant);
      if (e.IsFeatureEnabled(kX64EmitMovbe)) {
        e.movbe(e.dword[addr], i.src3);
      } else {
        assert_always("not implemented");
      }
    }
  }
};
struct STORE_OFFSET_I64
    : Sequence<STORE_OFFSET_I64,
               I<OPCODE_STORE_OFFSET, VoidOp, I64Op, I64Op, I64Op>> {
  // 64-bit store, optionally byte-swapped per the instruction flags.
  static void Emit(X64Emitter& e, const EmitArgType& i) {
    auto addr = ComputeMemoryAddressOffset(e, i.src1, i.src2);
    if (!(i.instr->flags & LoadStoreFlags::LOAD_STORE_BYTE_SWAP)) {
      if (i.src3.is_constant) {
        // 64-bit immediates need the MovMem64 helper (mov imm64 to memory
        // is not directly encodable).
        e.MovMem64(addr, i.src3.constant());
      } else {
        e.mov(e.qword[addr], i.src3);
      }
    } else {
      // Swapped stores are only handled for register sources via movbe.
      assert_false(i.src3.is_constant);
      if (e.IsFeatureEnabled(kX64EmitMovbe)) {
        e.movbe(e.qword[addr], i.src3);
      } else {
        assert_always("not implemented");
      }
    }
  }
};
EMITTER_OPCODE_TABLE(OPCODE_STORE_OFFSET, STORE_OFFSET_I8, STORE_OFFSET_I16,
STORE_OFFSET_I32, STORE_OFFSET_I64);
// ============================================================================
// OPCODE_LOAD
// ============================================================================
@ -7650,6 +7820,8 @@ void RegisterSequences() {
Register_OPCODE_CONTEXT_BARRIER();
Register_OPCODE_LOAD_MMIO();
Register_OPCODE_STORE_MMIO();
Register_OPCODE_LOAD_OFFSET();
Register_OPCODE_STORE_OFFSET();
Register_OPCODE_LOAD();
Register_OPCODE_STORE();
Register_OPCODE_MEMSET();

View File

@ -195,10 +195,15 @@ bool ConstantPropagationPass::Run(HIRBuilder* builder) {
break;
case OPCODE_LOAD:
case OPCODE_LOAD_OFFSET:
if (i->src1.value->IsConstant()) {
assert_false(i->flags & LOAD_STORE_BYTE_SWAP);
auto memory = processor_->memory();
auto address = i->src1.value->constant.i32;
if (i->opcode->num == OPCODE_LOAD_OFFSET) {
address += i->src2.value->constant.i32;
}
auto mmio_range =
processor_->memory()->LookupVirtualMappedRange(address);
if (FLAGS_inline_mmio_access && mmio_range) {
@ -246,12 +251,21 @@ bool ConstantPropagationPass::Run(HIRBuilder* builder) {
}
break;
case OPCODE_STORE:
case OPCODE_STORE_OFFSET:
if (FLAGS_inline_mmio_access && i->src1.value->IsConstant()) {
auto address = i->src1.value->constant.i32;
if (i->opcode->num == OPCODE_STORE_OFFSET) {
address += i->src2.value->constant.i32;
}
auto mmio_range =
processor_->memory()->LookupVirtualMappedRange(address);
if (mmio_range) {
auto value = i->src2.value;
if (i->opcode->num == OPCODE_STORE_OFFSET) {
value = i->src3.value;
}
i->Replace(&OPCODE_STORE_MMIO_info, 0);
i->src1.offset = reinterpret_cast<uint64_t>(mmio_range);
i->src2.offset = address;

View File

@ -35,9 +35,11 @@ bool MemorySequenceCombinationPass::Run(HIRBuilder* builder) {
while (block) {
auto i = block->instr_head;
while (i) {
if (i->opcode == &OPCODE_LOAD_info) {
if (i->opcode == &OPCODE_LOAD_info ||
i->opcode == &OPCODE_LOAD_OFFSET_info) {
CombineLoadSequence(i);
} else if (i->opcode == &OPCODE_STORE_info) {
} else if (i->opcode == &OPCODE_STORE_info ||
i->opcode == &OPCODE_STORE_OFFSET_info) {
CombineStoreSequence(i);
}
i = i->next;
@ -112,6 +114,10 @@ void MemorySequenceCombinationPass::CombineStoreSequence(Instr* i) {
// store_convert v0, v1.i64, [swap|i64->i32,trunc]
auto src = i->src2.value;
if (i->opcode == &OPCODE_STORE_OFFSET_info) {
src = i->src3.value;
}
if (src->IsConstant()) {
// Constant value write - ignore.
return;
@ -135,7 +141,11 @@ void MemorySequenceCombinationPass::CombineStoreSequence(Instr* i) {
// Pull the original value (from before the byte swap).
// The byte swap itself will go away in DCE.
i->set_src2(def->src1.value);
if (i->opcode == &OPCODE_STORE_info) {
i->set_src2(def->src1.value);
} else if (i->opcode == &OPCODE_STORE_OFFSET_info) {
i->set_src3(def->src1.value);
}
// TODO(benvanik): extend/truncate.
}

View File

@ -1232,6 +1232,25 @@ void HIRBuilder::StoreMmio(cpu::MMIORange* mmio_range, uint32_t address,
i->set_src3(value);
}
// Appends a load_offset instruction reading a `type`-sized value from
// address + offset. Keeping the offset as a separate operand lets the
// backend fold it into the host addressing mode.
Value* HIRBuilder::LoadOffset(Value* address, Value* offset, TypeName type,
                              uint32_t load_flags) {
  ASSERT_ADDRESS_TYPE(address);
  Instr* instr =
      AppendInstr(OPCODE_LOAD_OFFSET_info, load_flags, AllocValue(type));
  instr->set_src1(address);
  instr->set_src2(offset);
  // No third operand; cleared directly to avoid recording a use.
  instr->src3.value = NULL;
  return instr->dest;
}
// Appends a store_offset instruction writing `value` to address + offset.
// Keeping the offset as a separate operand lets the backend fold it into
// the host addressing mode.
void HIRBuilder::StoreOffset(Value* address, Value* offset, Value* value,
                             uint32_t store_flags) {
  ASSERT_ADDRESS_TYPE(address);
  Instr* instr = AppendInstr(OPCODE_STORE_OFFSET_info, store_flags);
  instr->set_src1(address);
  instr->set_src2(offset);
  instr->set_src3(value);
}
Value* HIRBuilder::Load(Value* address, TypeName type, uint32_t load_flags) {
ASSERT_ADDRESS_TYPE(address);
Instr* i = AppendInstr(OPCODE_LOAD_info, load_flags, AllocValue(type));

View File

@ -147,6 +147,11 @@ class HIRBuilder {
Value* LoadMmio(cpu::MMIORange* mmio_range, uint32_t address, TypeName type);
void StoreMmio(cpu::MMIORange* mmio_range, uint32_t address, Value* value);
Value* LoadOffset(Value* address, Value* offset, TypeName type,
uint32_t load_flags = 0);
void StoreOffset(Value* address, Value* offset, Value* value,
uint32_t store_flags = 0);
Value* Load(Value* address, TypeName type, uint32_t load_flags = 0);
void Store(Value* address, Value* value, uint32_t store_flags = 0);
void Memset(Value* address, Value* value, Value* length);

View File

@ -152,6 +152,8 @@ enum Opcode {
OPCODE_CONTEXT_BARRIER,
OPCODE_LOAD_MMIO,
OPCODE_STORE_MMIO,
OPCODE_LOAD_OFFSET,
OPCODE_STORE_OFFSET,
OPCODE_LOAD,
OPCODE_STORE,
OPCODE_MEMSET,

View File

@ -231,6 +231,18 @@ DEFINE_OPCODE(
OPCODE_SIG_X_O_O_V,
OPCODE_FLAG_MEMORY)
DEFINE_OPCODE(
OPCODE_LOAD_OFFSET,
"load_offset",
OPCODE_SIG_V_V_V,
OPCODE_FLAG_MEMORY)
DEFINE_OPCODE(
OPCODE_STORE_OFFSET,
"store_offset",
OPCODE_SIG_X_V_V_V,
OPCODE_FLAG_MEMORY)
DEFINE_OPCODE(
OPCODE_LOAD,
"load",

View File

@ -63,8 +63,15 @@ int InstrEmit_lbz(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// RT <- i56.0 || MEM(EA, 1)
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.Load(ea, INT8_TYPE), INT64_TYPE);
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.LoadOffset(b, offset, INT8_TYPE), INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
return 0;
}
@ -73,10 +80,11 @@ int InstrEmit_lbzu(PPCHIRBuilder& f, const InstrData& i) {
// EA <- (RA) + EXTS(D)
// RT <- i56.0 || MEM(EA, 1)
// RA <- EA
Value* ea = CalculateEA_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.Load(ea, INT8_TYPE), INT64_TYPE);
Value* ra = f.LoadGPR(i.D.RA);
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.LoadOffset(ra, offset, INT8_TYPE), INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
StoreEA(f, i.D.RA, ea);
StoreEA(f, i.D.RA, f.Add(ra, offset));
return 0;
}
@ -111,8 +119,16 @@ int InstrEmit_lha(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// RT <- EXTS(MEM(EA, 2))
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.SignExtend(f.ByteSwap(f.Load(ea, INT16_TYPE)), INT64_TYPE);
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt =
f.SignExtend(f.ByteSwap(f.LoadOffset(b, offset, INT16_TYPE)), INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
return 0;
}
@ -121,10 +137,12 @@ int InstrEmit_lhau(PPCHIRBuilder& f, const InstrData& i) {
// EA <- (RA) + EXTS(D)
// RT <- EXTS(MEM(EA, 2))
// RA <- EA
Value* ea = CalculateEA_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.SignExtend(f.ByteSwap(f.Load(ea, INT16_TYPE)), INT64_TYPE);
Value* ra = f.LoadGPR(i.D.RA);
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt = f.SignExtend(f.ByteSwap(f.LoadOffset(ra, offset, INT16_TYPE)),
INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
StoreEA(f, i.D.RA, ea);
StoreEA(f, i.D.RA, f.Add(ra, offset));
return 0;
}
@ -159,8 +177,16 @@ int InstrEmit_lhz(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// RT <- i48.0 || MEM(EA, 2)
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.ByteSwap(f.Load(ea, INT16_TYPE)), INT64_TYPE);
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt =
f.ZeroExtend(f.ByteSwap(f.LoadOffset(b, offset, INT16_TYPE)), INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
return 0;
}
@ -169,10 +195,12 @@ int InstrEmit_lhzu(PPCHIRBuilder& f, const InstrData& i) {
// EA <- (RA) + EXTS(D)
// RT <- i48.0 || MEM(EA, 2)
// RA <- EA
Value* ea = CalculateEA_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.ByteSwap(f.Load(ea, INT16_TYPE)), INT64_TYPE);
Value* ra = f.LoadGPR(i.D.RA);
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.ByteSwap(f.LoadOffset(ra, offset, INT16_TYPE)),
INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
StoreEA(f, i.D.RA, ea);
StoreEA(f, i.D.RA, f.Add(ra, offset));
return 0;
}
@ -207,8 +235,16 @@ int InstrEmit_lwa(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D || 00)
// RT <- EXTS(MEM(EA, 4))
Value* ea = CalculateEA_0_i(f, i.DS.RA, XEEXTS16(i.DS.DS << 2));
Value* rt = f.SignExtend(f.ByteSwap(f.Load(ea, INT32_TYPE)), INT64_TYPE);
Value* b;
if (i.DS.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.DS.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.DS.DS << 2));
Value* rt =
f.SignExtend(f.ByteSwap(f.LoadOffset(b, offset, INT32_TYPE)), INT64_TYPE);
f.StoreGPR(i.DS.RT, rt);
return 0;
}
@ -244,8 +280,16 @@ int InstrEmit_lwz(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// RT <- i32.0 || MEM(EA, 4)
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.ByteSwap(f.Load(ea, INT32_TYPE)), INT64_TYPE);
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt =
f.ZeroExtend(f.ByteSwap(f.LoadOffset(b, offset, INT32_TYPE)), INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
return 0;
}
@ -254,10 +298,12 @@ int InstrEmit_lwzu(PPCHIRBuilder& f, const InstrData& i) {
// EA <- (RA) + EXTS(D)
// RT <- i32.0 || MEM(EA, 4)
// RA <- EA
Value* ea = CalculateEA_i(f, i.D.RA, XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.ByteSwap(f.Load(ea, INT32_TYPE)), INT64_TYPE);
Value* ra = f.LoadGPR(i.D.RA);
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
Value* rt = f.ZeroExtend(f.ByteSwap(f.LoadOffset(ra, offset, INT32_TYPE)),
INT64_TYPE);
f.StoreGPR(i.D.RT, rt);
StoreEA(f, i.D.RA, ea);
StoreEA(f, i.D.RA, f.Add(ra, offset));
return 0;
}
@ -292,8 +338,15 @@ int InstrEmit_ld(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(DS || 0b00)
// RT <- MEM(EA, 8)
Value* ea = CalculateEA_0_i(f, i.DS.RA, XEEXTS16(i.DS.DS << 2));
Value* rt = f.ByteSwap(f.Load(ea, INT64_TYPE));
Value* b;
if (i.DS.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.DS.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.DS.DS << 2));
Value* rt = f.ByteSwap(f.LoadOffset(b, offset, INT64_TYPE));
f.StoreGPR(i.DS.RT, rt);
return 0;
}
@ -342,8 +395,15 @@ int InstrEmit_stb(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// MEM(EA, 1) <- (RS)[56:63]
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
f.Store(ea, f.Truncate(f.LoadGPR(i.D.RT), INT8_TYPE));
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
f.StoreOffset(b, offset, f.Truncate(f.LoadGPR(i.D.RT), INT8_TYPE));
return 0;
}
@ -386,8 +446,16 @@ int InstrEmit_sth(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// MEM(EA, 2) <- (RS)[48:63]
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
f.Store(ea, f.ByteSwap(f.Truncate(f.LoadGPR(i.D.RT), INT16_TYPE)));
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
f.StoreOffset(b, offset,
f.ByteSwap(f.Truncate(f.LoadGPR(i.D.RT), INT16_TYPE)));
return 0;
}
@ -430,8 +498,16 @@ int InstrEmit_stw(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(D)
// MEM(EA, 4) <- (RS)[32:63]
Value* ea = CalculateEA_0_i(f, i.D.RA, XEEXTS16(i.D.DS));
f.Store(ea, f.ByteSwap(f.Truncate(f.LoadGPR(i.D.RT), INT32_TYPE)));
Value* b;
if (i.D.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.D.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.D.DS));
f.StoreOffset(b, offset,
f.ByteSwap(f.Truncate(f.LoadGPR(i.D.RT), INT32_TYPE)));
return 0;
}
@ -474,8 +550,15 @@ int InstrEmit_std(PPCHIRBuilder& f, const InstrData& i) {
// b <- (RA)
// EA <- b + EXTS(DS || 0b00)
// MEM(EA, 8) <- (RS)
Value* ea = CalculateEA_0_i(f, i.DS.RA, XEEXTS16(i.DS.DS << 2));
f.Store(ea, f.ByteSwap(f.LoadGPR(i.DS.RT)));
Value* b;
if (i.DS.RA == 0) {
b = f.LoadZeroInt64();
} else {
b = f.LoadGPR(i.DS.RA);
}
Value* offset = f.LoadConstantInt64(XEEXTS16(i.DS.DS << 2));
f.StoreOffset(b, offset, f.ByteSwap(f.LoadGPR(i.DS.RT)));
return 0;
}