mirror of https://github.com/xqemu/xqemu.git
target-ppc: convert st64 to use new macro
Use the macro for st64 as well; this changes the function signature from
gen_qemu_st64 to gen_qemu_st64_i64. Replace it at all the call sites.

Signed-off-by: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
commit 2468f23dcb
parent 761a89c641
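The new macro itself is not part of this diff; only its uses are. Below is a
minimal sketch of what GEN_QEMU_STORE_64 presumably expands to, reconstructed
from the first hunk's context line and the open-coded gen_qemu_st64 it
replaces. The glue() helpers are QEMU's token-pasting macros; the DEF_MEMOP
definition here is an assumption, mirroring the removed function's
"MO_Q | ctx->default_tcg_memop_mask".

    /* QEMU's token-pasting helpers (include/qemu/compiler.h). */
    #define xglue(x, y) x ## y
    #define glue(x, y) xglue(x, y)

    /* Assumption: DEF_MEMOP folds the context's default byteswap mask
     * into the memory op, as the removed gen_qemu_st64 did by hand. */
    #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)

    /* Sketch: defines gen_qemu_<stop>_i64(), which emits a 64-bit TCG
     * guest store of arg1 to the address in arg2. */
    #define GEN_QEMU_STORE_64(stop, op)                                  \
    static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,     \
                                                  TCGv_i64 arg1,         \
                                                  TCGv arg2)             \
    {                                                                    \
        tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);               \
    }

With that definition, GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q)) produces
gen_qemu_st64_i64(), which is why every former gen_qemu_st64 call site in the
hunks below gains the _i64 suffix.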
@@ -2519,12 +2519,7 @@ static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
 }
 
 GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
-
-static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
-    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
-    tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
 
 #define GEN_LD(name, ldop, opc, type) \
 static void glue(gen_, name)(DisasContext *ctx) \
@@ -2769,9 +2764,9 @@ GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
 /* stw stwu stwux stwx */
 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
 #if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
@@ -2808,16 +2803,16 @@ static void gen_std(DisasContext *ctx)
         EA = tcg_temp_new();
         gen_addr_imm_index(ctx, EA, 0x03);
 
-        /* We only need to swap high and low halves. gen_qemu_st64 does
+        /* We only need to swap high and low halves. gen_qemu_st64_i64 does
            necessary 64-bit byteswap already. */
         if (unlikely(ctx->le_mode)) {
-            gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+            gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
             gen_addr_add(ctx, EA, EA, 8);
-            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+            gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
         } else {
-            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+            gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
             gen_addr_add(ctx, EA, EA, 8);
-            gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+            gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
         }
         tcg_temp_free(EA);
     } else {
@@ -2831,7 +2826,7 @@ static void gen_std(DisasContext *ctx)
         gen_set_access_type(ctx, ACCESS_INT);
         EA = tcg_temp_new();
         gen_addr_imm_index(ctx, EA, 0x03);
-        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+        gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
         if (Rc(ctx->opcode))
             tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
         tcg_temp_free(EA);
@@ -3113,7 +3108,7 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
     tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
 #if defined(TARGET_PPC64)
     if (size == 8) {
-        gen_qemu_st64(ctx, cpu_gpr[reg], EA);
+        gen_qemu_st64_i64(ctx, cpu_gpr[reg], EA);
     } else
 #endif
     if (size == 4) {
@@ -3130,10 +3125,10 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
             gpr1 = cpu_gpr[reg];
             gpr2 = cpu_gpr[reg+1];
         }
-        gen_qemu_st64(ctx, gpr1, EA);
+        gen_qemu_st64_i64(ctx, gpr1, EA);
         EA8 = tcg_temp_local_new();
         gen_addr_add(ctx, EA8, EA, 8);
-        gen_qemu_st64(ctx, gpr2, EA8);
+        gen_qemu_st64_i64(ctx, gpr2, EA8);
         tcg_temp_free(EA8);
 #endif
     } else {
@@ -6622,10 +6617,10 @@ GEN_STS(stb, st8, 0x06, PPC_INTEGER)
 GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
 GEN_STS(stw, st32, 0x04, PPC_INTEGER)
 #if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
 GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)

@@ -848,7 +848,7 @@ static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
 }
 
 /* stfd stfdu stfdux stfdx */
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
 /* stfs stfsu stfsux stfsx */
 GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
 
@@ -863,16 +863,16 @@ static void gen_stfdp(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     EA = tcg_temp_new();
     gen_addr_imm_index(ctx, EA, 0);
-    /* We only need to swap high and low halves. gen_qemu_st64 does necessary
-       64-bit byteswap already. */
+    /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
         tcg_gen_addi_tl(EA, EA, 8);
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
     } else {
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
         tcg_gen_addi_tl(EA, EA, 8);
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
     }
     tcg_temp_free(EA);
 }
@@ -888,16 +888,16 @@ static void gen_stfdpx(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
-    /* We only need to swap high and low halves. gen_qemu_st64 does necessary
-       64-bit byteswap already. */
+    /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
         tcg_gen_addi_tl(EA, EA, 8);
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
     } else {
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
         tcg_gen_addi_tl(EA, EA, 8);
-        gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+        gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
     }
     tcg_temp_free(EA);
 }
@@ -990,9 +990,9 @@ static void gen_stfq(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     t0 = tcg_temp_new();
     gen_addr_imm_index(ctx, t0, 0);
-    gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+    gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
     gen_addr_add(ctx, t0, t0, 8);
-    gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+    gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
     tcg_temp_free(t0);
 }
 
@@ -1005,10 +1005,10 @@ static void gen_stfqu(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     t0 = tcg_temp_new();
     gen_addr_imm_index(ctx, t0, 0);
-    gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+    gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
     t1 = tcg_temp_new();
     gen_addr_add(ctx, t1, t0, 8);
-    gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+    gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
     tcg_temp_free(t1);
     if (ra != 0)
         tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1024,10 +1024,10 @@ static void gen_stfqux(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     t0 = tcg_temp_new();
     gen_addr_reg_index(ctx, t0);
-    gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+    gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
     t1 = tcg_temp_new();
    gen_addr_add(ctx, t1, t0, 8);
-    gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+    gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
     tcg_temp_free(t1);
     if (ra != 0)
         tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1042,9 +1042,9 @@ static void gen_stfqx(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     t0 = tcg_temp_new();
     gen_addr_reg_index(ctx, t0);
-    gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+    gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
     gen_addr_add(ctx, t0, t0, 8);
-    gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+    gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
     tcg_temp_free(t0);
 }
 

@@ -85,7 +85,7 @@ GEN_STUF(name, stop, op | 0x21, type) \
 GEN_STUXF(name, stop, op | 0x01, type) \
 GEN_STXF(name, stop, 0x17, op | 0x00, type)
 
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT)
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
 GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
 GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
 GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),

@@ -725,7 +725,7 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
 {
     TCGv_i64 t0 = tcg_temp_new_i64();
     gen_load_gpr64(t0, rS(ctx->opcode));
-    gen_qemu_st64(ctx, t0, addr);
+    gen_qemu_st64_i64(ctx, t0, addr);
     tcg_temp_free_i64(t0);
 }
 

@@ -52,16 +52,16 @@ static void gen_st##name(DisasContext *ctx) \
     EA = tcg_temp_new(); \
     gen_addr_reg_index(ctx, EA); \
     tcg_gen_andi_tl(EA, EA, ~0xf); \
-    /* We only need to swap high and low halves. gen_qemu_st64 does necessary \
-       64-bit byteswap already. */ \
+    /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
+       necessary 64-bit byteswap already. */ \
     if (ctx->le_mode) { \
-        gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+        gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
         tcg_gen_addi_tl(EA, EA, 8); \
-        gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+        gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
     } else { \
-        gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+        gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
         tcg_gen_addi_tl(EA, EA, 8); \
-        gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+        gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
     } \
     tcg_temp_free(EA); \
 }

@@ -115,7 +115,7 @@ static void gen_##name(DisasContext *ctx) \
     tcg_temp_free(EA); \
 }
 
-VSX_STORE_SCALAR(stxsdx, st64)
+VSX_STORE_SCALAR(stxsdx, st64_i64)
 VSX_STORE_SCALAR(stxsiwx, st32_i64)
 VSX_STORE_SCALAR(stxsspx, st32fs)
 
@@ -129,9 +129,9 @@ static void gen_stxvd2x(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
-    gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+    gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
     tcg_gen_addi_tl(EA, EA, 8);
-    gen_qemu_st64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+    gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
     tcg_temp_free(EA);
 }