target-ppc: convert ld64 to use new macro

Use the macro for ld64 as well; this renames the function from
gen_qemu_ld64 to gen_qemu_ld64_i64. Replace it at all the call sites.

Signed-off-by: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
This commit is contained in:
Nikunj A Dadhania 2016-09-12 12:11:31 +05:30 committed by David Gibson
parent 09bfe50d57
commit 4f364fe76f
5 changed files with 49 additions and 54 deletions

View File

@ -2488,12 +2488,7 @@ static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL)) GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL)) GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
/* Emit a TCG 64-bit guest-memory load: loads the 8 bytes at guest address
 * arg2 (for the current mem_idx context) into the 64-bit TCG value arg1.
 * The memop combines MO_Q (64-bit) with the context's default byte-swap
 * mask, so any required endianness swap is handled by the load itself. */
static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}
static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2) static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{ {
@ -2612,12 +2607,12 @@ GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */ /* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B); GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */ /* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B); GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */ /* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B); GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
/* CI load/store variants */ /* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@ -2640,7 +2635,7 @@ static void gen_ld(DisasContext *ctx)
gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA); gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else { } else {
/* ld - ldu */ /* ld - ldu */
gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA); gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} }
if (Rc(ctx->opcode)) if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
@ -2677,16 +2672,16 @@ static void gen_lq(DisasContext *ctx)
EA = tcg_temp_new(); EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F); gen_addr_imm_index(ctx, EA, 0x0F);
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
64-bit byteswap already. */ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) { if (unlikely(ctx->le_mode)) {
gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA); gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
gen_addr_add(ctx, EA, EA, 8); gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, cpu_gpr[rd], EA); gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
} else { } else {
gen_qemu_ld64(ctx, cpu_gpr[rd], EA); gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
gen_addr_add(ctx, EA, EA, 8); gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA); gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
} }
tcg_temp_free(EA); tcg_temp_free(EA);
} }
@ -3184,7 +3179,7 @@ STCX(stwcx_, 4);
#if defined(TARGET_PPC64) #if defined(TARGET_PPC64)
/* ldarx */ /* ldarx */
LARX(ldarx, 8, ld64); LARX(ldarx, 8, ld64_i64);
/* lqarx */ /* lqarx */
static void gen_lqarx(DisasContext *ctx) static void gen_lqarx(DisasContext *ctx)
@ -3210,11 +3205,11 @@ static void gen_lqarx(DisasContext *ctx)
gpr1 = cpu_gpr[rd]; gpr1 = cpu_gpr[rd];
gpr2 = cpu_gpr[rd+1]; gpr2 = cpu_gpr[rd+1];
} }
gen_qemu_ld64(ctx, gpr1, EA); gen_qemu_ld64_i64(ctx, gpr1, EA);
tcg_gen_mov_tl(cpu_reserve, EA); tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8); gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, gpr2, EA); gen_qemu_ld64_i64(ctx, gpr2, EA);
tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val)); tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2)); tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
@ -6601,12 +6596,12 @@ GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER)
#if defined(TARGET_PPC64) #if defined(TARGET_PPC64)
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B) GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B) GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B) GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B)
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B) GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B)
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE) GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
/* HV/P7 and later only */ /* HV/P7 and later only */
GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)

View File

@ -672,7 +672,7 @@ static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
} }
/* lfd lfdu lfdux lfdx */ /* lfd lfdu lfdux lfdx */
GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT); GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx */ /* lfs lfsu lfsux lfsx */
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT); GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
@ -687,16 +687,16 @@ static void gen_lfdp(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT); gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new(); EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0); gen_addr_imm_index(ctx, EA, 0);
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
64-bit byteswap already. */ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) { if (unlikely(ctx->le_mode)) {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8); tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else { } else {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8); tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
} }
tcg_temp_free(EA); tcg_temp_free(EA);
} }
@ -712,16 +712,16 @@ static void gen_lfdpx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT); gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new(); EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA); gen_addr_reg_index(ctx, EA);
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
64-bit byteswap already. */ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) { if (unlikely(ctx->le_mode)) {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8); tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else { } else {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8); tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
} }
tcg_temp_free(EA); tcg_temp_free(EA);
} }
@ -924,9 +924,9 @@ static void gen_lfq(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT); gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new(); t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0); gen_addr_imm_index(ctx, t0, 0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0); gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8); gen_addr_add(ctx, t0, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0); gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0); tcg_temp_free(t0);
} }
@ -940,9 +940,9 @@ static void gen_lfqu(DisasContext *ctx)
t0 = tcg_temp_new(); t0 = tcg_temp_new();
t1 = tcg_temp_new(); t1 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0); gen_addr_imm_index(ctx, t0, 0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0); gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t1, t0, 8); gen_addr_add(ctx, t1, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1); gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
if (ra != 0) if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0); tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0); tcg_temp_free(t0);
@ -958,10 +958,10 @@ static void gen_lfqux(DisasContext *ctx)
TCGv t0, t1; TCGv t0, t1;
t0 = tcg_temp_new(); t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0); gen_addr_reg_index(ctx, t0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0); gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new(); t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8); gen_addr_add(ctx, t1, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1); gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1); tcg_temp_free(t1);
if (ra != 0) if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0); tcg_gen_mov_tl(cpu_gpr[ra], t0);
@ -976,9 +976,9 @@ static void gen_lfqx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT); gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new(); t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0); gen_addr_reg_index(ctx, t0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0); gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8); gen_addr_add(ctx, t0, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0); gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0); tcg_temp_free(t0);
} }

View File

@ -617,7 +617,7 @@ static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
static inline void gen_op_evldd(DisasContext *ctx, TCGv addr) static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
{ {
TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t0 = tcg_temp_new_i64();
gen_qemu_ld64(ctx, t0, addr); gen_qemu_ld64_i64(ctx, t0, addr);
gen_store_gpr64(rD(ctx->opcode), t0); gen_store_gpr64(rD(ctx->opcode), t0);
tcg_temp_free_i64(t0); tcg_temp_free_i64(t0);
} }

View File

@ -26,16 +26,16 @@ static void glue(gen_, name)(DisasContext *ctx)
EA = tcg_temp_new(); \ EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \ gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \ tcg_gen_andi_tl(EA, EA, ~0xf); \
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary \ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
64-bit byteswap already. */ \ necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \ if (ctx->le_mode) { \
gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \ tcg_gen_addi_tl(EA, EA, 8); \
gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \ } else { \
gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \ tcg_gen_addi_tl(EA, EA, 8); \
gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \ } \
tcg_temp_free(EA); \ tcg_temp_free(EA); \
} }

View File

@ -34,7 +34,7 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(EA); \ tcg_temp_free(EA); \
} }
VSX_LOAD_SCALAR(lxsdx, ld64) VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64) VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64) VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs) VSX_LOAD_SCALAR(lxsspx, ld32fs)
@ -49,9 +49,9 @@ static void gen_lxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT); gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new(); EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA); gen_addr_reg_index(ctx, EA);
gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA); gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
tcg_gen_addi_tl(EA, EA, 8); tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_vsrl(xT(ctx->opcode)), EA); gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
tcg_temp_free(EA); tcg_temp_free(EA);
} }
@ -65,7 +65,7 @@ static void gen_lxvdsx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT); gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new(); EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA); gen_addr_reg_index(ctx, EA);
gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA); gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode))); tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
tcg_temp_free(EA); tcg_temp_free(EA);
} }