tcg/optimize: Split out fold_call
Calls are special in that they have a variable number of arguments, and need to be able to clobber globals.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 8774dded02
commit 5cf32be7d8
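As background for the diff below: the commit message's point is that a call's input and output counts live on the op itself (TCGOP_CALLI/TCGOP_CALLO) rather than in the static tcg_op_defs table, and a call may clobber any global temp unless its flags say otherwise. A minimal sketch of that distinction, reusing the names that appear in the diff (this is a fragment of the pre-patch logic for illustration, not compilable on its own):

    /* Argument counts: fixed per opcode vs. encoded per call site. */
    if (op->opc == INDEX_op_call) {
        nb_oargs = TCGOP_CALLO(op);   /* outputs vary at each call site */
        nb_iargs = TCGOP_CALLI(op);   /* inputs vary at each call site */
    } else {
        const TCGOpDef *def = &tcg_op_defs[op->opc];
        nb_oargs = def->nb_oargs;     /* fixed by the opcode definition */
        nb_iargs = def->nb_iargs;
    }

After this change the call case never reaches the generic path at all: tcg_optimize dispatches to fold_call and continues, so the global-clobbering reset and the memory-barrier handling for calls move into that one helper.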
@@ -624,10 +624,42 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static bool fold_call(OptContext *ctx, TCGOp *op)
+{
+    TCGContext *s = ctx->tcg;
+    int nb_oargs = TCGOP_CALLO(op);
+    int nb_iargs = TCGOP_CALLI(op);
+    int flags, i;
+
+    init_arguments(ctx, op, nb_oargs + nb_iargs);
+    copy_propagate(ctx, op, nb_oargs, nb_iargs);
+
+    /* If the function reads or writes globals, reset temp data. */
+    flags = tcg_call_flags(op);
+    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
+        int nb_globals = s->nb_globals;
+
+        for (i = 0; i < nb_globals; i++) {
+            if (test_bit(i, ctx->temps_used.l)) {
+                reset_ts(&ctx->tcg->temps[i]);
+            }
+        }
+    }
+
+    /* Reset temp data for outputs. */
+    for (i = 0; i < nb_oargs; i++) {
+        reset_temp(op->args[i]);
+    }
+
+    /* Stop optimizing MB across calls. */
+    ctx->prev_mb = NULL;
+    return true;
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
-    int nb_temps, nb_globals, i;
+    int nb_temps, i;
     TCGOp *op, *op_next;
     OptContext ctx = { .tcg = s };
 
@@ -637,8 +669,6 @@ void tcg_optimize(TCGContext *s)
        available through the doubly linked circular list. */
 
     nb_temps = s->nb_temps;
-    nb_globals = s->nb_globals;
-
     for (i = 0; i < nb_temps; ++i) {
         s->temps[i].state_ptr = NULL;
     }
@@ -647,17 +677,17 @@ void tcg_optimize(TCGContext *s)
         uint64_t z_mask, partmask, affected, tmp;
         int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
-        const TCGOpDef *def = &tcg_op_defs[opc];
+        const TCGOpDef *def;
 
-        /* Count the arguments, and initialize the temps that are
-           going to be used */
+        /* Calls are special. */
        if (opc == INDEX_op_call) {
-            nb_oargs = TCGOP_CALLO(op);
-            nb_iargs = TCGOP_CALLI(op);
-        } else {
-            nb_oargs = def->nb_oargs;
-            nb_iargs = def->nb_iargs;
+            fold_call(&ctx, op);
+            continue;
         }
+
+        def = &tcg_op_defs[opc];
+        nb_oargs = def->nb_oargs;
+        nb_iargs = def->nb_iargs;
         init_arguments(&ctx, op, nb_oargs + nb_iargs);
         copy_propagate(&ctx, op, nb_oargs, nb_iargs);
 
@@ -1549,16 +1579,6 @@ void tcg_optimize(TCGContext *s)
         if (def->flags & TCG_OPF_BB_END) {
             memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
         } else {
-            if (opc == INDEX_op_call &&
-                !(tcg_call_flags(op)
-                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
-                for (i = 0; i < nb_globals; i++) {
-                    if (test_bit(i, ctx.temps_used.l)) {
-                        reset_ts(&s->temps[i]);
-                    }
-                }
-            }
-
             for (i = 0; i < nb_oargs; i++) {
                 reset_temp(op->args[i]);
                 /* Save the corresponding known-zero bits mask for the
@@ -1599,7 +1619,6 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_st_i32:
         case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_st_i64:
-        case INDEX_op_call:
             /* Opcodes that touch guest memory stop the optimization. */
             ctx.prev_mb = NULL;
             break;