mirror of https://github.com/xemu-project/xemu.git
* Fix all next_page checks for overflow.
* Convert six targets to the translator loop. -----BEGIN PGP SIGNATURE----- iQEcBAABAgAGBQJa8y3rAAoJEGTfOOivfiFf2C0H/3vvTgrXUru2fNbyX/Gw/qEP u0V4XPb94gdjMV16oKWfOIGlCb2f7u+z3cvKFJoyiaqIHCBzK28yGkDyLHQvDeqU uTo4OyB2AX0MbkSjWsR3ym61RWoLCzzP3OtHGKsTGAh9+OuFKYxUBMVciLwJ9/X+ utx7xm20vM72JOEK9g3Zf7VcVI75s5JhgLmXbwfyN5Y2M1VzGuWOO6RV3vGxJnml F1wwPf+9ZC61eqYWOzi1IruHmBVDi4k9xV50inDtxvLNmaKmPDcE0vq8YkrP/Vcx 3j2FmxfUEGnc3e5OW4jtvdfn15ITKYlANkN7YMQJA7XuiaHgyXr74yh3EZzRNvI= =7fmA -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/rth/tags/cota-target-pull-request' into staging * Fix all next_page checks for overflow. * Convert six targets to the translator loop. # gpg: Signature made Wed 09 May 2018 18:20:43 BST # gpg: using RSA key 64DF38E8AF7E215F # gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" # Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F * remotes/rth/tags/cota-target-pull-request: (28 commits) target/riscv: convert to TranslatorOps target/riscv: convert to DisasContextBase target/riscv: convert to DisasJumpType target/openrisc: convert to TranslatorOps target/openrisc: convert to DisasContextBase target/s390x: convert to TranslatorOps target/s390x: convert to DisasContextBase target/s390x: convert to DisasJumpType target/mips: convert to TranslatorOps target/mips: use *ctx for DisasContext target/mips: convert to DisasContextBase target/mips: convert to DisasJumpType target/mips: use lookup_and_goto_ptr on BS_STOP target/sparc: convert to TranslatorOps target/sparc: convert to DisasContextBase target/sparc: convert to DisasJumpType target/sh4: convert to TranslatorOps translator: merge max_insns into DisasContextBase target/mips: avoid integer overflow in next_page PC check target/s390x: avoid integer overflow in next_page PC check ... Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
c74e62ee3e
|
@ -34,8 +34,6 @@ void translator_loop_temp_check(DisasContextBase *db)
|
||||||
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
|
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
|
||||||
CPUState *cpu, TranslationBlock *tb)
|
CPUState *cpu, TranslationBlock *tb)
|
||||||
{
|
{
|
||||||
int max_insns;
|
|
||||||
|
|
||||||
/* Initialize DisasContext */
|
/* Initialize DisasContext */
|
||||||
db->tb = tb;
|
db->tb = tb;
|
||||||
db->pc_first = tb->pc;
|
db->pc_first = tb->pc;
|
||||||
|
@ -45,18 +43,18 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
|
||||||
db->singlestep_enabled = cpu->singlestep_enabled;
|
db->singlestep_enabled = cpu->singlestep_enabled;
|
||||||
|
|
||||||
/* Instruction counting */
|
/* Instruction counting */
|
||||||
max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
|
db->max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
|
||||||
if (max_insns == 0) {
|
if (db->max_insns == 0) {
|
||||||
max_insns = CF_COUNT_MASK;
|
db->max_insns = CF_COUNT_MASK;
|
||||||
}
|
}
|
||||||
if (max_insns > TCG_MAX_INSNS) {
|
if (db->max_insns > TCG_MAX_INSNS) {
|
||||||
max_insns = TCG_MAX_INSNS;
|
db->max_insns = TCG_MAX_INSNS;
|
||||||
}
|
}
|
||||||
if (db->singlestep_enabled || singlestep) {
|
if (db->singlestep_enabled || singlestep) {
|
||||||
max_insns = 1;
|
db->max_insns = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
max_insns = ops->init_disas_context(db, cpu, max_insns);
|
ops->init_disas_context(db, cpu);
|
||||||
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
||||||
|
|
||||||
/* Reset the temp count so that we can identify leaks */
|
/* Reset the temp count so that we can identify leaks */
|
||||||
|
@ -95,7 +93,8 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
|
||||||
update db->pc_next and db->is_jmp to indicate what should be
|
update db->pc_next and db->is_jmp to indicate what should be
|
||||||
done next -- either exiting this loop or locate the start of
|
done next -- either exiting this loop or locate the start of
|
||||||
the next instruction. */
|
the next instruction. */
|
||||||
if (db->num_insns == max_insns && (tb_cflags(db->tb) & CF_LAST_IO)) {
|
if (db->num_insns == db->max_insns
|
||||||
|
&& (tb_cflags(db->tb) & CF_LAST_IO)) {
|
||||||
/* Accept I/O on the last instruction. */
|
/* Accept I/O on the last instruction. */
|
||||||
gen_io_start();
|
gen_io_start();
|
||||||
ops->translate_insn(db, cpu);
|
ops->translate_insn(db, cpu);
|
||||||
|
@ -111,7 +110,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
|
||||||
|
|
||||||
/* Stop translation if the output buffer is full,
|
/* Stop translation if the output buffer is full,
|
||||||
or we have executed all of the allowed instructions. */
|
or we have executed all of the allowed instructions. */
|
||||||
if (tcg_op_buf_full() || db->num_insns >= max_insns) {
|
if (tcg_op_buf_full() || db->num_insns >= db->max_insns) {
|
||||||
db->is_jmp = DISAS_TOO_MANY;
|
db->is_jmp = DISAS_TOO_MANY;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
@ -58,6 +58,7 @@ typedef enum DisasJumpType {
|
||||||
* disassembly).
|
* disassembly).
|
||||||
* @is_jmp: What instruction to disassemble next.
|
* @is_jmp: What instruction to disassemble next.
|
||||||
* @num_insns: Number of translated instructions (including current).
|
* @num_insns: Number of translated instructions (including current).
|
||||||
|
* @max_insns: Maximum number of instructions to be translated in this TB.
|
||||||
* @singlestep_enabled: "Hardware" single stepping enabled.
|
* @singlestep_enabled: "Hardware" single stepping enabled.
|
||||||
*
|
*
|
||||||
* Architecture-agnostic disassembly context.
|
* Architecture-agnostic disassembly context.
|
||||||
|
@ -67,7 +68,8 @@ typedef struct DisasContextBase {
|
||||||
target_ulong pc_first;
|
target_ulong pc_first;
|
||||||
target_ulong pc_next;
|
target_ulong pc_next;
|
||||||
DisasJumpType is_jmp;
|
DisasJumpType is_jmp;
|
||||||
unsigned int num_insns;
|
int num_insns;
|
||||||
|
int max_insns;
|
||||||
bool singlestep_enabled;
|
bool singlestep_enabled;
|
||||||
} DisasContextBase;
|
} DisasContextBase;
|
||||||
|
|
||||||
|
@ -76,7 +78,6 @@ typedef struct DisasContextBase {
|
||||||
* @init_disas_context:
|
* @init_disas_context:
|
||||||
* Initialize the target-specific portions of DisasContext struct.
|
* Initialize the target-specific portions of DisasContext struct.
|
||||||
* The generic DisasContextBase has already been initialized.
|
* The generic DisasContextBase has already been initialized.
|
||||||
* Return max_insns, modified as necessary by db->tb->flags.
|
|
||||||
*
|
*
|
||||||
* @tb_start:
|
* @tb_start:
|
||||||
* Emit any code required before the start of the main loop,
|
* Emit any code required before the start of the main loop,
|
||||||
|
@ -106,8 +107,7 @@ typedef struct DisasContextBase {
|
||||||
* Print instruction disassembly to log.
|
* Print instruction disassembly to log.
|
||||||
*/
|
*/
|
||||||
typedef struct TranslatorOps {
|
typedef struct TranslatorOps {
|
||||||
int (*init_disas_context)(DisasContextBase *db, CPUState *cpu,
|
void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
|
||||||
int max_insns);
|
|
||||||
void (*tb_start)(DisasContextBase *db, CPUState *cpu);
|
void (*tb_start)(DisasContextBase *db, CPUState *cpu);
|
||||||
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
|
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
|
||||||
bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
|
bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
|
||||||
|
|
|
@ -2919,8 +2919,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
|
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
|
||||||
CPUState *cpu, int max_insns)
|
|
||||||
{
|
{
|
||||||
DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
||||||
CPUAlphaState *env = cpu->env_ptr;
|
CPUAlphaState *env = cpu->env_ptr;
|
||||||
|
@ -2959,8 +2958,7 @@ static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
mask = TARGET_PAGE_MASK;
|
mask = TARGET_PAGE_MASK;
|
||||||
}
|
}
|
||||||
bound = -(ctx->base.pc_first | mask) / 4;
|
bound = -(ctx->base.pc_first | mask) / 4;
|
||||||
|
ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
|
||||||
return MIN(max_insns, bound);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
|
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
|
||||||
|
|
|
@ -13224,8 +13224,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
|
||||||
free_tmp_a64(s);
|
free_tmp_a64(s);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int aarch64_tr_init_disas_context(DisasContextBase *dcbase,
|
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
CPUState *cpu, int max_insns)
|
CPUState *cpu)
|
||||||
{
|
{
|
||||||
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
CPUARMState *env = cpu->env_ptr;
|
CPUARMState *env = cpu->env_ptr;
|
||||||
|
@ -13288,11 +13288,9 @@ static int aarch64_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
if (dc->ss_active) {
|
if (dc->ss_active) {
|
||||||
bound = 1;
|
bound = 1;
|
||||||
}
|
}
|
||||||
max_insns = MIN(max_insns, bound);
|
dc->base.max_insns = MIN(dc->base.max_insns, bound);
|
||||||
|
|
||||||
init_tmp_a64_array(dc);
|
init_tmp_a64_array(dc);
|
||||||
|
|
||||||
return max_insns;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
|
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
|
||||||
|
|
|
@ -9930,7 +9930,7 @@ static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
|
if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
|
||||||
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
|
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
|
||||||
* is not on the next page; we merge this into a 32-bit
|
* is not on the next page; we merge this into a 32-bit
|
||||||
* insn.
|
* insn.
|
||||||
|
@ -12243,8 +12243,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
|
||||||
return !thumb_insn_is_16bit(s, insn);
|
return !thumb_insn_is_16bit(s, insn);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
|
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
||||||
CPUState *cs, int max_insns)
|
|
||||||
{
|
{
|
||||||
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
CPUARMState *env = cs->env_ptr;
|
CPUARMState *env = cs->env_ptr;
|
||||||
|
@ -12301,19 +12300,18 @@ static int arm_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
dc->is_ldex = false;
|
dc->is_ldex = false;
|
||||||
dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
|
dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
|
||||||
|
|
||||||
dc->next_page_start =
|
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
|
||||||
(dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
|
||||||
|
|
||||||
/* If architectural single step active, limit to 1. */
|
/* If architectural single step active, limit to 1. */
|
||||||
if (is_singlestepping(dc)) {
|
if (is_singlestepping(dc)) {
|
||||||
max_insns = 1;
|
dc->base.max_insns = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ARM is a fixed-length ISA. Bound the number of insns to execute
|
/* ARM is a fixed-length ISA. Bound the number of insns to execute
|
||||||
to those left on the page. */
|
to those left on the page. */
|
||||||
if (!dc->thumb) {
|
if (!dc->thumb) {
|
||||||
int bound = (dc->next_page_start - dc->base.pc_first) / 4;
|
int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
|
||||||
max_insns = MIN(max_insns, bound);
|
dc->base.max_insns = MIN(dc->base.max_insns, bound);
|
||||||
}
|
}
|
||||||
|
|
||||||
cpu_F0s = tcg_temp_new_i32();
|
cpu_F0s = tcg_temp_new_i32();
|
||||||
|
@ -12324,8 +12322,6 @@ static int arm_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
cpu_V1 = cpu_F1d;
|
cpu_V1 = cpu_F1d;
|
||||||
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
|
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
|
||||||
cpu_M0 = tcg_temp_new_i64();
|
cpu_M0 = tcg_temp_new_i64();
|
||||||
|
|
||||||
return max_insns;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
|
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
|
||||||
|
@ -12584,8 +12580,8 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
|
||||||
* but isn't very efficient).
|
* but isn't very efficient).
|
||||||
*/
|
*/
|
||||||
if (dc->base.is_jmp == DISAS_NEXT
|
if (dc->base.is_jmp == DISAS_NEXT
|
||||||
&& (dc->pc >= dc->next_page_start
|
&& (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
|
||||||
|| (dc->pc >= dc->next_page_start - 3
|
|| (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
|
||||||
&& insn_crosses_page(env, dc)))) {
|
&& insn_crosses_page(env, dc)))) {
|
||||||
dc->base.is_jmp = DISAS_TOO_MANY;
|
dc->base.is_jmp = DISAS_TOO_MANY;
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,7 +9,7 @@ typedef struct DisasContext {
|
||||||
DisasContextBase base;
|
DisasContextBase base;
|
||||||
|
|
||||||
target_ulong pc;
|
target_ulong pc;
|
||||||
target_ulong next_page_start;
|
target_ulong page_start;
|
||||||
uint32_t insn;
|
uint32_t insn;
|
||||||
/* Nonzero if this instruction has been conditionally skipped. */
|
/* Nonzero if this instruction has been conditionally skipped. */
|
||||||
int condjmp;
|
int condjmp;
|
||||||
|
|
|
@ -3091,7 +3091,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
unsigned int insn_len;
|
unsigned int insn_len;
|
||||||
struct DisasContext ctx;
|
struct DisasContext ctx;
|
||||||
struct DisasContext *dc = &ctx;
|
struct DisasContext *dc = &ctx;
|
||||||
uint32_t next_page_start;
|
uint32_t page_start;
|
||||||
target_ulong npc;
|
target_ulong npc;
|
||||||
int num_insns;
|
int num_insns;
|
||||||
int max_insns;
|
int max_insns;
|
||||||
|
@ -3138,7 +3138,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
|
|
||||||
dc->cpustate_changed = 0;
|
dc->cpustate_changed = 0;
|
||||||
|
|
||||||
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
page_start = pc_start & TARGET_PAGE_MASK;
|
||||||
num_insns = 0;
|
num_insns = 0;
|
||||||
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
||||||
if (max_insns == 0) {
|
if (max_insns == 0) {
|
||||||
|
@ -3234,7 +3234,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
} while (!dc->is_jmp && !dc->cpustate_changed
|
} while (!dc->is_jmp && !dc->cpustate_changed
|
||||||
&& !tcg_op_buf_full()
|
&& !tcg_op_buf_full()
|
||||||
&& !singlestep
|
&& !singlestep
|
||||||
&& (dc->pc < next_page_start)
|
&& (dc->pc - page_start < TARGET_PAGE_SIZE)
|
||||||
&& num_insns < max_insns);
|
&& num_insns < max_insns);
|
||||||
|
|
||||||
if (dc->clear_locked_irq) {
|
if (dc->clear_locked_irq) {
|
||||||
|
|
|
@ -4669,8 +4669,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
|
||||||
return gen_illegal(ctx);
|
return gen_illegal(ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
|
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
||||||
CPUState *cs, int max_insns)
|
|
||||||
{
|
{
|
||||||
DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
||||||
int bound;
|
int bound;
|
||||||
|
@ -4700,14 +4699,12 @@ static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
|
|
||||||
/* Bound the number of instructions by those left on the page. */
|
/* Bound the number of instructions by those left on the page. */
|
||||||
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
|
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
|
||||||
bound = MIN(max_insns, bound);
|
ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
|
||||||
|
|
||||||
ctx->ntempr = 0;
|
ctx->ntempr = 0;
|
||||||
ctx->ntempl = 0;
|
ctx->ntempl = 0;
|
||||||
memset(ctx->tempr, 0, sizeof(ctx->tempr));
|
memset(ctx->tempr, 0, sizeof(ctx->tempr));
|
||||||
memset(ctx->templ, 0, sizeof(ctx->templ));
|
memset(ctx->templ, 0, sizeof(ctx->templ));
|
||||||
|
|
||||||
return bound;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
|
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
|
||||||
|
|
|
@ -8402,8 +8402,7 @@ void tcg_x86_init(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
|
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
|
||||||
int max_insns)
|
|
||||||
{
|
{
|
||||||
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
CPUX86State *env = cpu->env_ptr;
|
CPUX86State *env = cpu->env_ptr;
|
||||||
|
@ -8470,8 +8469,6 @@ static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
|
||||||
cpu_ptr0 = tcg_temp_new_ptr();
|
cpu_ptr0 = tcg_temp_new_ptr();
|
||||||
cpu_ptr1 = tcg_temp_new_ptr();
|
cpu_ptr1 = tcg_temp_new_ptr();
|
||||||
cpu_cc_srcT = tcg_temp_local_new();
|
cpu_cc_srcT = tcg_temp_local_new();
|
||||||
|
|
||||||
return max_insns;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
|
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
|
||||||
|
|
|
@ -1055,7 +1055,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
LM32CPU *cpu = lm32_env_get_cpu(env);
|
LM32CPU *cpu = lm32_env_get_cpu(env);
|
||||||
struct DisasContext ctx, *dc = &ctx;
|
struct DisasContext ctx, *dc = &ctx;
|
||||||
uint32_t pc_start;
|
uint32_t pc_start;
|
||||||
uint32_t next_page_start;
|
uint32_t page_start;
|
||||||
int num_insns;
|
int num_insns;
|
||||||
int max_insns;
|
int max_insns;
|
||||||
|
|
||||||
|
@ -1075,7 +1075,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
pc_start &= ~3;
|
pc_start &= ~3;
|
||||||
}
|
}
|
||||||
|
|
||||||
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
page_start = pc_start & TARGET_PAGE_MASK;
|
||||||
num_insns = 0;
|
num_insns = 0;
|
||||||
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
||||||
if (max_insns == 0) {
|
if (max_insns == 0) {
|
||||||
|
@ -1115,7 +1115,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
&& !tcg_op_buf_full()
|
&& !tcg_op_buf_full()
|
||||||
&& !cs->singlestep_enabled
|
&& !cs->singlestep_enabled
|
||||||
&& !singlestep
|
&& !singlestep
|
||||||
&& (dc->pc < next_page_start)
|
&& (dc->pc - page_start < TARGET_PAGE_SIZE)
|
||||||
&& num_insns < max_insns);
|
&& num_insns < max_insns);
|
||||||
|
|
||||||
if (tb_cflags(tb) & CF_LAST_IO) {
|
if (tb_cflags(tb) & CF_LAST_IO) {
|
||||||
|
|
|
@ -1635,7 +1635,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
uint32_t pc_start;
|
uint32_t pc_start;
|
||||||
struct DisasContext ctx;
|
struct DisasContext ctx;
|
||||||
struct DisasContext *dc = &ctx;
|
struct DisasContext *dc = &ctx;
|
||||||
uint32_t next_page_start, org_flags;
|
uint32_t page_start, org_flags;
|
||||||
target_ulong npc;
|
target_ulong npc;
|
||||||
int num_insns;
|
int num_insns;
|
||||||
int max_insns;
|
int max_insns;
|
||||||
|
@ -1661,7 +1661,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
|
cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
|
||||||
}
|
}
|
||||||
|
|
||||||
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
page_start = pc_start & TARGET_PAGE_MASK;
|
||||||
num_insns = 0;
|
num_insns = 0;
|
||||||
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
||||||
if (max_insns == 0) {
|
if (max_insns == 0) {
|
||||||
|
@ -1747,7 +1747,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
} while (!dc->is_jmp && !dc->cpustate_changed
|
} while (!dc->is_jmp && !dc->cpustate_changed
|
||||||
&& !tcg_op_buf_full()
|
&& !tcg_op_buf_full()
|
||||||
&& !singlestep
|
&& !singlestep
|
||||||
&& (dc->pc < next_page_start)
|
&& (dc->pc - page_start < TARGET_PAGE_SIZE)
|
||||||
&& num_insns < max_insns);
|
&& num_insns < max_insns);
|
||||||
|
|
||||||
npc = dc->pc;
|
npc = dc->pc;
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -36,7 +36,8 @@
|
||||||
#include "exec/log.h"
|
#include "exec/log.h"
|
||||||
|
|
||||||
#define LOG_DIS(str, ...) \
|
#define LOG_DIS(str, ...) \
|
||||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__)
|
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->base.pc_next, \
|
||||||
|
## __VA_ARGS__)
|
||||||
|
|
||||||
/* is_jmp field values */
|
/* is_jmp field values */
|
||||||
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
|
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
|
||||||
|
@ -44,13 +45,10 @@
|
||||||
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
|
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
|
||||||
|
|
||||||
typedef struct DisasContext {
|
typedef struct DisasContext {
|
||||||
TranslationBlock *tb;
|
DisasContextBase base;
|
||||||
target_ulong pc;
|
|
||||||
uint32_t is_jmp;
|
|
||||||
uint32_t mem_idx;
|
uint32_t mem_idx;
|
||||||
uint32_t tb_flags;
|
uint32_t tb_flags;
|
||||||
uint32_t delayed_branch;
|
uint32_t delayed_branch;
|
||||||
bool singlestep_enabled;
|
|
||||||
} DisasContext;
|
} DisasContext;
|
||||||
|
|
||||||
static TCGv cpu_sr;
|
static TCGv cpu_sr;
|
||||||
|
@ -126,9 +124,9 @@ static void gen_exception(DisasContext *dc, unsigned int excp)
|
||||||
|
|
||||||
static void gen_illegal_exception(DisasContext *dc)
|
static void gen_illegal_exception(DisasContext *dc)
|
||||||
{
|
{
|
||||||
tcg_gen_movi_tl(cpu_pc, dc->pc);
|
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
|
||||||
gen_exception(dc, EXCP_ILLEGAL);
|
gen_exception(dc, EXCP_ILLEGAL);
|
||||||
dc->is_jmp = DISAS_UPDATE;
|
dc->base.is_jmp = DISAS_NORETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* not used yet, open it when we need or64. */
|
/* not used yet, open it when we need or64. */
|
||||||
|
@ -166,12 +164,12 @@ static void check_ov64s(DisasContext *dc)
|
||||||
|
|
||||||
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
|
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
|
||||||
{
|
{
|
||||||
if (unlikely(dc->singlestep_enabled)) {
|
if (unlikely(dc->base.singlestep_enabled)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
|
return (dc->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
|
||||||
#else
|
#else
|
||||||
return true;
|
return true;
|
||||||
#endif
|
#endif
|
||||||
|
@ -182,10 +180,10 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
|
||||||
if (use_goto_tb(dc, dest)) {
|
if (use_goto_tb(dc, dest)) {
|
||||||
tcg_gen_movi_tl(cpu_pc, dest);
|
tcg_gen_movi_tl(cpu_pc, dest);
|
||||||
tcg_gen_goto_tb(n);
|
tcg_gen_goto_tb(n);
|
||||||
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
|
tcg_gen_exit_tb((uintptr_t)dc->base.tb + n);
|
||||||
} else {
|
} else {
|
||||||
tcg_gen_movi_tl(cpu_pc, dest);
|
tcg_gen_movi_tl(cpu_pc, dest);
|
||||||
if (dc->singlestep_enabled) {
|
if (dc->base.singlestep_enabled) {
|
||||||
gen_exception(dc, EXCP_DEBUG);
|
gen_exception(dc, EXCP_DEBUG);
|
||||||
}
|
}
|
||||||
tcg_gen_exit_tb(0);
|
tcg_gen_exit_tb(0);
|
||||||
|
@ -194,16 +192,16 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
|
||||||
|
|
||||||
static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
|
static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
|
||||||
{
|
{
|
||||||
target_ulong tmp_pc = dc->pc + n26 * 4;
|
target_ulong tmp_pc = dc->base.pc_next + n26 * 4;
|
||||||
|
|
||||||
switch (op0) {
|
switch (op0) {
|
||||||
case 0x00: /* l.j */
|
case 0x00: /* l.j */
|
||||||
tcg_gen_movi_tl(jmp_pc, tmp_pc);
|
tcg_gen_movi_tl(jmp_pc, tmp_pc);
|
||||||
break;
|
break;
|
||||||
case 0x01: /* l.jal */
|
case 0x01: /* l.jal */
|
||||||
tcg_gen_movi_tl(cpu_R[9], dc->pc + 8);
|
tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
|
||||||
/* Optimize jal being used to load the PC for PIC. */
|
/* Optimize jal being used to load the PC for PIC. */
|
||||||
if (tmp_pc == dc->pc + 8) {
|
if (tmp_pc == dc->base.pc_next + 8) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
tcg_gen_movi_tl(jmp_pc, tmp_pc);
|
tcg_gen_movi_tl(jmp_pc, tmp_pc);
|
||||||
|
@ -211,7 +209,7 @@ static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
|
||||||
case 0x03: /* l.bnf */
|
case 0x03: /* l.bnf */
|
||||||
case 0x04: /* l.bf */
|
case 0x04: /* l.bf */
|
||||||
{
|
{
|
||||||
TCGv t_next = tcg_const_tl(dc->pc + 8);
|
TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
|
||||||
TCGv t_true = tcg_const_tl(tmp_pc);
|
TCGv t_true = tcg_const_tl(tmp_pc);
|
||||||
TCGv t_zero = tcg_const_tl(0);
|
TCGv t_zero = tcg_const_tl(0);
|
||||||
|
|
||||||
|
@ -227,7 +225,7 @@ static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
|
||||||
tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
|
tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
|
||||||
break;
|
break;
|
||||||
case 0x12: /* l.jalr */
|
case 0x12: /* l.jalr */
|
||||||
tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
|
tcg_gen_movi_tl(cpu_R[9], (dc->base.pc_next + 8));
|
||||||
tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
|
tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -795,7 +793,7 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
gen_helper_rfe(cpu_env);
|
gen_helper_rfe(cpu_env);
|
||||||
dc->is_jmp = DISAS_UPDATE;
|
dc->base.is_jmp = DISAS_UPDATE;
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
@ -1254,15 +1252,16 @@ static void dec_sys(DisasContext *dc, uint32_t insn)
|
||||||
switch (op0) {
|
switch (op0) {
|
||||||
case 0x000: /* l.sys */
|
case 0x000: /* l.sys */
|
||||||
LOG_DIS("l.sys %d\n", K16);
|
LOG_DIS("l.sys %d\n", K16);
|
||||||
tcg_gen_movi_tl(cpu_pc, dc->pc);
|
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
|
||||||
gen_exception(dc, EXCP_SYSCALL);
|
gen_exception(dc, EXCP_SYSCALL);
|
||||||
dc->is_jmp = DISAS_UPDATE;
|
dc->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case 0x100: /* l.trap */
|
case 0x100: /* l.trap */
|
||||||
LOG_DIS("l.trap %d\n", K16);
|
LOG_DIS("l.trap %d\n", K16);
|
||||||
tcg_gen_movi_tl(cpu_pc, dc->pc);
|
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
|
||||||
gen_exception(dc, EXCP_TRAP);
|
gen_exception(dc, EXCP_TRAP);
|
||||||
|
dc->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case 0x300: /* l.csync */
|
case 0x300: /* l.csync */
|
||||||
|
@ -1479,7 +1478,7 @@ static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
|
||||||
{
|
{
|
||||||
uint32_t op0;
|
uint32_t op0;
|
||||||
uint32_t insn;
|
uint32_t insn;
|
||||||
insn = cpu_ldl_code(&cpu->env, dc->pc);
|
insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);
|
||||||
op0 = extract32(insn, 26, 6);
|
op0 = extract32(insn, 26, 6);
|
||||||
|
|
||||||
switch (op0) {
|
switch (op0) {
|
||||||
|
@ -1521,45 +1520,22 @@ static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
|
||||||
{
|
{
|
||||||
|
DisasContext *dc = container_of(dcb, DisasContext, base);
|
||||||
CPUOpenRISCState *env = cs->env_ptr;
|
CPUOpenRISCState *env = cs->env_ptr;
|
||||||
OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
|
int bound;
|
||||||
struct DisasContext ctx, *dc = &ctx;
|
|
||||||
uint32_t pc_start;
|
|
||||||
uint32_t next_page_start;
|
|
||||||
int num_insns;
|
|
||||||
int max_insns;
|
|
||||||
|
|
||||||
pc_start = tb->pc;
|
dc->mem_idx = cpu_mmu_index(env, false);
|
||||||
dc->tb = tb;
|
dc->tb_flags = dc->base.tb->flags;
|
||||||
|
|
||||||
dc->is_jmp = DISAS_NEXT;
|
|
||||||
dc->pc = pc_start;
|
|
||||||
dc->mem_idx = cpu_mmu_index(&cpu->env, false);
|
|
||||||
dc->tb_flags = tb->flags;
|
|
||||||
dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
|
dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
|
||||||
dc->singlestep_enabled = cs->singlestep_enabled;
|
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
|
||||||
|
dc->base.max_insns = MIN(dc->base.max_insns, bound);
|
||||||
|
}
|
||||||
|
|
||||||
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
|
||||||
num_insns = 0;
|
{
|
||||||
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
|
DisasContext *dc = container_of(db, DisasContext, base);
|
||||||
|
|
||||||
if (max_insns == 0) {
|
|
||||||
max_insns = CF_COUNT_MASK;
|
|
||||||
}
|
|
||||||
if (max_insns > TCG_MAX_INSNS) {
|
|
||||||
max_insns = TCG_MAX_INSNS;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
|
|
||||||
&& qemu_log_in_addr_range(pc_start)) {
|
|
||||||
qemu_log_lock();
|
|
||||||
qemu_log("----------------\n");
|
|
||||||
qemu_log("IN: %s\n", lookup_symbol(pc_start));
|
|
||||||
}
|
|
||||||
|
|
||||||
gen_tb_start(tb);
|
|
||||||
|
|
||||||
/* Allow the TCG optimizer to see that R0 == 0,
|
/* Allow the TCG optimizer to see that R0 == 0,
|
||||||
when it's true, which is the common case. */
|
when it's true, which is the common case. */
|
||||||
|
@ -1568,92 +1544,110 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
} else {
|
} else {
|
||||||
cpu_R[0] = cpu_R0;
|
cpu_R[0] = cpu_R0;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
do {
|
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
|
||||||
tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0)
|
{
|
||||||
| (num_insns ? 2 : 0));
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
num_insns++;
|
|
||||||
|
|
||||||
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
|
tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
|
||||||
tcg_gen_movi_tl(cpu_pc, dc->pc);
|
| (dc->base.num_insns > 1 ? 2 : 0));
|
||||||
gen_exception(dc, EXCP_DEBUG);
|
}
|
||||||
dc->is_jmp = DISAS_UPDATE;
|
|
||||||
/* The address covered by the breakpoint must be included in
|
static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
|
||||||
[tb->pc, tb->pc + tb->size) in order to for it to be
|
const CPUBreakpoint *bp)
|
||||||
properly cleared -- thus we increment the PC here so that
|
{
|
||||||
the logic setting tb->size below does the right thing. */
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
dc->pc += 4;
|
|
||||||
break;
|
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
|
||||||
|
gen_exception(dc, EXCP_DEBUG);
|
||||||
|
dc->base.is_jmp = DISAS_NORETURN;
|
||||||
|
/* The address covered by the breakpoint must be included in
|
||||||
|
[tb->pc, tb->pc + tb->size) in order to for it to be
|
||||||
|
properly cleared -- thus we increment the PC here so that
|
||||||
|
the logic setting tb->size below does the right thing. */
|
||||||
|
dc->base.pc_next += 4;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
|
||||||
|
{
|
||||||
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
|
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
|
||||||
|
|
||||||
|
disas_openrisc_insn(dc, cpu);
|
||||||
|
dc->base.pc_next += 4;
|
||||||
|
|
||||||
|
/* delay slot */
|
||||||
|
if (dc->delayed_branch) {
|
||||||
|
dc->delayed_branch--;
|
||||||
|
if (!dc->delayed_branch) {
|
||||||
|
tcg_gen_mov_tl(cpu_pc, jmp_pc);
|
||||||
|
tcg_gen_discard_tl(jmp_pc);
|
||||||
|
dc->base.is_jmp = DISAS_UPDATE;
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
|
|
||||||
gen_io_start();
|
|
||||||
}
|
|
||||||
disas_openrisc_insn(dc, cpu);
|
|
||||||
dc->pc = dc->pc + 4;
|
|
||||||
|
|
||||||
/* delay slot */
|
|
||||||
if (dc->delayed_branch) {
|
|
||||||
dc->delayed_branch--;
|
|
||||||
if (!dc->delayed_branch) {
|
|
||||||
tcg_gen_mov_tl(cpu_pc, jmp_pc);
|
|
||||||
tcg_gen_discard_tl(jmp_pc);
|
|
||||||
dc->is_jmp = DISAS_UPDATE;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} while (!dc->is_jmp
|
|
||||||
&& !tcg_op_buf_full()
|
|
||||||
&& !cs->singlestep_enabled
|
|
||||||
&& !singlestep
|
|
||||||
&& (dc->pc < next_page_start)
|
|
||||||
&& num_insns < max_insns);
|
|
||||||
|
|
||||||
if (tb_cflags(tb) & CF_LAST_IO) {
|
|
||||||
gen_io_end();
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
|
||||||
|
{
|
||||||
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||||
|
|
||||||
if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
|
if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
|
||||||
tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
|
tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
tcg_gen_movi_tl(cpu_ppc, dc->pc - 4);
|
tcg_gen_movi_tl(cpu_ppc, dc->base.pc_next - 4);
|
||||||
if (dc->is_jmp == DISAS_NEXT) {
|
if (dc->base.is_jmp == DISAS_NEXT) {
|
||||||
dc->is_jmp = DISAS_UPDATE;
|
dc->base.is_jmp = DISAS_UPDATE;
|
||||||
tcg_gen_movi_tl(cpu_pc, dc->pc);
|
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
|
||||||
}
|
}
|
||||||
if (unlikely(cs->singlestep_enabled)) {
|
if (unlikely(dc->base.singlestep_enabled)) {
|
||||||
gen_exception(dc, EXCP_DEBUG);
|
gen_exception(dc, EXCP_DEBUG);
|
||||||
} else {
|
} else {
|
||||||
switch (dc->is_jmp) {
|
switch (dc->base.is_jmp) {
|
||||||
case DISAS_NEXT:
|
case DISAS_TOO_MANY:
|
||||||
gen_goto_tb(dc, 0, dc->pc);
|
gen_goto_tb(dc, 0, dc->base.pc_next);
|
||||||
break;
|
break;
|
||||||
default:
|
case DISAS_NORETURN:
|
||||||
case DISAS_JUMP:
|
case DISAS_JUMP:
|
||||||
|
case DISAS_TB_JUMP:
|
||||||
break;
|
break;
|
||||||
case DISAS_UPDATE:
|
case DISAS_UPDATE:
|
||||||
/* indicate that the hash table must be used
|
/* indicate that the hash table must be used
|
||||||
to find the next TB */
|
to find the next TB */
|
||||||
tcg_gen_exit_tb(0);
|
tcg_gen_exit_tb(0);
|
||||||
break;
|
break;
|
||||||
case DISAS_TB_JUMP:
|
default:
|
||||||
/* nothing more to generate */
|
g_assert_not_reached();
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
gen_tb_end(tb, num_insns);
|
static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
|
||||||
|
{
|
||||||
|
DisasContext *s = container_of(dcbase, DisasContext, base);
|
||||||
|
|
||||||
tb->size = dc->pc - pc_start;
|
qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
|
||||||
tb->icount = num_insns;
|
log_target_disas(cs, s->base.pc_first, s->base.tb->size);
|
||||||
|
}
|
||||||
|
|
||||||
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
|
static const TranslatorOps openrisc_tr_ops = {
|
||||||
&& qemu_log_in_addr_range(pc_start)) {
|
.init_disas_context = openrisc_tr_init_disas_context,
|
||||||
log_target_disas(cs, pc_start, tb->size);
|
.tb_start = openrisc_tr_tb_start,
|
||||||
qemu_log("\n");
|
.insn_start = openrisc_tr_insn_start,
|
||||||
qemu_log_unlock();
|
.breakpoint_check = openrisc_tr_breakpoint_check,
|
||||||
}
|
.translate_insn = openrisc_tr_translate_insn,
|
||||||
|
.tb_stop = openrisc_tr_tb_stop,
|
||||||
|
.disas_log = openrisc_tr_disas_log,
|
||||||
|
};
|
||||||
|
|
||||||
|
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||||
|
{
|
||||||
|
DisasContext ctx;
|
||||||
|
|
||||||
|
translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
|
||||||
}
|
}
|
||||||
|
|
||||||
void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
|
void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
|
||||||
|
|
|
@ -7215,8 +7215,7 @@ void ppc_cpu_dump_statistics(CPUState *cs, FILE*f,
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ppc_tr_init_disas_context(DisasContextBase *dcbase,
|
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
||||||
CPUState *cs, int max_insns)
|
|
||||||
{
|
{
|
||||||
DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
||||||
CPUPPCState *env = cs->env_ptr;
|
CPUPPCState *env = cs->env_ptr;
|
||||||
|
@ -7281,7 +7280,7 @@ static int ppc_tr_init_disas_context(DisasContextBase *dcbase,
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
|
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
|
||||||
return MIN(max_insns, bound);
|
ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
|
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
|
||||||
|
|
|
@ -26,6 +26,7 @@
|
||||||
#include "exec/helper-proto.h"
|
#include "exec/helper-proto.h"
|
||||||
#include "exec/helper-gen.h"
|
#include "exec/helper-gen.h"
|
||||||
|
|
||||||
|
#include "exec/translator.h"
|
||||||
#include "exec/log.h"
|
#include "exec/log.h"
|
||||||
|
|
||||||
#include "instmap.h"
|
#include "instmap.h"
|
||||||
|
@ -39,14 +40,12 @@ static TCGv load_val;
|
||||||
#include "exec/gen-icount.h"
|
#include "exec/gen-icount.h"
|
||||||
|
|
||||||
typedef struct DisasContext {
|
typedef struct DisasContext {
|
||||||
struct TranslationBlock *tb;
|
DisasContextBase base;
|
||||||
target_ulong pc;
|
/* pc_succ_insn points to the instruction following base.pc_next */
|
||||||
target_ulong next_pc;
|
target_ulong pc_succ_insn;
|
||||||
uint32_t opcode;
|
uint32_t opcode;
|
||||||
uint32_t flags;
|
uint32_t flags;
|
||||||
uint32_t mem_idx;
|
uint32_t mem_idx;
|
||||||
int singlestep_enabled;
|
|
||||||
int bstate;
|
|
||||||
/* Remember the rounding mode encoded in the previous fp instruction,
|
/* Remember the rounding mode encoded in the previous fp instruction,
|
||||||
which we have already installed into env->fp_status. Or -1 for
|
which we have already installed into env->fp_status. Or -1 for
|
||||||
no previous fp instruction. Note that we exit the TB when writing
|
no previous fp instruction. Note that we exit the TB when writing
|
||||||
|
@ -55,13 +54,6 @@ typedef struct DisasContext {
|
||||||
int frm;
|
int frm;
|
||||||
} DisasContext;
|
} DisasContext;
|
||||||
|
|
||||||
enum {
|
|
||||||
BS_NONE = 0, /* When seen outside of translation while loop, indicates
|
|
||||||
need to exit tb due to end of page. */
|
|
||||||
BS_STOP = 1, /* Need to exit tb for syscall, sret, etc. */
|
|
||||||
BS_BRANCH = 2, /* Need to exit tb for branch, jal, etc. */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* convert riscv funct3 to qemu memop for load/store */
|
/* convert riscv funct3 to qemu memop for load/store */
|
||||||
static const int tcg_memop_lookup[8] = {
|
static const int tcg_memop_lookup[8] = {
|
||||||
[0 ... 7] = -1,
|
[0 ... 7] = -1,
|
||||||
|
@ -84,21 +76,21 @@ static const int tcg_memop_lookup[8] = {
|
||||||
|
|
||||||
static void generate_exception(DisasContext *ctx, int excp)
|
static void generate_exception(DisasContext *ctx, int excp)
|
||||||
{
|
{
|
||||||
tcg_gen_movi_tl(cpu_pc, ctx->pc);
|
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
|
||||||
TCGv_i32 helper_tmp = tcg_const_i32(excp);
|
TCGv_i32 helper_tmp = tcg_const_i32(excp);
|
||||||
gen_helper_raise_exception(cpu_env, helper_tmp);
|
gen_helper_raise_exception(cpu_env, helper_tmp);
|
||||||
tcg_temp_free_i32(helper_tmp);
|
tcg_temp_free_i32(helper_tmp);
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
|
static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
|
||||||
{
|
{
|
||||||
tcg_gen_movi_tl(cpu_pc, ctx->pc);
|
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
|
||||||
tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
|
tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
|
||||||
TCGv_i32 helper_tmp = tcg_const_i32(excp);
|
TCGv_i32 helper_tmp = tcg_const_i32(excp);
|
||||||
gen_helper_raise_exception(cpu_env, helper_tmp);
|
gen_helper_raise_exception(cpu_env, helper_tmp);
|
||||||
tcg_temp_free_i32(helper_tmp);
|
tcg_temp_free_i32(helper_tmp);
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void gen_exception_debug(void)
|
static void gen_exception_debug(void)
|
||||||
|
@ -120,12 +112,12 @@ static void gen_exception_inst_addr_mis(DisasContext *ctx)
|
||||||
|
|
||||||
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
|
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
|
||||||
{
|
{
|
||||||
if (unlikely(ctx->singlestep_enabled)) {
|
if (unlikely(ctx->base.singlestep_enabled)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
|
return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
|
||||||
#else
|
#else
|
||||||
return true;
|
return true;
|
||||||
#endif
|
#endif
|
||||||
|
@ -137,10 +129,10 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
|
||||||
/* chaining is only allowed when the jump is to the same page */
|
/* chaining is only allowed when the jump is to the same page */
|
||||||
tcg_gen_goto_tb(n);
|
tcg_gen_goto_tb(n);
|
||||||
tcg_gen_movi_tl(cpu_pc, dest);
|
tcg_gen_movi_tl(cpu_pc, dest);
|
||||||
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
|
tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n);
|
||||||
} else {
|
} else {
|
||||||
tcg_gen_movi_tl(cpu_pc, dest);
|
tcg_gen_movi_tl(cpu_pc, dest);
|
||||||
if (ctx->singlestep_enabled) {
|
if (ctx->base.singlestep_enabled) {
|
||||||
gen_exception_debug();
|
gen_exception_debug();
|
||||||
} else {
|
} else {
|
||||||
tcg_gen_exit_tb(0);
|
tcg_gen_exit_tb(0);
|
||||||
|
@ -519,7 +511,7 @@ static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
|
||||||
target_ulong next_pc;
|
target_ulong next_pc;
|
||||||
|
|
||||||
/* check misaligned: */
|
/* check misaligned: */
|
||||||
next_pc = ctx->pc + imm;
|
next_pc = ctx->base.pc_next + imm;
|
||||||
if (!riscv_has_ext(env, RVC)) {
|
if (!riscv_has_ext(env, RVC)) {
|
||||||
if ((next_pc & 0x3) != 0) {
|
if ((next_pc & 0x3) != 0) {
|
||||||
gen_exception_inst_addr_mis(ctx);
|
gen_exception_inst_addr_mis(ctx);
|
||||||
|
@ -527,11 +519,11 @@ static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (rd != 0) {
|
if (rd != 0) {
|
||||||
tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
|
tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
|
||||||
}
|
}
|
||||||
|
|
||||||
gen_goto_tb(ctx, 0, ctx->pc + imm); /* must use this for safety */
|
gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
|
@ -554,7 +546,7 @@ static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rd != 0) {
|
if (rd != 0) {
|
||||||
tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
|
tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
|
||||||
}
|
}
|
||||||
tcg_gen_exit_tb(0);
|
tcg_gen_exit_tb(0);
|
||||||
|
|
||||||
|
@ -562,7 +554,7 @@ static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
gen_set_label(misaligned);
|
gen_set_label(misaligned);
|
||||||
gen_exception_inst_addr_mis(ctx);
|
gen_exception_inst_addr_mis(ctx);
|
||||||
}
|
}
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
|
@ -608,15 +600,15 @@ static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
tcg_temp_free(source1);
|
tcg_temp_free(source1);
|
||||||
tcg_temp_free(source2);
|
tcg_temp_free(source2);
|
||||||
|
|
||||||
gen_goto_tb(ctx, 1, ctx->next_pc);
|
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
|
||||||
gen_set_label(l); /* branch taken */
|
gen_set_label(l); /* branch taken */
|
||||||
if (!riscv_has_ext(env, RVC) && ((ctx->pc + bimm) & 0x3)) {
|
if (!riscv_has_ext(env, RVC) && ((ctx->base.pc_next + bimm) & 0x3)) {
|
||||||
/* misaligned */
|
/* misaligned */
|
||||||
gen_exception_inst_addr_mis(ctx);
|
gen_exception_inst_addr_mis(ctx);
|
||||||
} else {
|
} else {
|
||||||
gen_goto_tb(ctx, 0, ctx->pc + bimm);
|
gen_goto_tb(ctx, 0, ctx->base.pc_next + bimm);
|
||||||
}
|
}
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
|
static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
|
||||||
|
@ -842,7 +834,7 @@ static void gen_atomic(DisasContext *ctx, uint32_t opc,
|
||||||
if (rl) {
|
if (rl) {
|
||||||
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
|
||||||
}
|
}
|
||||||
if (tb_cflags(ctx->tb) & CF_PARALLEL) {
|
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
|
||||||
l1 = gen_new_label();
|
l1 = gen_new_label();
|
||||||
gen_set_label(l1);
|
gen_set_label(l1);
|
||||||
} else {
|
} else {
|
||||||
|
@ -859,7 +851,7 @@ static void gen_atomic(DisasContext *ctx, uint32_t opc,
|
||||||
tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
|
tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
|
||||||
tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);
|
tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);
|
||||||
|
|
||||||
if (tb_cflags(ctx->tb) & CF_PARALLEL) {
|
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
|
||||||
/* Parallel context. Make this operation atomic by verifying
|
/* Parallel context. Make this operation atomic by verifying
|
||||||
that the memory didn't change while we computed the result. */
|
that the memory didn't change while we computed the result. */
|
||||||
tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);
|
tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);
|
||||||
|
@ -1323,7 +1315,7 @@ static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
rs1_pass = tcg_temp_new();
|
rs1_pass = tcg_temp_new();
|
||||||
imm_rs1 = tcg_temp_new();
|
imm_rs1 = tcg_temp_new();
|
||||||
gen_get_gpr(source1, rs1);
|
gen_get_gpr(source1, rs1);
|
||||||
tcg_gen_movi_tl(cpu_pc, ctx->pc);
|
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
|
||||||
tcg_gen_movi_tl(rs1_pass, rs1);
|
tcg_gen_movi_tl(rs1_pass, rs1);
|
||||||
tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */
|
tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */
|
||||||
|
|
||||||
|
@ -1344,12 +1336,12 @@ static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
/* always generates U-level ECALL, fixed in do_interrupt handler */
|
/* always generates U-level ECALL, fixed in do_interrupt handler */
|
||||||
generate_exception(ctx, RISCV_EXCP_U_ECALL);
|
generate_exception(ctx, RISCV_EXCP_U_ECALL);
|
||||||
tcg_gen_exit_tb(0); /* no chaining */
|
tcg_gen_exit_tb(0); /* no chaining */
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
case 0x1: /* EBREAK */
|
case 0x1: /* EBREAK */
|
||||||
generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
|
generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
|
||||||
tcg_gen_exit_tb(0); /* no chaining */
|
tcg_gen_exit_tb(0); /* no chaining */
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
case 0x002: /* URET */
|
case 0x002: /* URET */
|
||||||
|
@ -1359,7 +1351,7 @@ static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
if (riscv_has_ext(env, RVS)) {
|
if (riscv_has_ext(env, RVS)) {
|
||||||
gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
|
gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
|
||||||
tcg_gen_exit_tb(0); /* no chaining */
|
tcg_gen_exit_tb(0); /* no chaining */
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
} else {
|
} else {
|
||||||
gen_exception_illegal(ctx);
|
gen_exception_illegal(ctx);
|
||||||
}
|
}
|
||||||
|
@ -1370,13 +1362,13 @@ static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
case 0x302: /* MRET */
|
case 0x302: /* MRET */
|
||||||
gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
|
gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
|
||||||
tcg_gen_exit_tb(0); /* no chaining */
|
tcg_gen_exit_tb(0); /* no chaining */
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
case 0x7b2: /* DRET */
|
case 0x7b2: /* DRET */
|
||||||
gen_exception_illegal(ctx);
|
gen_exception_illegal(ctx);
|
||||||
break;
|
break;
|
||||||
case 0x105: /* WFI */
|
case 0x105: /* WFI */
|
||||||
tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
|
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
|
||||||
gen_helper_wfi(cpu_env);
|
gen_helper_wfi(cpu_env);
|
||||||
break;
|
break;
|
||||||
case 0x104: /* SFENCE.VM */
|
case 0x104: /* SFENCE.VM */
|
||||||
|
@ -1417,9 +1409,9 @@ static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
|
||||||
gen_io_end();
|
gen_io_end();
|
||||||
gen_set_gpr(rd, dest);
|
gen_set_gpr(rd, dest);
|
||||||
/* end tb since we may be changing priv modes, to get mmu_index right */
|
/* end tb since we may be changing priv modes, to get mmu_index right */
|
||||||
tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
|
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
|
||||||
tcg_gen_exit_tb(0); /* no chaining */
|
tcg_gen_exit_tb(0); /* no chaining */
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
tcg_temp_free(source1);
|
tcg_temp_free(source1);
|
||||||
|
@ -1737,7 +1729,7 @@ static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
|
||||||
break; /* NOP */
|
break; /* NOP */
|
||||||
}
|
}
|
||||||
tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
|
tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
|
||||||
ctx->pc);
|
ctx->base.pc_next);
|
||||||
break;
|
break;
|
||||||
case OPC_RISC_JAL:
|
case OPC_RISC_JAL:
|
||||||
imm = GET_JAL_IMM(ctx->opcode);
|
imm = GET_JAL_IMM(ctx->opcode);
|
||||||
|
@ -1810,9 +1802,9 @@ static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
|
||||||
if (ctx->opcode & 0x1000) {
|
if (ctx->opcode & 0x1000) {
|
||||||
/* FENCE_I is a no-op in QEMU,
|
/* FENCE_I is a no-op in QEMU,
|
||||||
* however we need to end the translation block */
|
* however we need to end the translation block */
|
||||||
tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
|
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
|
||||||
tcg_gen_exit_tb(0);
|
tcg_gen_exit_tb(0);
|
||||||
ctx->bstate = BS_BRANCH;
|
ctx->base.is_jmp = DISAS_NORETURN;
|
||||||
} else {
|
} else {
|
||||||
/* FENCE is a full memory barrier. */
|
/* FENCE is a full memory barrier. */
|
||||||
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
|
||||||
|
@ -1836,120 +1828,113 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx)
|
||||||
if (!riscv_has_ext(env, RVC)) {
|
if (!riscv_has_ext(env, RVC)) {
|
||||||
gen_exception_illegal(ctx);
|
gen_exception_illegal(ctx);
|
||||||
} else {
|
} else {
|
||||||
ctx->next_pc = ctx->pc + 2;
|
ctx->pc_succ_insn = ctx->base.pc_next + 2;
|
||||||
             decode_RV32_64C(env, ctx);
         }
     } else {
-        ctx->next_pc = ctx->pc + 4;
+        ctx->pc_succ_insn = ctx->base.pc_next + 4;
         decode_RV32_64G(env, ctx);
     }
 }
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
-    CPURISCVState *env = cs->env_ptr;
-    DisasContext ctx;
-    target_ulong pc_start;
-    target_ulong next_page_start;
-    int num_insns;
-    int max_insns;
-    pc_start = tb->pc;
-    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-    ctx.pc = pc_start;
-
-    /* once we have GDB, the rest of the translate.c implementation should be
-       ready for singlestep */
-    ctx.singlestep_enabled = cs->singlestep_enabled;
-
-    ctx.tb = tb;
-    ctx.bstate = BS_NONE;
-    ctx.flags = tb->flags;
-    ctx.mem_idx = tb->flags & TB_FLAGS_MMU_MASK;
-    ctx.frm = -1;  /* unknown rounding mode */
-
-    num_insns = 0;
-    max_insns = tb->cflags & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    if (max_insns > TCG_MAX_INSNS) {
-        max_insns = TCG_MAX_INSNS;
-    }
-    gen_tb_start(tb);
-
-    while (ctx.bstate == BS_NONE) {
-        tcg_gen_insn_start(ctx.pc);
-        num_insns++;
-
-        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
-            tcg_gen_movi_tl(cpu_pc, ctx.pc);
-            ctx.bstate = BS_BRANCH;
-            gen_exception_debug();
-            /* The address covered by the breakpoint must be included in
-               [tb->pc, tb->pc + tb->size) in order to for it to be
-               properly cleared -- thus we increment the PC here so that
-               the logic setting tb->size below does the right thing. */
-            ctx.pc += 4;
-            goto done_generating;
-        }
-
-        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
-            gen_io_start();
-        }
-
-        ctx.opcode = cpu_ldl_code(env, ctx.pc);
-        decode_opc(env, &ctx);
-        ctx.pc = ctx.next_pc;
-
-        if (cs->singlestep_enabled) {
-            break;
-        }
-        if (ctx.pc >= next_page_start) {
-            break;
-        }
-        if (tcg_op_buf_full()) {
-            break;
-        }
-        if (num_insns >= max_insns) {
-            break;
-        }
-        if (singlestep) {
-            break;
-        }
-    }
-    if (tb->cflags & CF_LAST_IO) {
-        gen_io_end();
-    }
-    switch (ctx.bstate) {
-    case BS_STOP:
-        gen_goto_tb(&ctx, 0, ctx.pc);
-        break;
-    case BS_NONE: /* handle end of page - DO NOT CHAIN. See gen_goto_tb. */
-        tcg_gen_movi_tl(cpu_pc, ctx.pc);
-        if (cs->singlestep_enabled) {
-            gen_exception_debug();
-        } else {
-            tcg_gen_exit_tb(0);
-        }
-        break;
-    case BS_BRANCH: /* ops using BS_BRANCH generate own exit seq */
-    default:
-        break;
-    }
-done_generating:
-    gen_tb_end(tb, num_insns);
-    tb->size = ctx.pc - pc_start;
-    tb->icount = num_insns;
-
-#ifdef DEBUG_DISAS
-    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
-        && qemu_log_in_addr_range(pc_start)) {
-        qemu_log("IN: %s\n", lookup_symbol(pc_start));
-        log_target_disas(cs, pc_start, ctx.pc - pc_start);
-        qemu_log("\n");
-    }
-#endif
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    ctx->pc_succ_insn = ctx->base.pc_first;
+    ctx->flags = ctx->base.tb->flags;
+    ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
+    ctx->frm = -1;  /* unknown rounding mode */
+}
+
+static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    tcg_gen_insn_start(ctx->base.pc_next);
+}
+
+static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+                                      const CPUBreakpoint *bp)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
+    ctx->base.is_jmp = DISAS_NORETURN;
+    gen_exception_debug();
+    /* The address covered by the breakpoint must be included in
+       [tb->pc, tb->pc + tb->size) in order to for it to be
+       properly cleared -- thus we increment the PC here so that
+       the logic setting tb->size below does the right thing. */
+    ctx->base.pc_next += 4;
+    return true;
+}
+
+static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    CPURISCVState *env = cpu->env_ptr;
+
+    ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
+    decode_opc(env, ctx);
+    ctx->base.pc_next = ctx->pc_succ_insn;
+
+    if (ctx->base.is_jmp == DISAS_NEXT) {
+        target_ulong page_start;
+
+        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
+            ctx->base.is_jmp = DISAS_TOO_MANY;
+        }
+    }
+}
+
+static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    switch (ctx->base.is_jmp) {
+    case DISAS_TOO_MANY:
+        tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
+        if (ctx->base.singlestep_enabled) {
+            gen_exception_debug();
+        } else {
+            tcg_gen_exit_tb(0);
+        }
+        break;
+    case DISAS_NORETURN:
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps riscv_tr_ops = {
+    .init_disas_context = riscv_tr_init_disas_context,
+    .tb_start           = riscv_tr_tb_start,
+    .insn_start         = riscv_tr_insn_start,
+    .breakpoint_check   = riscv_tr_breakpoint_check,
+    .translate_insn     = riscv_tr_translate_insn,
+    .tb_stop            = riscv_tr_tb_stop,
+    .disas_log          = riscv_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    DisasContext ctx;
+
+    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
 }
 
 void riscv_translate_init(void)
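The target conversions shown here all have the same shape: the target stops open-coding its own translation loop and instead fills in a table of hooks that a common loop drives. Below is a minimal, self-contained sketch of that shape; the struct and function names are illustrative only and are not QEMU's real definitions.

/* Minimal sketch of the "ops table + common loop" structure used above.
 * All names here are illustrative, not QEMU's actual API. */
#include <stdio.h>
#include <stdbool.h>

typedef struct Ctx {
    unsigned pc_next;   /* next instruction address */
    int max_insns;      /* instruction budget for this block */
    bool stop;          /* a hook may set this to end the block */
} Ctx;

typedef struct Ops {
    void (*init)(Ctx *c);
    void (*insn_start)(Ctx *c);
    void (*translate_insn)(Ctx *c);
    void (*tb_stop)(Ctx *c);
} Ops;

/* The common loop: per-target knowledge lives only behind the hooks. */
static void common_loop(const Ops *ops, Ctx *c)
{
    int n = 0;

    ops->init(c);
    while (!c->stop && n < c->max_insns) {
        ops->insn_start(c);
        ops->translate_insn(c);
        n++;
    }
    ops->tb_stop(c);
}

/* A toy "target" with fixed 4-byte instructions. */
static void t_init(Ctx *c)  { c->stop = false; }
static void t_start(Ctx *c) { printf("insn_start at 0x%x\n", c->pc_next); }
static void t_insn(Ctx *c)  { c->pc_next += 4; }
static void t_stop(Ctx *c)  { printf("block ends at 0x%x\n", c->pc_next); }

int main(void)
{
    const Ops toy = { t_init, t_start, t_insn, t_stop };
    Ctx c = { .pc_next = 0x1000, .max_insns = 3 };

    common_loop(&toy, &c);
    return 0;
}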
File diff suppressed because it is too large
target/sh4/translate.c
@@ -2258,126 +2258,127 @@ static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
 }
 #endif
 
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
     CPUSH4State *env = cs->env_ptr;
-    DisasContext ctx;
-    target_ulong pc_start;
-    int num_insns;
-    int max_insns;
+    int bound;
 
-    pc_start = tb->pc;
-    ctx.base.pc_next = pc_start;
-    ctx.tbflags = (uint32_t)tb->flags;
-    ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
-    ctx.base.is_jmp = DISAS_NEXT;
-    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
+    ctx->tbflags = (uint32_t)ctx->base.tb->flags;
+    ctx->envflags = ctx->base.tb->flags & TB_FLAG_ENVFLAGS_MASK;
+    ctx->memidx = (ctx->tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
     /* We don't know if the delayed pc came from a dynamic or static branch,
        so assume it is a dynamic branch.  */
-    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
-    ctx.base.tb = tb;
-    ctx.base.singlestep_enabled = cs->singlestep_enabled;
-    ctx.features = env->features;
-    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
-    ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
-                 (ctx.tbflags & (1 << SR_RB))) * 0x10;
-    ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
-
-    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    max_insns = MIN(max_insns, TCG_MAX_INSNS);
+    ctx->delayed_pc = -1; /* use delayed pc from env pointer */
+    ctx->features = env->features;
+    ctx->has_movcal = (ctx->tbflags & TB_FLAG_PENDING_MOVCA);
+    ctx->gbank = ((ctx->tbflags & (1 << SR_MD)) &&
+                  (ctx->tbflags & (1 << SR_RB))) * 0x10;
+    ctx->fbank = ctx->tbflags & FPSCR_FR ? 0x10 : 0;
 
     /* Since the ISA is fixed-width, we can bound by the number
        of instructions remaining on the page.  */
-    num_insns = -(ctx.base.pc_next | TARGET_PAGE_MASK) / 2;
-    max_insns = MIN(max_insns, num_insns);
-
-    /* Single stepping means just that.  */
-    if (ctx.base.singlestep_enabled || singlestep) {
-        max_insns = 1;
-    }
-
-    gen_tb_start(tb);
-    num_insns = 0;
+    bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
+    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
+}
 
+static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
 #ifdef CONFIG_USER_ONLY
-    if (ctx.tbflags & GUSA_MASK) {
-        num_insns = decode_gusa(&ctx, env, &max_insns);
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    CPUSH4State *env = cs->env_ptr;
+
+    if (ctx->tbflags & GUSA_MASK) {
+        ctx->base.num_insns = decode_gusa(ctx, env, &ctx->base.max_insns);
     }
 #endif
+}
 
-    while (ctx.base.is_jmp == DISAS_NEXT
-           && num_insns < max_insns
-           && !tcg_op_buf_full()) {
-        tcg_gen_insn_start(ctx.base.pc_next, ctx.envflags);
-        num_insns++;
+static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-        if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) {
-            /* We have hit a breakpoint - make sure PC is up-to-date */
-            gen_save_cpu_state(&ctx, true);
-            gen_helper_debug(cpu_env);
-            ctx.base.is_jmp = DISAS_NORETURN;
-            /* The address covered by the breakpoint must be included in
-               [tb->pc, tb->pc + tb->size) in order to for it to be
-               properly cleared -- thus we increment the PC here so that
-               the logic setting tb->size below does the right thing.  */
-            ctx.base.pc_next += 2;
-            break;
-        }
+    tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
+}
 
-        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
-            gen_io_start();
-        }
+static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
+                                    const CPUBreakpoint *bp)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-        ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
-        decode_opc(&ctx);
-        ctx.base.pc_next += 2;
-    }
-    if (tb_cflags(tb) & CF_LAST_IO) {
-        gen_io_end();
-    }
+    /* We have hit a breakpoint - make sure PC is up-to-date */
+    gen_save_cpu_state(ctx, true);
+    gen_helper_debug(cpu_env);
+    ctx->base.is_jmp = DISAS_NORETURN;
+    /* The address covered by the breakpoint must be included in
+       [tb->pc, tb->pc + tb->size) in order to for it to be
+       properly cleared -- thus we increment the PC here so that
+       the logic setting tb->size below does the right thing.  */
+    ctx->base.pc_next += 2;
+    return true;
+}
 
-    if (ctx.tbflags & GUSA_EXCLUSIVE) {
+static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    CPUSH4State *env = cs->env_ptr;
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
+    decode_opc(ctx);
+    ctx->base.pc_next += 2;
+}
+
+static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    if (ctx->tbflags & GUSA_EXCLUSIVE) {
         /* Ending the region of exclusivity.  Clear the bits.  */
-        ctx.envflags &= ~GUSA_MASK;
+        ctx->envflags &= ~GUSA_MASK;
     }
 
-    switch (ctx.base.is_jmp) {
+    switch (ctx->base.is_jmp) {
     case DISAS_STOP:
-        gen_save_cpu_state(&ctx, true);
-        if (ctx.base.singlestep_enabled) {
+        gen_save_cpu_state(ctx, true);
+        if (ctx->base.singlestep_enabled) {
             gen_helper_debug(cpu_env);
         } else {
             tcg_gen_exit_tb(0);
         }
         break;
     case DISAS_NEXT:
-        gen_save_cpu_state(&ctx, false);
-        gen_goto_tb(&ctx, 0, ctx.base.pc_next);
+    case DISAS_TOO_MANY:
+        gen_save_cpu_state(ctx, false);
+        gen_goto_tb(ctx, 0, ctx->base.pc_next);
         break;
     case DISAS_NORETURN:
         break;
     default:
         g_assert_not_reached();
     }
+}
 
-    gen_tb_end(tb, num_insns);
+static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
+    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
 
-    tb->size = ctx.base.pc_next - pc_start;
-    tb->icount = num_insns;
-
-#ifdef DEBUG_DISAS
-    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
-        && qemu_log_in_addr_range(pc_start)) {
-        qemu_log_lock();
-        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
-        log_target_disas(cs, pc_start, ctx.base.pc_next - pc_start);
-        qemu_log("\n");
-        qemu_log_unlock();
-    }
-#endif
+static const TranslatorOps sh4_tr_ops = {
+    .init_disas_context = sh4_tr_init_disas_context,
+    .tb_start           = sh4_tr_tb_start,
+    .insn_start         = sh4_tr_insn_start,
+    .breakpoint_check   = sh4_tr_breakpoint_check,
+    .translate_insn     = sh4_tr_translate_insn,
+    .tb_stop            = sh4_tr_tb_stop,
+    .disas_log          = sh4_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    DisasContext ctx;
+
+    translator_loop(&sh4_tr_ops, &ctx.base, cs, tb);
 }
 
 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
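sh4_tr_init_disas_context above caps base.max_insns with -(pc | TARGET_PAGE_MASK) / 2, the number of 2-byte instructions left on the current page. A small stand-alone check of that arithmetic, assuming 4 KiB pages purely for illustration:

/* Sketch (not QEMU code): why -(pc | PAGE_MASK) / insn_size is the number of
 * fixed-width instructions remaining on the current page.  Assumes 4 KiB
 * pages and 2-byte SH-4 instructions for the sake of the example. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))    /* 0xfffff000 for 4 KiB pages */

int main(void)
{
    uint32_t pc = 0x80000ff8;           /* 8 bytes before the page boundary */

    /* pc | PAGE_MASK sets every bit above the in-page offset, so negating it
     * in unsigned arithmetic yields "bytes remaining on this page". */
    uint32_t bytes_left = -(pc | PAGE_MASK);
    uint32_t insns_left = bytes_left / 2;   /* SH-4: fixed 2-byte insns */

    assert(bytes_left == 8);
    assert(insns_left == 4);
    printf("bytes left on page: %u, insns left: %u\n", bytes_left, insns_left);
    return 0;
}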
target/sparc/translate.c
@@ -30,6 +30,7 @@
 #include "exec/helper-gen.h"
 
 #include "trace-tcg.h"
+#include "exec/translator.h"
 #include "exec/log.h"
 #include "asi.h"
 
@@ -66,14 +67,13 @@ static TCGv_i64 cpu_fpr[TARGET_DPREGS];
 #include "exec/gen-icount.h"
 
 typedef struct DisasContext {
+    DisasContextBase base;
     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
-    int is_br;
     int mem_idx;
     bool fpu_enabled;
     bool address_mask_32bit;
-    bool singlestep;
 #ifndef CONFIG_USER_ONLY
     bool supervisor;
 #ifdef TARGET_SPARC64
@@ -82,7 +82,6 @@ typedef struct DisasContext {
 #endif
 
     uint32_t cc_op;  /* current CC operation */
-    struct TranslationBlock *tb;
     sparc_def_t *def;
     TCGv_i32 t32[3];
     TCGv ttl[5];
@@ -341,13 +340,13 @@ static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
 static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                                target_ulong npc)
 {
-    if (unlikely(s->singlestep)) {
+    if (unlikely(s->base.singlestep_enabled || singlestep)) {
         return false;
     }
 
 #ifndef CONFIG_USER_ONLY
-    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
-           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
+    return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) &&
+           (npc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK);
 #else
     return true;
 #endif
@@ -361,7 +360,7 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num,
         tcg_gen_goto_tb(tb_num);
         tcg_gen_movi_tl(cpu_pc, pc);
         tcg_gen_movi_tl(cpu_npc, npc);
-        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
+        tcg_gen_exit_tb((uintptr_t)s->base.tb + tb_num);
     } else {
         /* jump to another page: currently not optimized */
         tcg_gen_movi_tl(cpu_pc, pc);
@@ -995,7 +994,7 @@ static void gen_branch_a(DisasContext *dc, target_ulong pc1)
     gen_set_label(l1);
     gen_goto_tb(dc, 1, npc + 4, npc + 8);
 
-    dc->is_br = 1;
+    dc->base.is_jmp = DISAS_NORETURN;
 }
 
 static void gen_branch_n(DisasContext *dc, target_ulong pc1)
@@ -1078,7 +1077,7 @@ static void gen_exception(DisasContext *dc, int which)
     t = tcg_const_i32(which);
     gen_helper_raise_exception(cpu_env, t);
     tcg_temp_free_i32(t);
-    dc->is_br = 1;
+    dc->base.is_jmp = DISAS_NORETURN;
 }
 
 static void gen_check_align(TCGv addr, int mask)
@@ -2441,7 +2440,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
     default:
         /* ??? In theory, this should be raise DAE_invalid_asi.
            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
-        if (tb_cflags(dc->tb) & CF_PARALLEL) {
+        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(cpu_env);
         } else {
             TCGv_i32 r_asi = tcg_const_i32(da.asi);
@@ -3351,7 +3350,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
 
                 if (cond == 8) {
                     /* An unconditional trap ends the TB.  */
-                    dc->is_br = 1;
+                    dc->base.is_jmp = DISAS_NORETURN;
                     goto jmp_insn;
                 } else {
                     /* A conditional trap falls through to the next insn.  */
@@ -4331,7 +4330,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                     save_state(dc);
                     gen_op_next_insn();
                     tcg_gen_exit_tb(0);
-                    dc->is_br = 1;
+                    dc->base.is_jmp = DISAS_NORETURN;
                     break;
                 case 0x6: /* V9 wrfprs */
                     tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
@@ -4340,7 +4339,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                     save_state(dc);
                     gen_op_next_insn();
                     tcg_gen_exit_tb(0);
-                    dc->is_br = 1;
+                    dc->base.is_jmp = DISAS_NORETURN;
                     break;
                 case 0xf: /* V9 sir, nop if user */
 #if !defined(CONFIG_USER_ONLY)
@@ -4468,7 +4467,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                         save_state(dc);
                         gen_op_next_insn();
                         tcg_gen_exit_tb(0);
-                        dc->is_br = 1;
+                        dc->base.is_jmp = DISAS_NORETURN;
 #endif
                     }
                     break;
@@ -4624,7 +4623,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                     save_state(dc);
                     gen_op_next_insn();
                     tcg_gen_exit_tb(0);
-                    dc->is_br = 1;
+                    dc->base.is_jmp = DISAS_NORETURN;
                     break;
                 case 1: // htstate
                     // XXX gen_op_wrhtstate();
@@ -5690,7 +5689,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
     } else if (dc->npc == JUMP_PC) {
         /* we can do a static jump */
         gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
-        dc->is_br = 1;
+        dc->base.is_jmp = DISAS_NORETURN;
     } else {
         dc->pc = dc->npc;
         dc->npc = dc->npc + 4;
@@ -5738,99 +5737,92 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
     }
 }
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
+static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUSPARCState *env = cs->env_ptr;
-    target_ulong pc_start, last_pc;
-    DisasContext dc1, *dc = &dc1;
-    int num_insns;
-    int max_insns;
-    unsigned int insn;
+    int bound;
 
-    memset(dc, 0, sizeof(DisasContext));
-    dc->tb = tb;
-    pc_start = tb->pc;
-    dc->pc = pc_start;
-    last_pc = dc->pc;
-    dc->npc = (target_ulong) tb->cs_base;
+    dc->pc = dc->base.pc_first;
+    dc->npc = (target_ulong)dc->base.tb->cs_base;
     dc->cc_op = CC_OP_DYNAMIC;
-    dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
+    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
     dc->def = &env->def;
-    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
-    dc->address_mask_32bit = tb_am_enabled(tb->flags);
-    dc->singlestep = (cs->singlestep_enabled || singlestep);
+    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
+    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
 #ifndef CONFIG_USER_ONLY
-    dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
+    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
 #endif
 #ifdef TARGET_SPARC64
     dc->fprs_dirty = 0;
-    dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
+    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
 #ifndef CONFIG_USER_ONLY
-    dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
+    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
 #endif
 #endif
+    /*
+     * if we reach a page boundary, we stop generation so that the
+     * PC of a TT_TFAULT exception is always in the right page
+     */
+    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+    dc->base.max_insns = MIN(dc->base.max_insns, bound);
+}
 
-    num_insns = 0;
-    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    if (max_insns > TCG_MAX_INSNS) {
-        max_insns = TCG_MAX_INSNS;
-    }
+static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}
 
-    gen_tb_start(tb);
-    do {
-        if (dc->npc & JUMP_PC) {
-            assert(dc->jump_pc[1] == dc->pc + 4);
-            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
-        } else {
-            tcg_gen_insn_start(dc->pc, dc->npc);
-        }
-        num_insns++;
-        last_pc = dc->pc;
-        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
-            if (dc->pc != pc_start) {
-                save_state(dc);
-            }
-            gen_helper_debug(cpu_env);
-            tcg_gen_exit_tb(0);
-            dc->is_br = 1;
-            goto exit_gen_loop;
-        }
+static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
-            gen_io_start();
-        }
+    if (dc->npc & JUMP_PC) {
+        assert(dc->jump_pc[1] == dc->pc + 4);
+        tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
+    } else {
+        tcg_gen_insn_start(dc->pc, dc->npc);
+    }
+}
 
-        insn = cpu_ldl_code(env, dc->pc);
+static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
+                                      const CPUBreakpoint *bp)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-        disas_sparc_insn(dc, insn);
+    if (dc->pc != dc->base.pc_first) {
+        save_state(dc);
+    }
+    gen_helper_debug(cpu_env);
+    tcg_gen_exit_tb(0);
+    dc->base.is_jmp = DISAS_NORETURN;
+    /* update pc_next so that the current instruction is included in tb->size */
+    dc->base.pc_next += 4;
+    return true;
+}
 
-        if (dc->is_br)
-            break;
-        /* if the next PC is different, we abort now */
-        if (dc->pc != (last_pc + 4))
-            break;
-        /* if we reach a page boundary, we stop generation so that the
-           PC of a TT_TFAULT exception is always in the right page */
-        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
-            break;
-        /* if single step mode, we generate only one instruction and
-           generate an exception */
-        if (dc->singlestep) {
-            break;
-        }
-    } while (!tcg_op_buf_full() &&
-             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
-             num_insns < max_insns);
-
- exit_gen_loop:
-    if (tb_cflags(tb) & CF_LAST_IO) {
-        gen_io_end();
-    }
-    if (!dc->is_br) {
+static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+    CPUSPARCState *env = cs->env_ptr;
+    unsigned int insn;
+
+    insn = cpu_ldl_code(env, dc->pc);
+    dc->base.pc_next += 4;
+    disas_sparc_insn(dc, insn);
+
+    if (dc->base.is_jmp == DISAS_NORETURN) {
+        return;
+    }
+    if (dc->pc != dc->base.pc_next) {
+        dc->base.is_jmp = DISAS_TOO_MANY;
+    }
+}
+
+static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+    if (dc->base.is_jmp != DISAS_NORETURN) {
         if (dc->pc != DYNAMIC_PC &&
             (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
             /* static PC and NPC: we can use direct chaining */
@@ -5843,22 +5835,29 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
             tcg_gen_exit_tb(0);
         }
     }
-    gen_tb_end(tb, num_insns);
+}
 
-    tb->size = last_pc + 4 - pc_start;
-    tb->icount = num_insns;
+static void sparc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
 
-#ifdef DEBUG_DISAS
-    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
-        && qemu_log_in_addr_range(pc_start)) {
-        qemu_log_lock();
-        qemu_log("--------------\n");
-        qemu_log("IN: %s\n", lookup_symbol(pc_start));
-        log_target_disas(cs, pc_start, last_pc + 4 - pc_start);
-        qemu_log("\n");
-        qemu_log_unlock();
-    }
-#endif
+static const TranslatorOps sparc_tr_ops = {
+    .init_disas_context = sparc_tr_init_disas_context,
+    .tb_start           = sparc_tr_tb_start,
+    .insn_start         = sparc_tr_insn_start,
+    .breakpoint_check   = sparc_tr_breakpoint_check,
+    .translate_insn     = sparc_tr_translate_insn,
+    .tb_stop            = sparc_tr_tb_stop,
+    .disas_log          = sparc_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    DisasContext dc = {};
+
+    translator_loop(&sparc_tr_ops, &dc.base, cs, tb);
 }
 
 void sparc_tcg_init(void)
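Each *_tr_* hook above receives only a DisasContextBase pointer and recovers the target's own DisasContext with container_of, which works because the base struct is embedded as a member of the target struct. A tiny stand-alone illustration of the pattern (not QEMU's headers; all names here are made up):

/* Sketch of the "embed a base struct, recover the derived pointer with
 * container_of" pattern used by the per-target DisasContext above. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct Base { int pc_next; } Base;

typedef struct Derived {
    Base base;          /* embedded base; container_of finds the outer struct */
    int target_field;
} Derived;

static void hook(Base *b)
{
    /* Generic code passes only Base *; target code recovers its own struct. */
    Derived *d = container_of(b, Derived, base);
    printf("pc_next=%d target_field=%d\n", b->pc_next, d->target_field);
}

int main(void)
{
    Derived d = { .base = { .pc_next = 0x400 }, .target_field = 7 };
    hook(&d.base);
    return 0;
}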
target/tilegx/translate.c
@@ -2375,7 +2375,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
     DisasContext ctx;
     DisasContext *dc = &ctx;
     uint64_t pc_start = tb->pc;
-    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+    uint64_t page_start = pc_start & TARGET_PAGE_MASK;
     int num_insns = 0;
     int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
 
@@ -2415,7 +2415,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
     }
     dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
     if (num_insns >= max_insns
-        || dc->pc >= next_page_start
+        || (dc->pc - page_start >= TARGET_PAGE_SIZE)
         || tcg_op_buf_full()) {
         /* Ending the TB due to TB size or page boundary.  Set PC.  */
         tcg_gen_movi_tl(cpu_pc, dc->pc);
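The tilegx change above, like the unicore32 and xtensa hunks that follow, replaces comparisons against a precomputed next_page_start with a page-relative offset check, so the computation cannot wrap when the TB sits on the last page of the address space. A stand-alone demonstration, assuming a 32-bit PC and 4 KiB pages purely for illustration:

/* Sketch: the overflow these next_page hunks fix.  With a 32-bit PC on the
 * last page of the address space, "(pc & PAGE_MASK) + PAGE_SIZE" wraps to 0,
 * so any comparison against next_page_start misfires for every pc on that
 * page and the TB is cut after a single instruction.  The page-relative
 * form "pc - page_start < PAGE_SIZE" cannot wrap. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uint32_t pc = 0xfffff010;                           /* on the last 4 KiB page */
    uint32_t page_start = pc & PAGE_MASK;               /* 0xfffff000 */
    uint32_t next_page_start = page_start + PAGE_SIZE;  /* wraps to 0 */

    assert(next_page_start == 0);
    assert(!(pc < next_page_start));          /* old-style check: always false */
    assert(pc - page_start < PAGE_SIZE);      /* new check: still correct */
    return 0;
}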
target/unicore32/translate.c
@@ -1875,7 +1875,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
     CPUUniCore32State *env = cs->env_ptr;
     DisasContext dc1, *dc = &dc1;
     target_ulong pc_start;
-    uint32_t next_page_start;
+    uint32_t page_start;
     int num_insns;
     int max_insns;
 
@@ -1894,7 +1894,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
     cpu_F1s = tcg_temp_new_i32();
     cpu_F0d = tcg_temp_new_i64();
     cpu_F1d = tcg_temp_new_i64();
-    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+    page_start = pc_start & TARGET_PAGE_MASK;
     num_insns = 0;
     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
     if (max_insns == 0) {
@@ -1951,7 +1951,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
     } while (!dc->is_jmp && !tcg_op_buf_full() &&
              !cs->singlestep_enabled &&
              !singlestep &&
-             dc->pc < next_page_start &&
+             dc->pc - page_start < TARGET_PAGE_SIZE &&
             num_insns < max_insns);
 
     if (tb_cflags(tb) & CF_LAST_IO) {
target/xtensa/translate.c
@@ -1061,8 +1061,7 @@
     int insn_count = 0;
     int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
     uint32_t pc_start = tb->pc;
-    uint32_t next_page_start =
-        (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+    uint32_t page_start = pc_start & TARGET_PAGE_MASK;
 
     if (max_insns == 0) {
         max_insns = CF_COUNT_MASK;
@@ -1162,9 +1161,9 @@
         }
     } while (dc.is_jmp == DISAS_NEXT &&
              insn_count < max_insns &&
-             dc.pc < next_page_start &&
-             dc.pc + xtensa_insn_len(env, &dc) <= next_page_start &&
-             !tcg_op_buf_full());
+             dc.pc - page_start < TARGET_PAGE_SIZE &&
+             dc.pc - page_start + xtensa_insn_len(env, &dc) <= TARGET_PAGE_SIZE
+             && !tcg_op_buf_full());
 done:
     reset_sar_tracker(&dc);
     if (dc.icount) {