target/hppa: Store full iaoq_f and page offset of iaoq_b in TB
In preparation for CF_PCREL, store the iaoq_f in 3 parts: high bits in cs_base, middle bits in pc, and low bits in priv. For iaoq_b, set a bit for either of space or page differing, else the page offset.

Install iaq entries before goto_tb. The change to not record the full direct branch difference in TB means that we have to store at least iaoq_b before goto_tb. But since a later change to enable CF_PCREL will require both iaoq_f and iaoq_b to be updated before goto_tb, go ahead and update both fields now.

Reviewed-by: Helge Deller <deller@gmx.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 3c13b0ffe7
commit 9dfcd24349
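For illustration only, not code from this commit: a minimal standalone sketch of the cs_base encoding the commit message describes, using the CS_BASE_DIFFPAGE/CS_BASE_DIFFSPACE bits and page-offset layout introduced below. The helper name encode_cs_base, the example addresses, and the local TARGET_PAGE_BITS/TARGET_PAGE_MASK definitions are assumptions made to keep the sketch self-contained.

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed 4 KiB target pages, to keep the sketch standalone. */
#define TARGET_PAGE_BITS    12
#define TARGET_PAGE_MASK    (~(uint64_t)((1u << TARGET_PAGE_BITS) - 1))

/* Same bit values as the cpu.h hunk below. */
#define CS_BASE_DIFFPAGE    (1 << 12)
#define CS_BASE_DIFFSPACE   (1 << 13)

/*
 * Hypothetical helper mirroring the logic cpu_get_tb_cpu_state gains in
 * this commit: the high 32 bits of IAOQ_Front ride in cs_base (PA2.0
 * only), and IAQ_Back is recorded either as its page offset or as a
 * "different page" / "different space" flag bit.
 */
static uint64_t encode_cs_base(uint64_t iasq_f, uint64_t iaoq_f,
                               uint64_t iasq_b, uint64_t iaoq_b,
                               bool is_pa20)
{
    uint64_t cs_base = 0;

    if (is_pa20) {
        cs_base = iaoq_f & 0xffffffff00000000ull;
    }
    if (iasq_f != iasq_b) {
        cs_base |= CS_BASE_DIFFSPACE;
    } else if ((iaoq_f ^ iaoq_b) & TARGET_PAGE_MASK) {
        cs_base |= CS_BASE_DIFFPAGE;
    } else {
        cs_base |= iaoq_b & ~TARGET_PAGE_MASK;
    }
    return cs_base;
}

int main(void)
{
    uint64_t f = 0x12345678;  /* example IAOQ_Front value, made up for the demo */

    /* Sequential: IAQ_Back on the same page, so its page offset is stored. */
    printf("same page:  0x%" PRIx64 "\n", encode_cs_base(0, f, 0, f + 4, true));
    /* Branch that left the page: only CS_BASE_DIFFPAGE is set. */
    printf("diff page:  0x%" PRIx64 "\n", encode_cs_base(0, f, 0, f + 0x2000, true));
    /* Space change: CS_BASE_DIFFSPACE is set. */
    printf("diff space: 0x%" PRIx64 "\n", encode_cs_base(0, f, 1ull << 32, f, true));
    return 0;
}
```

The translate.c hunks below show the real decode side: hppa_tr_init_disas_context re-derives iaoq_f from cs_base, pc and the privilege flags, and chooses how to track iaq_b from these bits.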

target/hppa/cpu.c

@@ -48,36 +48,43 @@ static vaddr hppa_cpu_get_pc(CPUState *cs)
 }
 
 void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
-                          uint64_t *cs_base, uint32_t *pflags)
+                          uint64_t *pcsbase, uint32_t *pflags)
 {
     uint32_t flags = env->psw_n * PSW_N;
+    uint64_t cs_base = 0;
+
+    /*
+     * TB lookup assumes that PC contains the complete virtual address.
+     * If we leave space+offset separate, we'll get ITLB misses to an
+     * incomplete virtual address. This also means that we must separate
+     * out current cpu privilege from the low bits of IAOQ_F.
+     */
+    *pc = hppa_cpu_get_pc(env_cpu(env));
+    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
+
+    if (hppa_is_pa20(env)) {
+        cs_base = env->iaoq_f & MAKE_64BIT_MASK(32, 32);
+    }
+
+    /*
+     * The only really interesting case is if IAQ_Back is on the same page
+     * as IAQ_Front, so that we can use goto_tb between the blocks. In all
+     * other cases, we'll be ending the TranslationBlock with one insn and
+     * not linking between them.
+     */
+    if (env->iasq_f != env->iasq_b) {
+        cs_base |= CS_BASE_DIFFSPACE;
+    } else if ((env->iaoq_f ^ env->iaoq_b) & TARGET_PAGE_MASK) {
+        cs_base |= CS_BASE_DIFFPAGE;
+    } else {
+        cs_base |= env->iaoq_b & ~TARGET_PAGE_MASK;
+    }
 
-    /* TB lookup assumes that PC contains the complete virtual address.
-       If we leave space+offset separate, we'll get ITLB misses to an
-       incomplete virtual address. This also means that we must separate
-       out current cpu privilege from the low bits of IAOQ_F. */
 #ifdef CONFIG_USER_ONLY
-    *pc = env->iaoq_f & -4;
-    *cs_base = env->iaoq_b & -4;
     flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
 #else
     /* ??? E, T, H, L, B bits need to be here, when implemented. */
     flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
-    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
-
-    *pc = hppa_cpu_get_pc(env_cpu(env));
-    *cs_base = env->iasq_f;
-
-    /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
-       low 32-bits of CS_BASE. This will succeed for all direct branches,
-       which is the primary case we care about -- using goto_tb within a page.
-       Failure is indicated by a zero difference. */
-    if (env->iasq_f == env->iasq_b) {
-        target_long diff = env->iaoq_b - env->iaoq_f;
-        if (diff == (int32_t)diff) {
-            *cs_base |= (uint32_t)diff;
-        }
-    }
     if ((env->sr[4] == env->sr[5])
         & (env->sr[4] == env->sr[6])
         & (env->sr[4] == env->sr[7])) {
@@ -85,6 +92,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
     }
 #endif
 
+    *pcsbase = cs_base;
     *pflags = flags;
 }
 
@@ -93,25 +101,7 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
 {
     HPPACPU *cpu = HPPA_CPU(cs);
 
-    tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
-
-#ifdef CONFIG_USER_ONLY
-    cpu->env.iaoq_f = tb->pc | PRIV_USER;
-    cpu->env.iaoq_b = tb->cs_base | PRIV_USER;
-#else
-    /* Recover the IAOQ values from the GVA + PRIV. */
-    uint32_t priv = (tb->flags >> TB_FLAG_PRIV_SHIFT) & 3;
-    target_ulong cs_base = tb->cs_base;
-    target_ulong iasq_f = cs_base & ~0xffffffffull;
-    int32_t diff = cs_base;
-
-    cpu->env.iasq_f = iasq_f;
-    cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
-    if (diff) {
-        cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
-    }
-#endif
-
+    /* IAQ is always up-to-date before goto_tb. */
     cpu->env.psw_n = (tb->flags & PSW_N) != 0;
 }
 

target/hppa/cpu.h

@@ -341,6 +341,8 @@ hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
 #define TB_FLAG_SR_SAME     PSW_I
 #define TB_FLAG_PRIV_SHIFT  8
 #define TB_FLAG_UNALIGN     0x400
+#define CS_BASE_DIFFPAGE    (1 << 12)
+#define CS_BASE_DIFFSPACE   (1 << 13)
 
 void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
                           uint64_t *cs_base, uint32_t *pflags);

target/hppa/translate.c

@@ -770,12 +770,11 @@ static bool use_nullify_skip(DisasContext *ctx)
 static void gen_goto_tb(DisasContext *ctx, int which,
                         const DisasIAQE *f, const DisasIAQE *b)
 {
+    install_iaq_entries(ctx, f, b);
     if (use_goto_tb(ctx, f, b)) {
         tcg_gen_goto_tb(which);
-        install_iaq_entries(ctx, f, b);
         tcg_gen_exit_tb(ctx->base.tb, which);
     } else {
-        install_iaq_entries(ctx, f, b);
         tcg_gen_lookup_and_goto_ptr();
     }
 }
@@ -4576,6 +4575,7 @@ static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    uint64_t cs_base, iaoq_f, iaoq_b;
     int bound;
 
     ctx->cs = cs;
@@ -4585,29 +4585,30 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 #ifdef CONFIG_USER_ONLY
     ctx->privilege = PRIV_USER;
     ctx->mmu_idx = MMU_USER_IDX;
-    ctx->iaoq_first = ctx->base.pc_first | ctx->privilege;
-    ctx->iaq_b.disp = ctx->base.tb->cs_base - ctx->base.pc_first;
     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
 #else
     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
     ctx->mmu_idx = (ctx->tb_flags & PSW_D
                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                     : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
+#endif
 
-    /* Recover the IAOQ values from the GVA + PRIV. */
-    uint64_t cs_base = ctx->base.tb->cs_base;
-    uint64_t iasq_f = cs_base & ~0xffffffffull;
-    int32_t diff = cs_base;
+    cs_base = ctx->base.tb->cs_base;
+    iaoq_f = cs_base & MAKE_64BIT_MASK(32, 32);
+    iaoq_f |= ctx->base.pc_first & MAKE_64BIT_MASK(2, 30);
+    iaoq_f |= ctx->privilege;
+    ctx->iaoq_first = iaoq_f;
 
-    ctx->iaoq_first = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
-
-    if (diff) {
-        ctx->iaq_b.disp = diff;
-    } else {
-        ctx->iaq_b.base = cpu_iaoq_b;
+    if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
+        ctx->iaq_b.space = cpu_iasq_b;
+        ctx->iaq_b.base = cpu_iaoq_b;
+    } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
+        ctx->iaq_b.base = cpu_iaoq_b;
+    } else {
+        iaoq_b = (iaoq_f & TARGET_PAGE_MASK) | (cs_base & ~TARGET_PAGE_MASK);
+        ctx->iaq_b.disp = iaoq_b - iaoq_f;
     }
-#endif
 
     ctx->zero = tcg_constant_i64(0);
 