diff --git a/cputlb.c b/cputlb.c
index 4bc6c24e11..fbcebe3cf4 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -452,7 +452,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     CPUState *cpu = ENV_GET_CPU(env1);
 
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    mmu_idx = cpu_mmu_index(env1);
+    mmu_idx = cpu_mmu_index(env1, true);
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                  (addr & TARGET_PAGE_MASK))) {
         cpu_ldub_code(env1, addr);
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 26f479416a..ceac1688b3 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -363,7 +363,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #endif /* (NB_MMU_MODES > 12) */
 
 /* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
 #define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "exec/cpu_ldst_template.h"
@@ -379,7 +379,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #undef CPU_MMU_INDEX
 #undef MEMSUFFIX
 
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
 #define MEMSUFFIX _code
 #define SOFTMMU_CODE_ACCESS
 
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index 3f1ece30ee..ef88ffb83b 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -376,7 +376,7 @@ enum {
     PS_USER_MODE = 8
 };
 
-static inline int cpu_mmu_index(CPUAlphaState *env)
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
 {
     if (env->pal_mode) {
         return MMU_KERNEL_IDX;
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 206feb5746..2ba5fb80ae 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2878,7 +2878,7 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
 
     ctx.tb = tb;
     ctx.pc = pc_start;
-    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.mem_idx = cpu_mmu_index(env, false);
     ctx.implver = env->implver;
     ctx.singlestep_enabled = cs->singlestep_enabled;
 
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 5abd8ba5c5..b9068c9dad 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -1678,7 +1678,7 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
 }
 
 /* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
 {
     int el = arm_current_el(env);
 
@@ -1911,7 +1911,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                    << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }
 
-    *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+    *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
      * states defined in the ARM ARM for software singlestep:
      *  SS_ACTIVE   PSTATE.SS   State
diff --git a/target-arm/helper.c b/target-arm/helper.c
index d453120874..2c6ec9d6cf 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -6892,7 +6892,7 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     uint32_t fsr;
     MemTxAttrs attrs = {};
 
-    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
                         &attrs, &prot, &page_size, &fsr);
 
     if (ret) {
@@ -7057,7 +7057,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
         void *hostaddr[maxidx];
         int try, i;
-        unsigned mmu_idx = cpu_mmu_index(env);
+        unsigned mmu_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
 
         for (try = 0; try < 2; try++) {
diff --git a/target-cris/cpu.h b/target-cris/cpu.h
index 10c3f46547..82df90d91c 100644
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -233,7 +233,7 @@ enum {
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUCRISState *env)
+static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
 {
 	return !!(env->pregs[PR_CCS] & U_FLAG);
 }
diff --git a/target-cris/translate.c b/target-cris/translate.c
index aa90d7173f..d5b54e1ad4 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -1083,7 +1083,7 @@ static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
 
 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp.  */
@@ -1097,7 +1097,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr, 
              unsigned int size, int sign)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp.  */
@@ -1112,7 +1112,7 @@ static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
                unsigned int size)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp.  */
diff --git a/target-cris/translate_v10.c b/target-cris/translate_v10.c
index 618c234949..da0b2caf85 100644
--- a/target-cris/translate_v10.c
+++ b/target-cris/translate_v10.c
@@ -96,7 +96,7 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
 static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
                        unsigned int size)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp.  */
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 74b674d524..151f1b9d7f 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1199,7 +1199,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define MMU_KSMAP_IDX   0
 #define MMU_USER_IDX    1
 #define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
 {
     return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
         (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
diff --git a/target-i386/translate.c b/target-i386/translate.c
index b1a5ad9a20..d72fa4687c 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7942,7 +7942,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
     /* select memory access functions */
     dc->mem_index = 0;
     if (flags & HF_SOFTMMU_MASK) {
-        dc->mem_index = cpu_mmu_index(env);
+        dc->mem_index = cpu_mmu_index(env, false);
     }
     dc->cpuid_features = env->features[FEAT_1_EDX];
     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 944777d052..cc7726301e 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -34,7 +34,7 @@ typedef struct CPULM32State CPULM32State;
 
 #define NB_MMU_MODES 1
 #define TARGET_PAGE_BITS 12
-static inline int cpu_mmu_index(CPULM32State *env)
+static inline int cpu_mmu_index(CPULM32State *env, bool ifetch)
 {
     return 0;
 }
diff --git a/target-m68k/cpu.h b/target-m68k/cpu.h
index 9a62f6cb52..43a9a1c612 100644
--- a/target-m68k/cpu.h
+++ b/target-m68k/cpu.h
@@ -223,7 +223,7 @@ void register_m68k_insns (CPUM68KState *env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env)
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
 {
     return (env->sr & SR_S) == 0 ? 1 : 0;
 }
diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h
index 7e20e59b89..402124a051 100644
--- a/target-microblaze/cpu.h
+++ b/target-microblaze/cpu.h
@@ -309,7 +309,7 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
 #define MMU_USER_IDX    2
 /* See NB_MMU_MODES further up the file.  */
 
-static inline int cpu_mmu_index (CPUMBState *env)
+static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
 {
         /* Are we in nommu mode?.  */
         if (!(env->sregs[SR_MSR] & MSR_VM))
diff --git a/target-microblaze/mmu.c b/target-microblaze/mmu.c
index 728da133da..2ef1dc21a2 100644
--- a/target-microblaze/mmu.c
+++ b/target-microblaze/mmu.c
@@ -279,7 +279,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
             }
 
             hit = mmu_translate(&env->mmu, &lu,
-                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
+                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
             if (hit) {
                 env->mmu.regs[MMU_R_TLBX] = lu.idx;
             } else
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index c33b16c1b5..3de89440a6 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -418,7 +418,7 @@ static void dec_msr(DisasContext *dc)
     CPUState *cs = CPU(dc->cpu);
     TCGv t0, t1;
     unsigned int sr, to, rn;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     sr = dc->imm & ((1 << 14) - 1);
     to = dc->imm & (1 << 14);
@@ -730,7 +730,7 @@ static void dec_bit(DisasContext *dc)
     CPUState *cs = CPU(dc->cpu);
     TCGv t0;
     unsigned int op;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     op = dc->ir & ((1 << 9) - 1);
     switch (op) {
@@ -994,7 +994,7 @@ static void dec_load(DisasContext *dc)
      * address and if that succeeds we write into the destination reg.
      */
     v = tcg_temp_new();
-    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
 
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
         tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
@@ -1072,7 +1072,7 @@ static void dec_store(DisasContext *dc)
            this compare and the following write to be atomic. For user
            emulation we need to add atomicity between threads.  */
         tval = tcg_temp_new();
-        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
         tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
         write_carryi(dc, 0);
@@ -1123,7 +1123,8 @@ static void dec_store(DisasContext *dc)
                 break;
         }
     }
-    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
+                       cpu_mmu_index(&dc->cpu->env, false), mop);
 
     /* Verify alignment if needed.  */
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
@@ -1219,7 +1220,7 @@ static void dec_bcc(DisasContext *dc)
 static void dec_br(DisasContext *dc)
 {
     unsigned int dslot, link, abs, mbar;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     dslot = dc->ir & (1 << 20);
     abs = dc->ir & (1 << 19);
@@ -1351,7 +1352,7 @@ static inline void do_rte(DisasContext *dc)
 static void dec_rts(DisasContext *dc)
 {
     unsigned int b_bit, i_bit, e_bit;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     i_bit = dc->ir & (1 << 21);
     b_bit = dc->ir & (1 << 22);
@@ -1523,7 +1524,7 @@ static void dec_null(DisasContext *dc)
 /* Insns connected to FSL or AXI stream attached devices.  */
 static void dec_stream(DisasContext *dc)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
     TCGv_i32 t_id, t_ctrl;
     int ctrl;
 
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index c91883d5e1..2acc4b3d0a 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -634,7 +634,7 @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
 #define MMU_MODE1_SUFFIX _super
 #define MMU_MODE2_SUFFIX _user
 #define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
 {
     return env->hflags & MIPS_HFLAG_KSU;
 }
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 809a061e29..1aa9e3c9e4 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -3629,7 +3629,7 @@ FOP_CONDN_S(sne,  (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
 #if !defined(CONFIG_USER_ONLY)
 #define MEMOP_IDX(DF)                                           \
         TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,  \
-                                        cpu_mmu_index(env));
+                                        cpu_mmu_index(env, false));
 #else
 #define MEMOP_IDX(DF)
 #endif
@@ -3685,7 +3685,7 @@ void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd,             \
                             target_ulong addr)                          \
 {                                                                       \
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);                          \
-    int mmu_idx = cpu_mmu_index(env);                                   \
+    int mmu_idx = cpu_mmu_index(env, false);                            \
     int i;                                                              \
     MEMOP_IDX(DF)                                                       \
     ensure_writable_pages(env, addr, mmu_idx, GETRA());                 \
diff --git a/target-moxie/cpu.h b/target-moxie/cpu.h
index 29572aaba3..15ca15bf53 100644
--- a/target-moxie/cpu.h
+++ b/target-moxie/cpu.h
@@ -127,7 +127,7 @@ int cpu_moxie_signal_handler(int host_signum, void *pinfo,
 #define cpu_gen_code cpu_moxie_gen_code
 #define cpu_signal_handler cpu_moxie_signal_handler
 
-static inline int cpu_mmu_index(CPUMoxieState *env)
+static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
 {
     return 0;
 }
diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h
index 36c4f20e20..560210d9e5 100644
--- a/target-openrisc/cpu.h
+++ b/target-openrisc/cpu.h
@@ -403,7 +403,7 @@ static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
     *flags = (env->flags & D_FLAG);
 }
 
-static inline int cpu_mmu_index(CPUOpenRISCState *env)
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
 {
     if (!(env->sr & SR_IME)) {
         return MMU_NOMMU_IDX;
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index aca1242bdb..473556e14a 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -1653,7 +1653,7 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
     dc->ppc = pc_start;
     dc->pc = pc_start;
     dc->flags = cpu->env.cpucfgr;
-    dc->mem_idx = cpu_mmu_index(&cpu->env);
+    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
     dc->synced_flags = dc->tb_flags = tb->flags;
     dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
     dc->singlestep_enabled = cs->singlestep_enabled;
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 6f76674a4c..406d308960 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1250,7 +1250,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
 #define MMU_MODE1_SUFFIX _kernel
 #define MMU_MODE2_SUFFIX _hypv
 #define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env)
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
 {
     return env->mmu_idx;
 }
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index b650890130..9aeb0241fb 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -308,7 +308,7 @@ static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
 #define MMU_SECONDARY_IDX       1
 #define MMU_HOME_IDX            2
 
-static inline int cpu_mmu_index (CPUS390XState *env)
+static inline int cpu_mmu_index (CPUS390XState *env, bool ifetch)
 {
     switch (env->psw.mask & PSW_MASK_ASC) {
     case PSW_ASC_PRIMARY:
diff --git a/target-s390x/mem_helper.c b/target-s390x/mem_helper.c
index 84bf198e2b..90399f1b0b 100644
--- a/target-s390x/mem_helper.c
+++ b/target-s390x/mem_helper.c
@@ -69,7 +69,7 @@ static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
 static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                         uint32_t l)
 {
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
 
     while (l > 0) {
         void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
@@ -92,7 +92,7 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
 static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                          uint32_t l)
 {
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
 
     while (l > 0) {
         void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
diff --git a/target-sh4/cpu.h b/target-sh4/cpu.h
index 34bb3d7799..1f68b27a90 100644
--- a/target-sh4/cpu.h
+++ b/target-sh4/cpu.h
@@ -235,7 +235,7 @@ void cpu_load_tlb(CPUSH4State * env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUSH4State *env)
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
 {
     return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
 }
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
index 0522b65f19..72ea1710ae 100644
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -642,7 +642,7 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1)
 }
 #endif
 
-static inline int cpu_mmu_index(CPUSPARCState *env1)
+static inline int cpu_mmu_index(CPUSPARCState *env1, bool ifetch)
 {
 #if defined(CONFIG_USER_ONLY)
     return MMU_USER_IDX;
diff --git a/target-sparc/mmu_helper.c b/target-sparc/mmu_helper.c
index 2a0c6f0d3d..7495406b3e 100644
--- a/target-sparc/mmu_helper.c
+++ b/target-sparc/mmu_helper.c
@@ -849,7 +849,7 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     SPARCCPU *cpu = SPARC_CPU(cs);
     CPUSPARCState *env = &cpu->env;
     hwaddr phys_addr;
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
     MemoryRegionSection section;
 
     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 48fc2abe63..4690b4699a 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5234,7 +5234,7 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
     last_pc = dc->pc;
     dc->npc = (target_ulong) tb->cs_base;
     dc->cc_op = CC_OP_DYNAMIC;
-    dc->mem_idx = cpu_mmu_index(env);
+    dc->mem_idx = cpu_mmu_index(env, false);
     dc->def = env->def;
     dc->fpu_enabled = tb_fpu_enabled(tb->flags);
     dc->address_mask_32bit = tb_am_enabled(tb->flags);
diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h
index 916ee27ad4..42751e8a0c 100644
--- a/target-tricore/cpu.h
+++ b/target-tricore/cpu.h
@@ -350,7 +350,7 @@ void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf);
 #define cpu_signal_handler cpu_tricore_signal_handler
 #define cpu_list tricore_cpu_list
 
-static inline int cpu_mmu_index(CPUTriCoreState *env)
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
 {
     return 0;
 }
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index f02bef41ee..440f30a843 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -8287,7 +8287,7 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
     ctx.tb = tb;
     ctx.singlestep_enabled = cs->singlestep_enabled;
     ctx.bstate = BS_NONE;
-    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.mem_idx = cpu_mmu_index(env, false);
 
     tcg_clear_temp_count();
     gen_tb_start(tb);
diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h
index 45e31e54ba..121e5283d7 100644
--- a/target-unicore32/cpu.h
+++ b/target-unicore32/cpu.h
@@ -131,7 +131,7 @@ int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index(CPUUniCore32State *env)
+static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
 {
     return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
 }
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index 96bfc82e92..dbd2c9cd0f 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -492,7 +492,7 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
 #define MMU_MODE2_SUFFIX _ring2
 #define MMU_MODE3_SUFFIX _ring3
 
-static inline int cpu_mmu_index(CPUXtensaState *env)
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
 {
     return xtensa_get_cring(env);
 }