precise self modifying code support

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@745 c046a42c-6fe2-441c-8c8c-71466251a162

parent eeab3a558f
commit d720b93d0b
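What this commit does, in brief: when a store lands in a page that holds translated code, the affected translation blocks (TBs) must be invalidated. If the store was issued from inside the very TB being invalidated (self-modifying code), the CPU cannot simply keep running that block. The write helpers therefore stash the host pc and the target virtual address of the store in the CPU context (env->mem_write_pc, env->mem_write_vaddr); the invalidation path uses them to recover the precise guest state via cpu_restore_state(), regenerates a block containing only the modifying instruction (CF_SINGLE_INSN), and restarts execution with cpu_resume_from_signal(). The fragment below is a rough standalone model of that control flow, not QEMU code; every type, name, and constant in it is illustrative.

#include <stdio.h>

#define CF_SINGLE_INSN 0x08                /* illustrative flag value */

typedef struct TB { int start, end, cflags; } TB;
typedef struct CPU {
    int pc;                                /* guest pc of the store insn */
    TB *current_tb;                        /* block currently executing */
} CPU;

/* stand-in for tb_gen_code(..., CF_SINGLE_INSN): a block that covers
   exactly one guest instruction and so cannot modify itself */
static TB gen_single_insn_tb(int pc)
{
    TB tb = { pc, pc + 1, CF_SINGLE_INSN };
    return tb;
}

/* a store into translated code: if the writer sits inside the block
   being invalidated, regenerate it as a single-insn block and resume */
static void write_to_code(CPU *cpu, int addr)
{
    TB *tb = cpu->current_tb;

    if (addr < tb->start || addr >= tb->end)
        return;                            /* some other TB; just invalidate */
    if (!(tb->cflags & CF_SINGLE_INSN)) {
        *tb = gen_single_insn_tb(cpu->pc);
        printf("regenerated TB [%d,%d) with CF_SINGLE_INSN\n",
               tb->start, tb->end);
        /* the real code would now cpu_resume_from_signal(env, ...) */
    }
}

int main(void)
{
    TB tb = { 100, 110, 0 };
    CPU cpu = { 105, &tb };
    write_to_code(&cpu, 107);              /* the TB writes into itself */
    return 0;
}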
@@ -681,8 +681,7 @@ extern uint8_t *phys_ram_dirty;
 #define IO_MEM_CODE (3 << IO_MEM_SHIFT) /* used internally, never use directly */
 #define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
 
-/* NOTE: vaddr is only used internally. Never use it except if you know what you do */
-typedef void CPUWriteMemoryFunc(uint32_t addr, uint32_t value, uint32_t vaddr);
+typedef void CPUWriteMemoryFunc(uint32_t addr, uint32_t value);
 typedef uint32_t CPUReadMemoryFunc(uint32_t addr);
 
 void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
exec.c (315 lines changed)
@@ -168,7 +168,6 @@ static inline PageDesc *page_find(unsigned int index)
 
 #if !defined(CONFIG_USER_ONLY)
 static void tlb_protect_code(CPUState *env, uint32_t addr);
-static void tlb_unprotect_code(CPUState *env, uint32_t addr);
 static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);
 
 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
@@ -533,30 +532,78 @@ static void build_page_bitmap(PageDesc *p)
     }
 }
 
+#ifdef TARGET_HAS_PRECISE_SMC
+
+static void tb_gen_code(CPUState *env,
+                        target_ulong pc, target_ulong cs_base, int flags,
+                        int cflags)
+{
+    TranslationBlock *tb;
+    uint8_t *tc_ptr;
+    target_ulong phys_pc, phys_page2, virt_page2;
+    int code_gen_size;
+
+    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
+    tb = tb_alloc((unsigned long)pc);
+    if (!tb) {
+        /* flush must be done */
+        tb_flush(env);
+        /* cannot fail at this point */
+        tb = tb_alloc((unsigned long)pc);
+    }
+    tc_ptr = code_gen_ptr;
+    tb->tc_ptr = tc_ptr;
+    tb->cs_base = cs_base;
+    tb->flags = flags;
+    tb->cflags = cflags;
+    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
+
+    /* check next page if needed */
+    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
+    phys_page2 = -1;
+    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
+        phys_page2 = get_phys_addr_code(env, virt_page2);
+    }
+    tb_link_phys(tb, phys_pc, phys_page2);
+}
+#endif
+
 /* invalidate all TBs which intersect with the target physical page
    starting in range [start;end[. NOTE: start and end must refer to
-   the same physical page. 'vaddr' is a virtual address referencing
-   the physical page of code. It is only used an a hint if there is no
-   code left. */
-static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
-                                          target_ulong vaddr)
+   the same physical page. 'is_cpu_write_access' should be true if called
+   from a real cpu write access: the virtual CPU will exit the current
+   TB if code is modified inside this TB. */
+void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
+                                   int is_cpu_write_access)
 {
-    int n;
+    int n, current_tb_modified, current_tb_not_found, current_flags;
+#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
+    CPUState *env = cpu_single_env;
+#endif
     PageDesc *p;
-    TranslationBlock *tb, *tb_next;
+    TranslationBlock *tb, *tb_next, *current_tb;
     target_ulong tb_start, tb_end;
+    target_ulong current_pc, current_cs_base;
 
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p)
        return;
     if (!p->code_bitmap &&
-        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
+        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
+        is_cpu_write_access) {
         /* build code bitmap */
         build_page_bitmap(p);
     }
 
     /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to invalidate all the code */
+    current_tb_not_found = is_cpu_write_access;
+    current_tb_modified = 0;
+    current_tb = NULL; /* avoid warning */
+    current_pc = 0; /* avoid warning */
+    current_cs_base = 0; /* avoid warning */
+    current_flags = 0; /* avoid warning */
     tb = p->first_tb;
     while (tb != NULL) {
         n = (long)tb & 3;
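A side note on the allocate/flush/retry idiom in tb_gen_code above: tb_alloc() returning NULL means the translation cache is full, tb_flush() empties the whole cache, and the retried allocation is then guaranteed to succeed. A self-contained sketch of the same idiom over a bump allocator (all names here are made up for illustration):

#include <stddef.h>

#define POOL_SIZE 4096

static char pool[POOL_SIZE];
static size_t pool_used;

static void *pool_alloc(size_t n)
{
    void *p;

    if (pool_used + n > POOL_SIZE)
        return NULL;                  /* cache full: caller must flush */
    p = pool + pool_used;
    pool_used += n;
    return p;
}

static void pool_flush(void)
{
    pool_used = 0;                    /* drop everything, like tb_flush */
}

static void *pool_alloc_retry(size_t n)
{
    void *p = pool_alloc(n);
    if (!p) {
        pool_flush();                 /* flush must be done */
        p = pool_alloc(n);            /* cannot fail now, for n <= POOL_SIZE */
    }
    return p;
}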
@@ -573,6 +620,36 @@ static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
         }
         if (!(tb_end <= start || tb_start >= end)) {
+#ifdef TARGET_HAS_PRECISE_SMC
+            if (current_tb_not_found) {
+                current_tb_not_found = 0;
+                current_tb = NULL;
+                if (env->mem_write_pc) {
+                    /* now we have a real cpu fault */
+                    current_tb = tb_find_pc(env->mem_write_pc);
+                }
+            }
+            if (current_tb == tb &&
+                !(current_tb->cflags & CF_SINGLE_INSN)) {
+                /* If we are modifying the current TB, we must stop
+                   its execution. We could be more precise by checking
+                   that the modification is after the current PC, but it
+                   would require a specialized function to partially
+                   restore the CPU state */
+
+                current_tb_modified = 1;
+                cpu_restore_state(current_tb, env,
+                                  env->mem_write_pc, NULL);
+#if defined(TARGET_I386)
+                current_flags = env->hflags;
+                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+                current_cs_base = (target_ulong)env->segs[R_CS].base;
+                current_pc = current_cs_base + env->eip;
+#else
+#error unsupported CPU
+#endif
+            }
+#endif /* TARGET_HAS_PRECISE_SMC */
             tb_phys_invalidate(tb, -1);
         }
         tb = tb_next;
@@ -581,13 +658,25 @@ static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
     /* if no code remaining, no need to continue to use slow writes */
     if (!p->first_tb) {
         invalidate_page_bitmap(p);
-        tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
+        if (is_cpu_write_access) {
+            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
+        }
+    }
+#endif
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (current_tb_modified) {
+        /* we generate a block containing just the instruction
+           modifying the memory. It will ensure that it cannot modify
+           itself */
+        tb_gen_code(env, current_pc, current_cs_base, current_flags,
+                    CF_SINGLE_INSN);
+        cpu_resume_from_signal(env, NULL);
     }
 #endif
 }
 
 /* len must be <= 8 and start must be a multiple of len */
-static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
+static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
 {
     PageDesc *p;
     int offset, b;
@@ -608,77 +697,75 @@ static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
         goto do_invalidate;
     } else {
     do_invalidate:
-        tb_invalidate_phys_page_range(start, start + len, vaddr);
+        tb_invalidate_phys_page_range(start, start + len, 1);
     }
 }
 
-/* invalidate all TBs which intersect with the target virtual page
-   starting in range [start;end[. This function is usually used when
-   the target processor flushes its I-cache. NOTE: start and end must
-   refer to the same physical page */
-void tb_invalidate_page_range(target_ulong start, target_ulong end)
-{
-    int n;
-    PageDesc *p;
-    TranslationBlock *tb, *tb_next;
-    target_ulong pc;
-    target_ulong phys_start;
-
-#if !defined(CONFIG_USER_ONLY)
-    {
-        VirtPageDesc *vp;
-        vp = virt_page_find(start >> TARGET_PAGE_BITS);
-        if (!vp)
-            return;
-        if (vp->valid_tag != virt_valid_tag)
-            return;
-        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
-    }
-#else
-    phys_start = start;
-#endif
-    p = page_find(phys_start >> TARGET_PAGE_BITS);
-    if (!p)
-        return;
-    /* we remove all the TBs in the range [start, end[ */
-    /* XXX: see if in some cases it could be faster to invalidate all the code */
-    tb = p->first_tb;
-    while (tb != NULL) {
-        n = (long)tb & 3;
-        tb = (TranslationBlock *)((long)tb & ~3);
-        tb_next = tb->page_next[n];
-        pc = tb->pc;
-        if (!((pc + tb->size) <= start || pc >= end)) {
-            tb_phys_invalidate(tb, -1);
-        }
-        tb = tb_next;
-    }
-#if !defined(CONFIG_USER_ONLY)
-    /* if no code remaining, no need to continue to use slow writes */
-    if (!p->first_tb)
-        tlb_unprotect_code(cpu_single_env, start);
-#endif
-}
-
 #if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_ulong addr)
+static void tb_invalidate_phys_page(target_ulong addr,
+                                    unsigned long pc, void *puc)
 {
-    int n;
+    int n, current_flags, current_tb_modified;
+    target_ulong current_pc, current_cs_base;
     PageDesc *p;
-    TranslationBlock *tb;
+    TranslationBlock *tb, *current_tb;
+#ifdef TARGET_HAS_PRECISE_SMC
+    CPUState *env = cpu_single_env;
+#endif
 
     addr &= TARGET_PAGE_MASK;
     p = page_find(addr >> TARGET_PAGE_BITS);
     if (!p)
         return;
     tb = p->first_tb;
+    current_tb_modified = 0;
+    current_tb = NULL;
+    current_pc = 0; /* avoid warning */
+    current_cs_base = 0; /* avoid warning */
+    current_flags = 0; /* avoid warning */
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (tb && pc != 0) {
+        current_tb = tb_find_pc(pc);
+    }
+#endif
     while (tb != NULL) {
         n = (long)tb & 3;
         tb = (TranslationBlock *)((long)tb & ~3);
+#ifdef TARGET_HAS_PRECISE_SMC
+        if (current_tb == tb &&
+            !(current_tb->cflags & CF_SINGLE_INSN)) {
+            /* If we are modifying the current TB, we must stop
+               its execution. We could be more precise by checking
+               that the modification is after the current PC, but it
+               would require a specialized function to partially
+               restore the CPU state */
+
+            current_tb_modified = 1;
+            cpu_restore_state(current_tb, env, pc, puc);
+#if defined(TARGET_I386)
+            current_flags = env->hflags;
+            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+            current_cs_base = (target_ulong)env->segs[R_CS].base;
+            current_pc = current_cs_base + env->eip;
+#else
+#error unsupported CPU
+#endif
+        }
+#endif /* TARGET_HAS_PRECISE_SMC */
         tb_phys_invalidate(tb, addr);
         tb = tb->page_next[n];
     }
     p->first_tb = NULL;
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (current_tb_modified) {
+        /* we generate a block containing just the instruction
+           modifying the memory. It will ensure that it cannot modify
+           itself */
+        tb_gen_code(env, current_pc, current_cs_base, current_flags,
+                    CF_SINGLE_INSN);
+        cpu_resume_from_signal(env, puc);
+    }
+#endif
 }
 #endif
@@ -696,6 +783,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
     p->first_tb = (TranslationBlock *)((long)tb | n);
     invalidate_page_bitmap(p);
 
+#ifdef TARGET_HAS_SMC
+
 #if defined(CONFIG_USER_ONLY)
     if (p->flags & PAGE_WRITE) {
         unsigned long host_start, host_end, addr;
@@ -727,6 +816,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
         tlb_protect_code(cpu_single_env, virt_addr);
     }
 #endif
+
+#endif /* TARGET_HAS_SMC */
 }
 
 /* Allocate a new translation block. Flush the translation buffer if
@@ -910,6 +1001,14 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
     tb_reset_jump_recursive2(tb, 1);
 }
 
+static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+{
+    target_ulong phys_addr;
+
+    phys_addr = cpu_get_phys_page_debug(env, pc);
+    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
+}
+
 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
    breakpoint is reached */
 int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
@@ -925,7 +1024,8 @@ int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
         return -1;
     env->breakpoints[env->nb_breakpoints++] = pc;
-    tb_invalidate_page_range(pc, pc + 1);
+
+    breakpoint_invalidate(env, pc);
     return 0;
 #else
     return -1;
@@ -946,7 +1046,8 @@ int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
     memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
             (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
     env->nb_breakpoints--;
-    tb_invalidate_page_range(pc, pc + 1);
+
+    breakpoint_invalidate(env, pc);
     return 0;
 #else
     return -1;
@@ -1197,27 +1298,6 @@ static void tlb_protect_code(CPUState *env, uint32_t addr)
 #endif
 }
 
-static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
-{
-    if (addr == (tlb_entry->address &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
-        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
-        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
-    }
-}
-
-/* update the TLB so that writes in virtual page 'addr' are no longer
-   tested self modifying code */
-static void tlb_unprotect_code(CPUState *env, uint32_t addr)
-{
-    int i;
-
-    addr &= TARGET_PAGE_MASK;
-    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
-    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
-}
-
 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                        uint32_t phys_addr)
 {
@@ -1387,12 +1467,18 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
             /* ROM: access is ignored (same as unassigned) */
             env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
             env->tlb_write[is_user][index].addend = addend;
-        } else if (first_tb) {
+        } else
+            /* XXX: the PowerPC code seems not ready to handle
+               self modifying code with DCBI */
+#if defined(TARGET_HAS_SMC) || 1
+        if (first_tb) {
             /* if code is present, we use a specific memory
               handler. It works only for physical memory access */
             env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
             env->tlb_write[is_user][index].addend = addend;
-        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+        } else
+#endif
+        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                    !cpu_physical_memory_is_dirty(pd)) {
             env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
             env->tlb_write[is_user][index].addend = addend;
@@ -1420,7 +1506,9 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
     } else {
         if (prot & PROT_WRITE) {
             if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+#if defined(TARGET_HAS_SMC) || 1
                 first_tb ||
+#endif
                 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                  !cpu_physical_memory_is_dirty(pd))) {
                 /* ROM: we do as if code was inside */
@@ -1450,7 +1538,7 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
 
 /* called from signal handler: invalidate the code and unprotect the
    page. Return TRUE if the fault was succesfully handled. */
-int page_unprotect(unsigned long addr)
+int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
 {
 #if !defined(CONFIG_SOFTMMU)
     VirtPageDesc *vp;
@@ -1476,13 +1564,13 @@ int page_unprotect(unsigned long addr)
         printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
                addr, vp->phys_addr, vp->prot);
 #endif
-        /* set the dirty bit */
-        phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
-        /* flush the code inside */
-        tb_invalidate_phys_page(vp->phys_addr);
         if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
             cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                       (unsigned long)addr, vp->prot);
+        /* set the dirty bit */
+        phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
+        /* flush the code inside */
+        tb_invalidate_phys_page(vp->phys_addr, pc, puc);
         return 1;
 #else
     return 0;
@@ -1582,7 +1670,7 @@ void page_set_flags(unsigned long start, unsigned long end, int flags)
         if (!(p->flags & PAGE_WRITE) &&
             (flags & PAGE_WRITE) &&
             p->first_tb) {
-            tb_invalidate_phys_page(addr);
+            tb_invalidate_phys_page(addr, 0, NULL);
         }
         p->flags = flags;
     }
@@ -1591,7 +1679,7 @@ void page_set_flags(unsigned long start, unsigned long end, int flags)
 
 /* called from signal handler: invalidate the code and unprotect the
    page. Return TRUE if the fault was succesfully handled. */
-int page_unprotect(unsigned long address)
+int page_unprotect(unsigned long address, unsigned long pc, void *puc)
 {
     unsigned int page_index, prot, pindex;
     PageDesc *p, *p1;
@@ -1619,7 +1707,7 @@ int page_unprotect(unsigned long address)
         p1[pindex].flags |= PAGE_WRITE;
         /* and since the content will be modified, we must invalidate
            the corresponding translated code. */
-        tb_invalidate_phys_page(address);
+        tb_invalidate_phys_page(address, pc, puc);
 #ifdef DEBUG_TB_CHECK
         tb_invalidate_check(address);
 #endif
@@ -1639,14 +1727,13 @@ void page_unprotect_range(uint8_t *data, unsigned long data_size)
     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
-        page_unprotect(addr);
+        page_unprotect(addr, 0, NULL);
     }
 }
 
 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
 {
 }
 
 #endif /* defined(CONFIG_USER_ONLY) */
 
 /* register physical memory. 'size' must be a multiple of the target
@@ -1672,7 +1759,7 @@ static uint32_t unassigned_mem_readb(uint32_t addr)
     return 0;
 }
 
-static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
 {
 }
 
@@ -1691,37 +1778,37 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
 /* self modifying code support in soft mmu mode : writing to a page
    containing code comes to these functions */
 
-static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void code_mem_writeb(uint32_t addr, uint32_t val)
 {
     unsigned long phys_addr;
 
     phys_addr = addr - (long)phys_ram_base;
 #if !defined(CONFIG_USER_ONLY)
-    tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
+    tb_invalidate_phys_page_fast(phys_addr, 1);
 #endif
     stb_raw((uint8_t *)addr, val);
     phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
 }
 
-static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void code_mem_writew(uint32_t addr, uint32_t val)
 {
     unsigned long phys_addr;
 
     phys_addr = addr - (long)phys_ram_base;
 #if !defined(CONFIG_USER_ONLY)
-    tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
+    tb_invalidate_phys_page_fast(phys_addr, 2);
 #endif
     stw_raw((uint8_t *)addr, val);
     phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
 }
 
-static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void code_mem_writel(uint32_t addr, uint32_t val)
 {
     unsigned long phys_addr;
 
     phys_addr = addr - (long)phys_ram_base;
 #if !defined(CONFIG_USER_ONLY)
-    tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
+    tb_invalidate_phys_page_fast(phys_addr, 4);
 #endif
     stl_raw((uint8_t *)addr, val);
     phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
@@ -1739,22 +1826,22 @@ static CPUWriteMemoryFunc *code_mem_write[3] = {
     code_mem_writel,
 };
 
-static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void notdirty_mem_writeb(uint32_t addr, uint32_t val)
 {
     stb_raw((uint8_t *)addr, val);
-    tlb_set_dirty(addr, vaddr);
+    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
 }
 
-static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void notdirty_mem_writew(uint32_t addr, uint32_t val)
 {
     stw_raw((uint8_t *)addr, val);
-    tlb_set_dirty(addr, vaddr);
+    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
 }
 
-static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void notdirty_mem_writel(uint32_t addr, uint32_t val)
 {
     stl_raw((uint8_t *)addr, val);
-    tlb_set_dirty(addr, vaddr);
+    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
 }
 
 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
@@ -1861,17 +1948,17 @@ void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
                 if (l >= 4 && ((addr & 3) == 0)) {
                     /* 32 bit read access */
                     val = ldl_raw(buf);
-                    io_mem_write[io_index][2](addr, val, 0);
+                    io_mem_write[io_index][2](addr, val);
                     l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {
                     /* 16 bit read access */
                     val = lduw_raw(buf);
-                    io_mem_write[io_index][1](addr, val, 0);
+                    io_mem_write[io_index][1](addr, val);
                     l = 2;
                 } else {
                     /* 8 bit access */
                     val = ldub_raw(buf);
-                    io_mem_write[io_index][0](addr, val, 0);
+                    io_mem_write[io_index][0](addr, val);
                     l = 1;
                 }
             } else {
@@ -70,20 +70,23 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
 
 static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                           DATA_TYPE val,
-                                          unsigned long tlb_addr)
+                                          unsigned long tlb_addr,
+                                          void *retaddr)
 {
     int index;
 
     index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+    env->mem_write_vaddr = tlb_addr;
+    env->mem_write_pc = (unsigned long)retaddr;
 #if SHIFT <= 2
-    io_mem_write[index][SHIFT](physaddr, val, tlb_addr);
+    io_mem_write[index][SHIFT](physaddr, val);
 #else
 #ifdef TARGET_WORDS_BIGENDIAN
-    io_mem_write[index][2](physaddr, val >> 32, tlb_addr);
-    io_mem_write[index][2](physaddr + 4, val, tlb_addr);
+    io_mem_write[index][2](physaddr, val >> 32);
+    io_mem_write[index][2](physaddr + 4, val);
 #else
-    io_mem_write[index][2](physaddr, val, tlb_addr);
-    io_mem_write[index][2](physaddr + 4, val >> 32, tlb_addr);
+    io_mem_write[index][2](physaddr, val);
+    io_mem_write[index][2](physaddr + 4, val >> 32);
 #endif
 #endif /* SHIFT > 2 */
 }
@@ -193,7 +196,8 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
         /* IO access */
         if ((addr & (DATA_SIZE - 1)) != 0)
             goto do_unaligned_access;
-        glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
+        retaddr = GETPC();
+        glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
     } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         retaddr = GETPC();
@@ -229,7 +233,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
         /* IO access */
         if ((addr & (DATA_SIZE - 1)) != 0)
             goto do_unaligned_access;
-        glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
+        glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
     } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         /* XXX: not efficient, but simple */
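The three hunks above appear to come from the softmmu store helpers (the per-file headers were lost in this mirror, so the file is an inference). The key move: the store helper captures its own return address with GETPC() and hands it down to io_write, which records it as env->mem_write_pc together with the target virtual address in env->mem_write_vaddr; tb_invalidate_phys_page_range() can later map that host pc back to the interrupted TB via tb_find_pc(). In QEMU of this era GETPC() is built on GCC's __builtin_return_address. A minimal standalone illustration of the capture (all names invented, GCC/Clang only):

#include <stdio.h>

static unsigned long last_write_pc;    /* models env->mem_write_pc */

/* a store helper that records where (in host code) it was called from */
static void io_write_stub(unsigned long addr, unsigned int val)
{
    last_write_pc = (unsigned long)__builtin_return_address(0);
    (void)addr;
    (void)val;
}

int main(void)
{
    io_write_stub(0x1000, 42);
    printf("store issued from host pc 0x%lx\n", last_write_pc);
    return 0;
}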
@@ -43,6 +43,13 @@ typedef struct CPUARMState {
     struct TranslationBlock *current_tb;
     int user_mode_only;
 
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context) */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
     /* user data */
     void *opaque;
 } CPUARMState;
@@ -22,6 +22,12 @@
 
 #define TARGET_LONG_BITS 32
 
+/* target supports implicit self modifying code */
+#define TARGET_HAS_SMC
+/* support for self modifying code even if the modified instruction is
+   close to the modifying instruction */
+#define TARGET_HAS_PRECISE_SMC
+
 #include "cpu-defs.h"
 
 #if defined(__i386__) && !defined(CONFIG_SOFTMMU)
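The two new macros above split the feature in two levels: TARGET_HAS_SMC means the target architecture allows stores into its own code, so the emulator must track code pages at all; TARGET_HAS_PRECISE_SMC additionally promises correctness when the modified instruction sits right next to the modifying one, i.e. inside the currently executing TB (x86 is the target defining it here). A compile-time toy showing the split (macro placement and strings are mine, not QEMU's):

#include <stdio.h>

#define TARGET_HAS_SMC          /* target may store into its own code */
#define TARGET_HAS_PRECISE_SMC  /* even adjacent to the storing insn */

int main(void)
{
#ifdef TARGET_HAS_SMC
    puts("SMC: code pages are tracked; stores invalidate TBs");
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    puts("precise SMC: a TB writing over itself is stopped and regenerated");
#endif
    return 0;
}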
@@ -331,8 +337,16 @@ typedef struct CPUX86State {
     int interrupt_request;
     int user_mode_only; /* user mode only simulation */
 
-    /* soft mmu support */
     uint32_t a20_mask;
+
+    /* soft mmu support */
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context) */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
     /* 0 = kernel, 1 = user */
     CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
     CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
@@ -358,7 +372,7 @@ int cpu_x86_inl(CPUX86State *env, int addr);
 CPUX86State *cpu_x86_init(void);
 int cpu_x86_exec(CPUX86State *s);
 void cpu_x86_close(CPUX86State *s);
-int cpu_x86_get_pic_interrupt(CPUX86State *s);
+int cpu_get_pic_interrupt(CPUX86State *s);
 
 /* this function must always be used to load data in the segment
    cache: it synchronizes the hflags with the segment cache values */
@@ -1189,6 +1189,8 @@ static inline int gen_intermediate_code_internal(CPUState *env,
         return -1;
     if (!(flags & HF_SS32_MASK))
         return -1;
+    if (tb->cflags & CF_SINGLE_INSN)
+        return -1;
     gen_code_end = gen_code_ptr +
         GEN_CODE_MAX_SIZE - GEN_CODE_MAX_INSN_SIZE;
     dc->gen_code_ptr = gen_code_ptr;
@@ -4491,7 +4491,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     DisasContext dc1, *dc = &dc1;
     uint8_t *pc_ptr;
     uint16_t *gen_opc_end;
-    int flags, j, lj;
+    int flags, j, lj, cflags;
     uint8_t *pc_start;
     uint8_t *cs_base;
 
@@ -4499,6 +4499,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     pc_start = (uint8_t *)tb->pc;
     cs_base = (uint8_t *)tb->cs_base;
     flags = tb->flags;
+    cflags = tb->cflags;
 
     dc->pe = (flags >> HF_PE_SHIFT) & 1;
     dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
@@ -4573,7 +4574,8 @@ static inline int gen_intermediate_code_internal(CPUState *env,
            the flag and abort the translation to give the irqs a
            change to be happen */
         if (dc->tf || dc->singlestep_enabled ||
-            (flags & HF_INHIBIT_IRQ_MASK)) {
+            (flags & HF_INHIBIT_IRQ_MASK) ||
+            (cflags & CF_SINGLE_INSN)) {
             gen_op_jmp_im(pc_ptr - dc->cs_base);
             gen_eob(dc);
             break;
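With CF_SINGLE_INSN in cflags the translator treats every instruction like the trap-flag/single-step case above: it emits one guest instruction, then closes the block, so the regenerated TB can never write over code it still has left to execute. A toy model of a translation loop honoring such a flag (flag value and names invented):

#include <stdio.h>

#define CF_SINGLE_INSN 0x08            /* illustrative value */

/* translate up to n instructions into one block; stop after the
   first when CF_SINGLE_INSN is set */
static int translate_block(const char *insns[], int n, int cflags)
{
    int i;

    for (i = 0; i < n; i++) {
        printf("  emit %s\n", insns[i]);
        if (cflags & CF_SINGLE_INSN)
            return i + 1;              /* end the TB right here */
    }
    return n;
}

int main(void)
{
    const char *prog[] = { "mov", "add", "jmp" };

    printf("normal TB: %d insns\n", translate_block(prog, 3, 0));
    printf("single-insn TB: %d insns\n",
           translate_block(prog, 3, CF_SINGLE_INSN));
    return 0;
}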
@@ -164,6 +164,13 @@ typedef struct CPUPPCState {
     int user_mode_only; /* user mode only simulation */
     struct TranslationBlock *current_tb; /* currently executing TB */
     /* soft mmu support */
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context) */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
     /* 0 = kernel, 1 = user (may have 2 = kernel code, 3 = user code ?) */
     CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
     CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
@@ -43,6 +43,14 @@ typedef struct CPUSPARCState {
     void *opaque;
     /* NOTE: we allow 8 more registers to handle wrapping */
     uint32_t regbase[NWINDOWS * 16 + 8];
+
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context) */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
 } CPUSPARCState;
 
 CPUSPARCState *cpu_sparc_init(void);