Respect PROT_EXEC in user-only mode.

Fix s390x, i386 and riscv for translations crossing a page.
 -----BEGIN PGP SIGNATURE-----
 
 iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmMW8TcdHHJpY2hhcmQu
 aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8qfwf9EYjXywES/UYzfeJC
 7irryE3iYddWP+ix3Q4WKaTc61plwP5MMCmeq4PjRo1IBAL5dTtUE1+AFXkEvm4L
 EckSiT5D5d/wYOfhWSWxjblmMk7GUXRRgKzkF1ir3soIftQgXdb43PwAswuOca/v
 dX7wXBJOoWmGWqXNNlQmGIl7c4uQTkOM6iTTLlm4Qg7SJC4MA6EiSZmXlvAs80lN
 TCbBV5P89qseHwzhJUTMZEO+ZMAuTSjFSd/RqBexVa4ty5UJxxgBk21A8JtQPUhr
 Y/Ezb0yhOcwrdjJ8REc267BZbdNgbaVNlUd7c9GKbv8bQUh0AoM9gnjGdoID88x9
 q0f+Pw==
 =HmJB
 -----END PGP SIGNATURE-----

Merge tag 'pull-tcg-20220906' of https://gitlab.com/rth7680/qemu into staging

Respect PROT_EXEC in user-only mode.
Fix s390x, i386 and riscv for translations crossing a page.

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmMW8TcdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8qfwf9EYjXywES/UYzfeJC
# 7irryE3iYddWP+ix3Q4WKaTc61plwP5MMCmeq4PjRo1IBAL5dTtUE1+AFXkEvm4L
# EckSiT5D5d/wYOfhWSWxjblmMk7GUXRRgKzkF1ir3soIftQgXdb43PwAswuOca/v
# dX7wXBJOoWmGWqXNNlQmGIl7c4uQTkOM6iTTLlm4Qg7SJC4MA6EiSZmXlvAs80lN
# TCbBV5P89qseHwzhJUTMZEO+ZMAuTSjFSd/RqBexVa4ty5UJxxgBk21A8JtQPUhr
# Y/Ezb0yhOcwrdjJ8REc267BZbdNgbaVNlUd7c9GKbv8bQUh0AoM9gnjGdoID88x9
# q0f+Pw==
# =HmJB
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 06 Sep 2022 03:05:27 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20220906' of https://gitlab.com/rth7680/qemu:
  target/riscv: Make translator stop before the end of a page
  target/riscv: Add MAX_INSN_LEN and insn_len
  target/i386: Make translator stop before the end of a page
  target/s390x: Make translator stop before the end of a page
  accel/tcg: Add fast path for translator_ld*
  accel/tcg: Add pc and host_pc params to gen_intermediate_code
  accel/tcg: Remove translator_ldsw
  accel/tcg: Document the faulting lookup in tb_lookup_cmp
  accel/tcg: Use probe_access_internal for softmmu get_page_addr_code_hostp
  accel/tcg: Move qemu_ram_addr_from_host_nofail to physmem.c
  accel/tcg: Make tb_htable_lookup static
  accel/tcg: Unlock mmap_lock after longjmp
  accel/tcg: Properly implement get_page_addr_code for user-only
  accel/tcg: Introduce is_same_page()
  tests/tcg/i386: Move smc_code2 to an executable section
  linux-user: Clear translations on mprotect()
  linux-user: Honor PT_GNU_STACK
  linux-user/x86_64: Allocate vsyscall page as a commpage
  linux-user/hppa: Allocate page zero as a commpage
  linux-user/arm: Mark the commpage executable

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
Stefan Hajnoczi 2022-09-06 07:31:43 -04:00
commit b34b42f1b6
43 changed files with 966 additions and 367 deletions

View File

@ -170,6 +170,76 @@ uint32_t curr_cflags(CPUState *cpu)
return cflags;
}
/*
 * Lookup key for the TB hash table (qht): the guest PC plus every piece
 * of state that must match for a cached TranslationBlock to be reused.
 * Filled in by tb_htable_lookup() and compared by tb_lookup_cmp().
 */
struct tb_desc {
target_ulong pc;              /* guest virtual PC at the start of the TB */
target_ulong cs_base;         /* code segment base (target-specific meaning) */
CPUArchState *env;            /* used to translate the second page, if any */
tb_page_addr_t phys_page1;    /* physical address of the first code page */
uint32_t flags;               /* TB flags; must match exactly */
uint32_t cflags;              /* compile flags; compared via tb_cflags() */
uint32_t trace_vcpu_dstate;   /* per-vCPU trace dstate snapshot */
};
/*
 * qht comparison callback: return true iff TB @p matches lookup key @d.
 * Requires pc, first physical page, cs_base, flags, trace state and
 * cflags to all match; a TB that spans two pages (page_addr[1] != -1)
 * additionally requires the second page to translate to the same
 * physical address.  NOTE: may raise a guest exception through
 * get_page_addr_code() — see the comment below for why that is safe.
 */
static bool tb_lookup_cmp(const void *p, const void *d)
{
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if (tb->pc == desc->pc &&
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
/* Single-page TB: everything already matched. */
return true;
} else {
tb_page_addr_t phys_page2;
target_ulong virt_page2;
/*
* We know that the first page matched, and an otherwise valid TB
* encountered an incomplete instruction at the end of that page,
* therefore we know that generating a new TB from the current PC
* must also require reading from the next page -- even if the
* second pages do not match, and therefore the resulting insn
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
phys_page2 = get_page_addr_code(desc->env, virt_page2);
if (tb->page_addr[1] == phys_page2) {
return true;
}
}
}
return false;
}
/*
 * Look up a TranslationBlock in the global TB hash table by guest state.
 * Builds a struct tb_desc key, resolves the physical address of the code
 * page at @pc, and probes tb_ctx.htable with tb_lookup_cmp() as the
 * comparator.  Returns NULL if @pc is not backed by a full RAM page
 * (get_page_addr_code() returned -1) or if no matching TB is cached.
 * May raise a guest exception via get_page_addr_code().
 */
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
desc.env = cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) {
/* Not executable from a cached page; caller must take the slow path. */
return NULL;
}
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base,
@ -462,13 +532,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
/*
* The mmap_lock is dropped by tb_gen_code if it runs out of
* memory.
*/
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
tcg_debug_assert(!have_mmap_lock());
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
@ -487,67 +555,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
end_exclusive();
}
/*
 * NOTE(review): this is the pre-change copy of struct tb_desc — the
 * surrounding deletion hunk suggests these lines were removed by this
 * commit when the definition moved earlier in the file; the rendered
 * diff shows both copies without +/- markers.  Identical to the copy
 * above.
 */
struct tb_desc {
target_ulong pc;
target_ulong cs_base;
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
uint32_t cflags;
uint32_t trace_vcpu_dstate;
};
/*
 * NOTE(review): pre-change copy of tb_lookup_cmp (removed lines in this
 * diff).  Functionally the same comparison as the new copy above, but
 * without the explanatory comment and computing the second virtual page
 * as (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE rather than
 * TARGET_PAGE_ALIGN(pc).
 */
static bool tb_lookup_cmp(const void *p, const void *d)
{
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if (tb->pc == desc->pc &&
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
} else {
tb_page_addr_t phys_page2;
target_ulong virt_page2;
virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
phys_page2 = get_page_addr_code(desc->env, virt_page2);
if (tb->page_addr[1] == phys_page2) {
return true;
}
}
}
return false;
}
/*
 * NOTE(review): pre-change copy of tb_htable_lookup (removed lines in
 * this diff).  Same body as the new static copy above, but with
 * external linkage — the commit ("accel/tcg: Make tb_htable_lookup
 * static" in the shortlog) dropped the extern declaration.
 */
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
desc.env = cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) {
return NULL;
}
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
if (TCG_TARGET_HAS_direct_jump) {
@ -936,7 +943,9 @@ int cpu_exec(CPUState *cpu)
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
tcg_debug_assert(!have_mmap_lock());
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();

View File

@ -1283,18 +1283,6 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
prot, mmu_idx, size);
}
/*
 * Translate a host pointer into a guest ram_addr_t, aborting on failure:
 * any pointer not within registered guest RAM is treated as a fatal
 * internal error rather than returned to the caller.
 * NOTE(review): removed here by this commit and re-added as a public
 * function in physmem.c (see shortlog "accel/tcg: Move
 * qemu_ram_addr_from_host_nofail to physmem.c").
 */
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
ram_addr_t ram_addr;
ram_addr = qemu_ram_addr_from_host(ptr);
if (ram_addr == RAM_ADDR_INVALID) {
error_report("Bad ram pointer %p", ptr);
abort();
}
return ram_addr;
}
/*
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
@ -1494,61 +1482,6 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
(ADDR) & TARGET_PAGE_MASK)
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
/*
 * NOTE(review): this is the pre-change, hand-rolled TLB walk removed by
 * this commit in favor of the probe_access_internal() based version
 * added further below.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
uintptr_t mmu_idx = cpu_mmu_index(env, true);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
void *p;
/* Slow path: consult the victim TLB, then refill via tlb_fill(). */
if (unlikely(!tlb_hit(entry->addr_code, addr))) {
if (!VICTIM_TLB_HIT(addr_code, addr)) {
tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
/* tlb_fill() may resize the TLB; re-fetch index and entry. */
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
/*
* The MMU protection covers a smaller range than a target
* page, so we must redo the MMU check for every insn.
*/
return -1;
}
}
assert(tlb_hit(entry->addr_code, addr));
}
if (unlikely(entry->addr_code & TLB_MMIO)) {
/* The region is not backed by RAM. */
if (hostp) {
*hostp = NULL;
}
return -1;
}
/* Translate guest address to host via the TLB addend. */
p = (void *)((uintptr_t)addr + entry->addend);
if (hostp) {
*hostp = p;
}
return qemu_ram_addr_from_host_nofail(p);
}
/*
 * Convenience wrapper: same as get_page_addr_code_hostp() but without
 * returning the host pointer.  NOTE(review): removed by this commit;
 * the wrapper became a static inline in exec-all.h.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
return get_page_addr_code_hostp(env, addr, NULL);
}
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
@ -1704,6 +1637,32 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
return flags ? NULL : host;
}
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
void *p;
/*
 * Probe one byte for instruction fetch in the current "code" MMU mode;
 * the result is discarded — only the host pointer @p matters.  @p is
 * left NULL when the page cannot be executed directly (e.g. MMIO).
 */
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
cpu_mmu_index(env, true), false, &p, 0);
if (p == NULL) {
return -1;
}
if (hostp) {
*hostp = p;
}
/* Host pointer -> guest ram_addr_t; aborts if @p is not guest RAM. */
return qemu_ram_addr_from_host_nofail(p);
}
#ifdef CONFIG_PLUGIN
/*
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.

View File

@ -46,6 +46,7 @@
#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
@ -1384,19 +1385,19 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc, phys_page2;
target_ulong virt_page2;
tb_page_addr_t phys_pc;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
int64_t ti;
#endif
void *host_pc;
assert_memory_lock();
qemu_thread_jit_write();
phys_pc = get_page_addr_code(env, pc);
phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */
@ -1427,6 +1428,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->flags = flags;
tb->cflags = cflags;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb->page_addr[0] = phys_pc;
tb->page_addr[1] = -1;
tcg_ctx->tb_cflags = cflags;
tb_overflow:
@ -1444,7 +1447,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_func_start(tcg_ctx);
tcg_ctx->cpu = env_cpu(env);
gen_intermediate_code(cpu, tb, max_insns);
gen_intermediate_code(cpu, tb, max_insns, pc, host_pc);
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
max_insns = tb->icount;
@ -1620,13 +1623,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
/*
* If the TB is not associated with a physical RAM page then
* it must be a temporary one-insn TB, and we have nothing to do
* except fill in the page_addr[] fields. Return early before
* attempting to link to other TBs or add to the lookup table.
* If the TB is not associated with a physical RAM page then it must be
* a temporary one-insn TB, and we have nothing left to do. Return early
* before attempting to link to other TBs or add to the lookup table.
*/
if (phys_pc == -1) {
tb->page_addr[0] = tb->page_addr[1] = -1;
if (tb->page_addr[0] == -1) {
return tb;
}
@ -1637,17 +1638,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/
tcg_tb_insert(tb);
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
/*
* No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state.
*/
existing_tb = tb_link_page(tb, phys_pc, phys_page2);
existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
/* if the TB already exists, discard what we just translated */
if (unlikely(existing_tb != tb)) {
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

View File

@ -42,30 +42,27 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
static inline void translator_page_protect(DisasContextBase *dcbase,
target_ulong pc)
{
#ifdef CONFIG_USER_ONLY
dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
page_protect(pc);
#endif
}
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db)
{
uint32_t cflags = tb_cflags(tb);
bool plugin_enabled;
/* Initialize DisasContext */
db->tb = tb;
db->pc_first = tb->pc;
db->pc_next = db->pc_first;
db->pc_first = pc;
db->pc_next = pc;
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
translator_page_protect(db, db->pc_next);
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
#ifdef CONFIG_USER_ONLY
page_protect(pc);
#endif
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@ -150,31 +147,103 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
#endif
}
static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
static void *translator_access(CPUArchState *env, DisasContextBase *db,
target_ulong pc, size_t len)
{
void *host;
target_ulong base, end;
TranslationBlock *tb;
tb = db->tb;
/* Use slow path if first page is MMIO. */
if (unlikely(tb->page_addr[0] == -1)) {
return NULL;
}
end = pc + len - 1;
if (likely(is_same_page(db, end))) {
host = db->host_addr[0];
base = db->pc_first;
} else {
host = db->host_addr[1];
base = TARGET_PAGE_ALIGN(db->pc_first);
if (host == NULL) {
tb->page_addr[1] =
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
#ifdef CONFIG_USER_ONLY
target_ulong end = pc + len - 1;
if (end > dcbase->page_protect_end) {
translator_page_protect(dcbase, end);
}
page_protect(end);
#endif
/* We cannot handle MMIO as second page. */
assert(tb->page_addr[1] != -1);
host = db->host_addr[1];
}
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
abi_ptr pc, bool do_swap) \
{ \
translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
type ret = load_fn(env, pc); \
if (do_swap) { \
ret = swap_fn(ret); \
} \
plugin_insn_append(pc, &ret, sizeof(ret)); \
return ret; \
/* Use slow path when crossing pages. */
if (is_same_page(db, pc)) {
return NULL;
}
}
FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
tcg_debug_assert(pc >= base);
return host + (pc - base);
}
#undef GEN_TRANSLATOR_LD
/*
 * Fetch one instruction byte at guest address @pc.
 * Fast path: read directly from the host mapping found by
 * translator_access(); slow path: fall back to cpu_ldub_code().
 * Either way the byte is recorded for the plugin subsystem.
 */
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint8_t ret;
void *p = translator_access(env, db, pc, sizeof(ret));
if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return ldub_p(p);
}
ret = cpu_ldub_code(env, pc);
plugin_insn_append(pc, &ret, sizeof(ret));
return ret;
}
/*
 * Fetch a 16-bit instruction word at guest address @pc.
 * Fast path reads straight from host memory via translator_access();
 * slow path uses cpu_lduw_code().  On the slow path the value is
 * tswap16()ed into @plug so the plugin buffer receives the raw guest
 * byte order rather than the host-native value.
 */
uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint16_t ret, plug;
void *p = translator_access(env, db, pc, sizeof(ret));
if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return lduw_p(p);
}
ret = cpu_lduw_code(env, pc);
plug = tswap16(ret);
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}
/*
 * Fetch a 32-bit instruction word at guest address @pc.
 * Same fast/slow-path structure as translator_lduw(); the slow-path
 * value is tswap32()ed before being appended for plugins so the
 * recorded bytes match guest memory order.
 */
uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint32_t ret, plug;
void *p = translator_access(env, db, pc, sizeof(ret));
if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return ldl_p(p);
}
ret = cpu_ldl_code(env, pc);
plug = tswap32(ret);
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}
/*
 * Fetch a 64-bit instruction word at guest address @pc.
 * Same fast/slow-path structure as translator_lduw(); the slow-path
 * value is tswap64()ed before being appended for plugins so the
 * recorded bytes match guest memory order.
 */
uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint64_t ret, plug;
void *p = translator_access(env, db, pc, sizeof(ret));
if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return ldq_p(p);
}
ret = cpu_ldq_code(env, pc);
plug = tswap64(ret);
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}

View File

@ -80,10 +80,7 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
* (and if the translator doesn't handle page boundaries correctly
* there's little we can do about that here). Therefore, do not
* trigger the unwinder.
*
* Like tb_gen_code, release the memory lock before cpu_loop_exit.
*/
mmap_unlock();
*pc = 0;
return MMU_INST_FETCH;
}
@ -199,6 +196,20 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL;
}
/*
 * User-only version: guest addresses map directly to host addresses, so
 * the "page address" of code is simply @addr itself.  Probing one byte
 * with MMU_INST_FETCH enforces PROT_EXEC (the probe raises the guest
 * signal on a non-executable page — this is the user-mode part of
 * "Respect PROT_EXEC" from the commit message); on success no flags
 * may be set, hence the g_assert.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
int flags;
flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
g_assert(flags == 0);
if (hostp) {
/* Direct guest->host translation, untagged. */
*hostp = g2h_untagged(addr);
}
return addr;
}
/* The softmmu versions of these helpers are in cputlb.c. */
/*

View File

@ -31,6 +31,7 @@ typedef int64_t Elf64_Sxword;
#define PT_LOPROC 0x70000000
#define PT_HIPROC 0x7fffffff
#define PT_GNU_STACK (PT_LOOS + 0x474e551)
#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
#define PT_MIPS_REGINFO 0x70000000

View File

@ -72,6 +72,7 @@ typedef uintptr_t ram_addr_t;
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
ram_addr_t *offset);

View File

@ -39,7 +39,6 @@ typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
target_ulong *data);
@ -552,9 +551,6 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
@ -598,43 +594,44 @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
#endif
#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);
/**
* get_page_addr_code() - user-mode version
* get_page_addr_code_hostp()
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* Returns @addr.
* See get_page_addr_code() (full-system version) for documentation on the
* return value.
*
* Sets *@hostp (when @hostp is non-NULL) as follows.
* If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
* to the host address where @addr's content is kept.
*
* Note: this function can trigger an exception.
*/
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp);
/**
* get_page_addr_code()
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* If we cannot translate and execute from the entire RAM page, or if
* the region is not backed by RAM, returns -1. Otherwise, returns the
* ram_addr_t corresponding to the guest code at @addr.
*
* Note: this function can trigger an exception.
*/
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
target_ulong addr)
{
return addr;
return get_page_addr_code_hostp(env, addr, NULL);
}
/**
* get_page_addr_code_hostp() - user-mode version
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* Returns @addr.
*
* If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
* is kept.
*/
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
target_ulong addr,
void **hostp)
{
if (hostp) {
*hostp = g2h_untagged(addr);
}
return addr;
}
#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);
/**
* adjust_signal_pc:
@ -691,36 +688,6 @@ G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
/**
* get_page_addr_code() - full-system version
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* If we cannot translate and execute from the entire RAM page, or if
* the region is not backed by RAM, returns -1. Otherwise, returns the
* ram_addr_t corresponding to the guest code at @addr.
*
* Note: this function can trigger an exception.
*/
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
/**
* get_page_addr_code_hostp() - full-system version
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* See get_page_addr_code() (full-system version) for documentation on the
* return value.
*
* Sets *@hostp (when @hostp is non-NULL) as follows.
* If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
* to the host address where @addr's content is kept.
*
* Note: this function can trigger an exception.
*/
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp);
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

View File

@ -26,6 +26,19 @@
#include "exec/translate-all.h"
#include "tcg/tcg.h"
/**
* gen_intermediate_code
* @cpu: cpu context
* @tb: translation block
* @max_insns: max number of instructions to translate
* @pc: guest virtual program counter address
* @host_pc: host physical program counter address
*
* This function must be provided by the target, which should create
* the target-specific DisasContext, and then invoke translator_loop.
*/
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc);
/**
* DisasJumpType:
@ -68,24 +81,14 @@ typedef enum DisasJumpType {
* Architecture-agnostic disassembly context.
*/
typedef struct DisasContextBase {
const TranslationBlock *tb;
TranslationBlock *tb;
target_ulong pc_first;
target_ulong pc_next;
DisasJumpType is_jmp;
int num_insns;
int max_insns;
bool singlestep_enabled;
#ifdef CONFIG_USER_ONLY
/*
* Guest address of the last byte of the last protected page.
*
* Pages containing the translated instructions are made non-writable in
* order to achieve consistency in case another thread is modifying the
* code while translate_insn() fetches the instruction bytes piecemeal.
* Such writer threads are blocked on mmap_lock() in page_unprotect().
*/
target_ulong page_protect_end;
#endif
void *host_addr[2];
} DisasContextBase;
/**
@ -123,11 +126,13 @@ typedef struct TranslatorOps {
/**
* translator_loop:
* @ops: Target-specific operations.
* @db: Disassembly context.
* @cpu: Target vCPU.
* @tb: Translation block.
* @max_insns: Maximum number of insns to translate.
* @pc: guest virtual program counter address
* @host_pc: host physical program counter address
* @ops: Target-specific operations.
* @db: Disassembly context.
*
* Generic translator loop.
*
@ -141,8 +146,9 @@ typedef struct TranslatorOps {
* - When single-stepping is enabled (system-wide or on the current vCPU).
* - When too many instructions have been translated.
*/
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns);
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db);
void translator_loop_temp_check(DisasContextBase *db);
@ -167,24 +173,52 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
* the relevant information at translation time.
*/
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
abi_ptr pc, bool do_swap); \
static inline type fullname(CPUArchState *env, \
DisasContextBase *dcbase, abi_ptr pc) \
{ \
return fullname ## _swap(env, dcbase, pc, false); \
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
/*
 * Fetch a 16-bit instruction word via translator_lduw(), optionally
 * byte-swapping the result when @do_swap is true (used by targets whose
 * code endianness can differ from the default).
 */
static inline uint16_t
translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
abi_ptr pc, bool do_swap)
{
uint16_t ret = translator_lduw(env, db, pc);
if (do_swap) {
ret = bswap16(ret);
}
return ret;
}
#define FOR_EACH_TRANSLATOR_LD(F) \
F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
/*
 * 32-bit counterpart of translator_lduw_swap(): fetch via
 * translator_ldl() and bswap32() the result when @do_swap is true.
 */
static inline uint32_t
translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
abi_ptr pc, bool do_swap)
{
uint32_t ret = translator_ldl(env, db, pc);
if (do_swap) {
ret = bswap32(ret);
}
return ret;
}
FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
/*
 * 64-bit counterpart of translator_lduw_swap(): fetch via
 * translator_ldq() and bswap64() the result when @do_swap is true.
 */
static inline uint64_t
translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
abi_ptr pc, bool do_swap)
{
uint64_t ret = translator_ldq(env, db, pc);
if (do_swap) {
ret = bswap64(ret);
}
return ret;
}
#undef GEN_TRANSLATOR_LD
/*
 * Return whether addr is on the same page as where disassembly started.
 * Translators can use this to enforce the rule that only single-insn
 * translation blocks are allowed to cross page boundaries.
 */
static inline bool is_same_page(const DisasContextBase *db, target_ulong addr)
{
/* XOR cancels the common page bits; nonzero high bits => different page. */
return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
}
#endif /* EXEC__TRANSLATOR_H */

View File

@ -34,9 +34,9 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
} else {
/*
* We need to be able to map the commpage.
* See validate_guest_space in linux-user/elfload.c.
* See init_guest_commpage in linux-user/elfload.c.
*/
return 0xffff0000ul;
return 0xfffffffful;
}
}
#define MAX_RESERVED_VA arm_max_reserved_va

View File

@ -195,6 +195,27 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}
#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
/*
 * x86_64 user-mode: register the kernel vsyscall page as a commpage.
 * Since the page lies above the addressable target_mmap() range, it is
 * never actually mapped — only its page flags (valid + executable) are
 * recorded so translation from it is permitted.
 */
static bool init_guest_commpage(void)
{
/*
 * The vsyscall page is at a high negative address aka kernel space,
 * which means that we cannot actually allocate it with target_mmap.
 * We still should be able to use page_set_flags, unless the user
 * has specified -R reserved_va, which would trigger an assert().
 */
if (reserved_va != 0 &&
TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
error_report("Cannot allocate vsyscall page");
exit(EXIT_FAILURE);
}
page_set_flags(TARGET_VSYSCALL_PAGE,
TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
PAGE_EXEC | PAGE_VALID);
return true;
}
#endif
#else
#define ELF_START_MMAP 0x80000000
@ -211,6 +232,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
#define ELF_ARCH EM_386
#define ELF_PLATFORM get_elf_platform()
#define EXSTACK_DEFAULT true
static const char *get_elf_platform(void)
{
@ -287,6 +309,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
#define ELF_ARCH EM_ARM
#define ELF_CLASS ELFCLASS32
#define EXSTACK_DEFAULT true
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
@ -398,7 +421,8 @@ enum {
static bool init_guest_commpage(void)
{
void *want = g2h_untagged(HI_COMMPAGE & -qemu_host_page_size);
abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
void *want = g2h_untagged(commpage);
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
@ -417,6 +441,9 @@ static bool init_guest_commpage(void)
perror("Protecting guest commpage");
exit(EXIT_FAILURE);
}
page_set_flags(commpage, commpage + qemu_host_page_size,
PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
@ -751,6 +778,7 @@ static inline void init_thread(struct target_pt_regs *regs,
#else
#define ELF_CLASS ELFCLASS32
#define EXSTACK_DEFAULT true
#endif
@ -948,6 +976,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_LOONGARCH
#define EXSTACK_DEFAULT true
#define elf_check_arch(x) ((x) == EM_LOONGARCH)
@ -1043,6 +1072,7 @@ static uint32_t get_elf_hwcap(void)
#define ELF_CLASS ELFCLASS32
#endif
#define ELF_ARCH EM_MIPS
#define EXSTACK_DEFAULT true
#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
@ -1642,6 +1672,34 @@ static inline void init_thread(struct target_pt_regs *regs,
regs->gr[31] = infop->entry;
}
#define LO_COMMPAGE 0
/*
 * hppa user-mode: reserve guest page zero as a commpage.
 * Maps one host page PROT_NONE at the fixed low address; returns false
 * (caller retries elsewhere) if MAP_FIXED landed somewhere other than
 * requested, and exits on hard mmap failure.  Guest-side flags mark the
 * page valid + executable only.
 */
static bool init_guest_commpage(void)
{
void *want = g2h_untagged(LO_COMMPAGE);
void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (addr == MAP_FAILED) {
perror("Allocating guest commpage");
exit(EXIT_FAILURE);
}
if (addr != want) {
return false;
}
/*
 * On Linux, page zero is normally marked execute only + gateway.
 * Normal read or write is supposed to fail (thus PROT_NONE above),
 * but specific offsets have kernel code mapped to raise permissions
 * and implement syscalls. Here, simply mark the page executable.
 * Special case the entry points during translation (see do_page_zero).
 */
page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
PAGE_EXEC | PAGE_VALID);
return true;
}
#endif /* TARGET_HPPA */
#ifdef TARGET_XTENSA
@ -1753,6 +1811,10 @@ static inline void init_thread(struct target_pt_regs *regs,
#define bswaptls(ptr) bswap32s(ptr)
#endif
#ifndef EXSTACK_DEFAULT
#define EXSTACK_DEFAULT false
#endif
#include "elf.h"
/* We must delay the following stanzas until after "elf.h". */
@ -2028,6 +2090,7 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
struct image_info *info)
{
abi_ulong size, error, guard;
int prot;
size = guest_stack_size;
if (size < STACK_LOWER_LIMIT) {
@ -2038,7 +2101,11 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
guard = qemu_real_host_page_size();
}
error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
prot = PROT_READ | PROT_WRITE;
if (info->exec_stack) {
prot |= PROT_EXEC;
}
error = target_mmap(0, size + guard, prot,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (error == -1) {
perror("mmap stack");
@ -2322,14 +2389,16 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
}
#if defined(HI_COMMPAGE)
#define LO_COMMPAGE 0
#define LO_COMMPAGE -1
#elif defined(LO_COMMPAGE)
#define HI_COMMPAGE 0
#else
#define HI_COMMPAGE 0
#define LO_COMMPAGE 0
#define LO_COMMPAGE -1
#ifndef INIT_GUEST_COMMPAGE
#define init_guest_commpage() true
#endif
#endif
static void pgb_fail_in_use(const char *image_name)
{
@ -2551,7 +2620,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
} else {
offset = -(HI_COMMPAGE & -align);
}
} else if (LO_COMMPAGE != 0) {
} else if (LO_COMMPAGE != -1) {
loaddr = MIN(loaddr, LO_COMMPAGE & -align);
}
@ -2866,6 +2935,7 @@ static void load_elf_image(const char *image_name, int image_fd,
*/
loaddr = -1, hiaddr = 0;
info->alignment = 0;
info->exec_stack = EXSTACK_DEFAULT;
for (i = 0; i < ehdr->e_phnum; ++i) {
struct elf_phdr *eppnt = phdr + i;
if (eppnt->p_type == PT_LOAD) {
@ -2908,6 +2978,8 @@ static void load_elf_image(const char *image_name, int image_fd,
if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
goto exit_errmsg;
}
} else if (eppnt->p_type == PT_GNU_STACK) {
info->exec_stack = eppnt->p_flags & PF_X;
}
}

View File

@ -177,9 +177,11 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
goto error;
}
}
page_set_flags(start, start + len, page_flags);
mmap_unlock();
return 0;
tb_invalidate_phys_range(start, start + len);
ret = 0;
error:
mmap_unlock();
return ret;

View File

@ -48,6 +48,7 @@ struct image_info {
uint32_t elf_flags;
int personality;
abi_ulong alignment;
bool exec_stack;
/* Generic semihosting knows about these pointers. */
abi_ulong arg_strings; /* strings for argv */

View File

@ -2443,6 +2443,18 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
return block->offset + offset;
}
/*
 * Translate a host pointer into a guest ram_addr_t, treating failure
 * as fatal.  Wrapper around qemu_ram_addr_from_host() for callers that
 * cannot tolerate a pointer outside guest RAM.
 */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        /* Not backed by a RAMBlock: programming error, so abort. */
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, void *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,

View File

@ -3043,10 +3043,11 @@ static const TranslatorOps alpha_tr_ops = {
.disas_log = alpha_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,

View File

@ -9892,7 +9892,8 @@ static const TranslatorOps thumb_translator_ops = {
};
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;
@ -9907,7 +9908,7 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
}
#endif
translator_loop(ops, &dc.base, cpu, tb, max_insns);
translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,

View File

@ -3049,10 +3049,11 @@ static const TranslatorOps avr_tr_ops = {
.disas_log = avr_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
}
void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,

View File

@ -3286,10 +3286,11 @@ static const TranslatorOps cris_tr_ops = {
.disas_log = cris_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base);
}
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)

View File

@ -850,11 +850,13 @@ static const TranslatorOps hexagon_tr_ops = {
.disas_log = hexagon_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc,
&hexagon_tr_ops, &ctx.base);
}
#define NAME_LEN 64

View File

@ -4340,10 +4340,11 @@ static const TranslatorOps hppa_tr_ops = {
.disas_log = hppa_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,

View File

@ -130,6 +130,7 @@ typedef struct DisasContext {
TCGv_i64 tmp1_i64;
sigjmp_buf jmpbuf;
TCGOp *prev_insn_end;
} DisasContext;
/* The environment in which user-only runs is constrained. */
@ -2008,6 +2009,12 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
uint64_t pc = s->pc;
/* This is a subsequent insn that crosses a page boundary. */
if (s->base.num_insns > 1 &&
!is_same_page(&s->base, s->pc + num_bytes - 1)) {
siglongjmp(s->jmpbuf, 2);
}
s->pc += num_bytes;
if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
/* If the instruction's 16th byte is on a different page than the 1st, a
@ -2033,7 +2040,7 @@ static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
@ -4669,6 +4676,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
target_ulong pc_start = s->base.pc_next;
bool orig_cc_op_dirty = s->cc_op_dirty;
CCOp orig_cc_op = s->cc_op;
s->pc_start = s->pc = pc_start;
s->override = -1;
@ -4681,9 +4690,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
if (sigsetjmp(s->jmpbuf, 0) != 0) {
switch (sigsetjmp(s->jmpbuf, 0)) {
case 0:
break;
case 1:
gen_exception_gpf(s);
return s->pc;
case 2:
/* Restore state that may affect the next instruction. */
s->cc_op_dirty = orig_cc_op_dirty;
s->cc_op = orig_cc_op;
s->base.num_insns--;
tcg_remove_ops_after(s->prev_insn_end);
s->base.is_jmp = DISAS_TOO_MANY;
return pc_start;
default:
g_assert_not_reached();
}
prefixes = 0;
@ -8745,6 +8767,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
dc->prev_insn_end = tcg_last_op();
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
@ -8765,31 +8788,22 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
#endif
pc_next = disas_insn(dc, cpu);
dc->base.pc_next = pc_next;
if (dc->base.is_jmp == DISAS_NEXT) {
if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
/* if single step mode, we generate only one instruction and
generate an exception */
/* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
the flag and abort the translation to give the irqs a
chance to happen */
dc->base.is_jmp = DISAS_TOO_MANY;
} else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
&& ((pc_next & TARGET_PAGE_MASK)
!= ((pc_next + TARGET_MAX_INSN_SIZE - 1)
& TARGET_PAGE_MASK)
|| (pc_next & ~TARGET_PAGE_MASK) == 0)) {
/* Do not cross the boundary of the pages in icount mode,
it can cause an exception. Do it only when boundary is
crossed by the first instruction in the block.
If current instruction already crossed the bound - it's ok,
because an exception hasn't stopped this code.
/*
* If single step mode, we generate only one instruction and
* generate an exception.
* If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
* the flag and abort the translation to give the irqs a
* chance to happen.
*/
dc->base.is_jmp = DISAS_TOO_MANY;
} else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
} else if (!is_same_page(&dc->base, pc_next)) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
dc->base.pc_next = pc_next;
}
}
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
@ -8821,11 +8835,12 @@ static const TranslatorOps i386_tr_ops = {
};
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,

View File

@ -241,11 +241,13 @@ static const TranslatorOps loongarch_tr_ops = {
.disas_log = loongarch_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc,
&loongarch_tr_ops, &ctx.base);
}
void loongarch_translate_init(void)

View File

@ -6361,10 +6361,11 @@ static const TranslatorOps m68k_tr_ops = {
.disas_log = m68k_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
}
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)

View File

@ -1849,10 +1849,11 @@ static const TranslatorOps mb_tr_ops = {
.disas_log = mb_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)

View File

@ -16155,11 +16155,12 @@ static const TranslatorOps mips_tr_ops = {
.disas_log = mips_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &mips_tr_ops, &ctx.base);
}
void mips_tcg_init(void)

View File

@ -1038,10 +1038,11 @@ static const TranslatorOps nios2_tr_ops = {
.disas_log = nios2_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base);
}
void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)

View File

@ -1705,11 +1705,13 @@ static const TranslatorOps openrisc_tr_ops = {
.disas_log = openrisc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc,
&openrisc_tr_ops, &ctx.base);
}
void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)

View File

@ -7719,11 +7719,12 @@ static const TranslatorOps ppc_tr_ops = {
.disas_log = ppc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,

View File

@ -1022,6 +1022,14 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"
/* The specification allows for longer insns, but not supported by qemu. */
#define MAX_INSN_LEN 4
/*
 * Return the byte length of the instruction whose first halfword is
 * @first_word.  Encodings whose two low bits are not both set are
 * 16-bit compressed (RVC) instructions; everything else handled by
 * QEMU is 32 bits wide.
 */
static inline int insn_len(uint16_t first_word)
{
    if ((first_word & 0x3) != 0x3) {
        return 2;   /* compressed encoding */
    }
    return 4;       /* standard 32-bit encoding */
}
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
/*
@ -1037,7 +1045,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
};
/* Check for compressed insn */
if (extract16(opcode, 0, 2) != 3) {
if (insn_len(opcode) == 2) {
if (!has_ext(ctx, RVC)) {
gen_exception_illegal(ctx);
} else {
@ -1146,12 +1154,21 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
ctx->nftemp = 0;
/* Only the first insn within a TB is allowed to cross a page boundary. */
if (ctx->base.is_jmp == DISAS_NEXT) {
target_ulong page_start;
page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
if (!is_same_page(&ctx->base, ctx->base.pc_next)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
} else {
unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
int len = insn_len(next_insn);
if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
}
}
}
@ -1196,11 +1213,12 @@ static const TranslatorOps riscv_tr_ops = {
.disas_log = riscv_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base);
}
void riscv_translate_init(void)

View File

@ -2363,11 +2363,12 @@ static const TranslatorOps rx_tr_ops = {
.disas_log = rx_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}
void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,

View File

@ -6609,6 +6609,14 @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
dc->insn_start = tcg_last_op();
}
/*
 * Compute the address of the instruction that follows the one at @pc:
 * load the first two code bytes and pass the first opcode byte to
 * get_ilen() to obtain the instruction length.
 */
static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = ld_code2(env, s, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
CPUS390XState *env = cs->env_ptr;
@ -6616,10 +6624,9 @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
dc->base.is_jmp = translate_one(env, dc);
if (dc->base.is_jmp == DISAS_NEXT) {
uint64_t page_start;
page_start = dc->base.pc_first & TARGET_PAGE_MASK;
if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
if (!is_same_page(dcbase, dc->base.pc_next) ||
!is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next)) ||
dc->ex_value) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
}
@ -6676,11 +6683,12 @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;
translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,

View File

@ -2368,11 +2368,12 @@ static const TranslatorOps sh4_tr_ops = {
.disas_log = sh4_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
}
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,

View File

@ -5917,11 +5917,12 @@ static const TranslatorOps sparc_tr_ops = {
.disas_log = sparc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
void sparc_tcg_init(void)

View File

@ -8878,10 +8878,12 @@ static const TranslatorOps tricore_tr_ops = {
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;
translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns);
translator_loop(cs, tb, max_insns, pc, host_pc,
&tricore_tr_ops, &ctx.base);
}
void

View File

@ -1279,10 +1279,12 @@ static const TranslatorOps xtensa_translator_ops = {
.disas_log = xtensa_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
translator_loop(cpu, tb, max_insns, pc, host_pc,
&xtensa_translator_ops, &dc.base);
}
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)

View File

@ -1998,7 +1998,7 @@ uint8_t code[] = {
0xc3, /* ret */
};
asm(".section \".data\"\n"
asm(".section \".data_x\",\"awx\"\n"
"smc_code2:\n"
"movl 4(%esp), %eax\n"
"movl %eax, smc_patch_addr2 + 1\n"

View File

@ -0,0 +1,139 @@
/*
* Common code for arch-specific MMU_INST_FETCH fault testing.
*/
#define _GNU_SOURCE
#include <assert.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
/* Forward declarations. */
static void *arch_mcontext_pc(const mcontext_t *ctx);
static int arch_mcontext_arg(const mcontext_t *ctx);
static void arch_flush(void *p, int len);
/* Testing infrastructure. */
struct noexec_test {
const char *name;
const char *test_code;
int test_len;
int page_ofs;
int entry_ofs;
int expected_si_ofs;
int expected_pc_ofs;
int expected_arg;
};
static void *page_base;
static int page_size;
static const struct noexec_test *current_noexec_test;
/*
 * Report a failed libc/system call by name, with the errno text,
 * and terminate the test program.
 */
static void handle_err(const char *syscall)
{
    printf("[ FAILED ] %s: %s\n", syscall, strerror(errno));
    exit(EXIT_FAILURE);
}
/*
 * SIGSEGV handler: check that the fault matches the expectations of the
 * test case currently registered in current_noexec_test (si_addr, PC
 * and the register argument), then restore full permissions on the test
 * page so the interrupted code can resume.  Any mismatch — or a SEGV
 * with no test registered — fails the whole program.
 */
static void handle_segv(int sig, siginfo_t *info, void *ucontext)
{
    const struct noexec_test *test = current_noexec_test;
    const mcontext_t *mc = &((ucontext_t *)ucontext)->uc_mcontext;
    void *expected_si;
    void *expected_pc;
    void *pc;
    int arg;

    if (test == NULL) {
        printf("[ FAILED ] unexpected SEGV\n");
        exit(EXIT_FAILURE);
    }
    /* Mark the test consumed; test_noexec_1() asserts on this. */
    current_noexec_test = NULL;

    /* Faulting address must be at the expected offset into the page. */
    expected_si = page_base + test->expected_si_ofs;
    if (info->si_addr != expected_si) {
        printf("[ FAILED ] wrong si_addr (%p != %p)\n",
               info->si_addr, expected_si);
        exit(EXIT_FAILURE);
    }

    /* PC at the time of the fault, extracted by the arch helper. */
    pc = arch_mcontext_pc(mc);
    expected_pc = page_base + test->expected_pc_ofs;
    if (pc != expected_pc) {
        printf("[ FAILED ] wrong pc (%p != %p)\n", pc, expected_pc);
        exit(EXIT_FAILURE);
    }

    /* The argument register tells how far the test code executed. */
    arg = arch_mcontext_arg(mc);
    if (arg != test->expected_arg) {
        printf("[ FAILED ] wrong arg (%d != %d)\n", arg, test->expected_arg);
        exit(EXIT_FAILURE);
    }

    /* Re-enable access so execution can continue past the fault. */
    if (mprotect(page_base, page_size,
                 PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
        handle_err("mprotect");
    }
}
/*
 * Run one noexec test case: install the code, execute it once so a TB
 * is created, strip all permissions from the page, then execute again
 * and require that handle_segv() consumed the registered test.
 */
static void test_noexec_1(const struct noexec_test *test)
{
    void *code_dst = page_base + test->page_ofs;
    void (*entry)(int) = page_base + test->entry_ofs;

    /* Install the test code and make it visible to the icache. */
    memcpy(code_dst, test->test_code, test->test_len);
    arch_flush(code_dst, test->test_len);

    /* Trigger TB creation in order to test invalidation. */
    entry(0);

    /* Revoke every permission on the test page. */
    if (mprotect(page_base, page_size, PROT_NONE) < 0) {
        handle_err("mprotect");
    }

    /* Trigger SEGV and check that handle_segv() ran. */
    current_noexec_test = test;
    entry(0);
    assert(current_noexec_test == NULL);
}
static int test_noexec(struct noexec_test *tests, size_t n_tests)
{
struct sigaction act;
size_t i;
memset(&act, 0, sizeof(act));
act.sa_sigaction = handle_segv;
act.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &act, NULL) < 0) {
handle_err("sigaction");
}
page_size = getpagesize();
page_base = mmap(NULL, 2 * page_size,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (page_base == MAP_FAILED) {
handle_err("mmap");
}
page_base += page_size;
for (i = 0; i < n_tests; i++) {
struct noexec_test *test = &tests[i];
printf("[ RUN ] %s\n", test->name);
test_noexec_1(test);
printf("[ OK ]\n");
}
printf("[ PASSED ]\n");
return EXIT_SUCCESS;
}

View File

@ -3,3 +3,4 @@
VPATH += $(SRC_PATH)/tests/tcg/riscv64
TESTS += test-div
TESTS += noexec

View File

@ -0,0 +1,79 @@
#include "../multiarch/noexec.c.inc"
/* Extract the faulting PC from the RISC-V signal machine context. */
static void *arch_mcontext_pc(const mcontext_t *ctx)
{
    return (void *)ctx->__gregs[REG_PC];
}
/* Extract the test argument register (a0) from the machine context. */
static int arch_mcontext_arg(const mcontext_t *ctx)
{
    return ctx->__gregs[REG_A0];
}
/* Flush the instruction cache for freshly written code at [p, p+len). */
static void arch_flush(void *p, int len)
{
    __builtin___clear_cache(p, p + len);
}
extern char noexec_1[];
extern char noexec_2[];
extern char noexec_end[];
asm(".option push\n"
".option norvc\n"
"noexec_1:\n"
" li a0,1\n" /* a0 is 0 on entry, set 1. */
"noexec_2:\n"
" li a0,2\n" /* a0 is 0/1; set 2. */
" ret\n"
"noexec_end:\n"
".option pop");
int main(void)
{
    /*
     * Each case copies the noexec_1..noexec_end sequence into the test
     * arena and describes where the SEGV is expected to land.  Offsets
     * are relative to page_base (the protected second page); negative
     * offsets place code on the preceding, always-executable page.
     */
    struct noexec_test noexec_tests[] = {
        {
            /* Code entirely on the protected page; entry at noexec_1. */
            .name = "fallthrough",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2,
            .entry_ofs = noexec_1 - noexec_2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = 0,
            .expected_arg = 1,
        },
        {
            /* Jump straight to noexec_2 on the protected page. */
            .name = "jump",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2,
            .entry_ofs = 0,
            .expected_si_ofs = 0,
            .expected_pc_ofs = 0,
            .expected_arg = 0,
        },
        {
            /* Shift by 2 so an insn straddles the page boundary. */
            .name = "fallthrough [cross]",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2 - 2,
            .entry_ofs = noexec_1 - noexec_2 - 2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = -2,
            .expected_arg = 1,
        },
        {
            /* Jump directly to the boundary-crossing instruction. */
            .name = "jump [cross]",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2 - 2,
            .entry_ofs = -2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = -2,
            .expected_arg = 0,
        },
    };

    return test_noexec(noexec_tests,
                       sizeof(noexec_tests) / sizeof(noexec_tests[0]));
}

View File

@ -16,6 +16,7 @@ TESTS+=shift
TESTS+=trap
TESTS+=signals-s390x
TESTS+=branch-relative-long
TESTS+=noexec
Z14_TESTS=vfminmax
vfminmax: LDFLAGS+=-lm

106
tests/tcg/s390x/noexec.c Normal file
View File

@ -0,0 +1,106 @@
#include "../multiarch/noexec.c.inc"
/* Extract the faulting PSW address from the s390x machine context. */
static void *arch_mcontext_pc(const mcontext_t *ctx)
{
    return (void *)ctx->psw.addr;
}
/* Extract the test argument register (%r2) from the machine context. */
static int arch_mcontext_arg(const mcontext_t *ctx)
{
    return ctx->gregs[2];
}
/* No-op: no explicit icache flush is performed for this target. */
static void arch_flush(void *p, int len)
{
}
extern char noexec_1[];
extern char noexec_2[];
extern char noexec_end[];
asm("noexec_1:\n"
" lgfi %r2,1\n" /* %r2 is 0 on entry, set 1. */
"noexec_2:\n"
" lgfi %r2,2\n" /* %r2 is 0/1; set 2. */
" br %r14\n" /* return */
"noexec_end:");
extern char exrl_1[];
extern char exrl_2[];
extern char exrl_end[];
asm("exrl_1:\n"
" exrl %r0, exrl_2\n"
" br %r14\n"
"exrl_2:\n"
" lgfi %r2,2\n"
"exrl_end:");
int main(void)
{
    /*
     * Offsets are relative to page_base (the protected second page);
     * negative offsets place code on the preceding, executable page.
     * The "exrl" cases use EXECUTE RELATIVE LONG to target an
     * instruction that lies on the protected page.
     */
    struct noexec_test noexec_tests[] = {
        {
            /* Code entirely on the protected page; entry at noexec_1. */
            .name = "fallthrough",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2,
            .entry_ofs = noexec_1 - noexec_2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = 0,
            .expected_arg = 1,
        },
        {
            /* Jump straight to noexec_2 on the protected page. */
            .name = "jump",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2,
            .entry_ofs = 0,
            .expected_si_ofs = 0,
            .expected_pc_ofs = 0,
            .expected_arg = 0,
        },
        {
            /* exrl targeting an insn on the protected page. */
            .name = "exrl",
            .test_code = exrl_1,
            .test_len = exrl_end - exrl_1,
            .page_ofs = exrl_1 - exrl_2,
            .entry_ofs = exrl_1 - exrl_2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = exrl_1 - exrl_2,
            .expected_arg = 0,
        },
        {
            /* Shift by 2 so an insn straddles the page boundary. */
            .name = "fallthrough [cross]",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2 - 2,
            .entry_ofs = noexec_1 - noexec_2 - 2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = -2,
            .expected_arg = 1,
        },
        {
            /* Jump directly to the boundary-crossing instruction. */
            .name = "jump [cross]",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2 - 2,
            .entry_ofs = -2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = -2,
            .expected_arg = 0,
        },
        {
            /* exrl whose target crosses into the protected page. */
            .name = "exrl [cross]",
            .test_code = exrl_1,
            .test_len = exrl_end - exrl_1,
            .page_ofs = exrl_1 - exrl_2 - 2,
            .entry_ofs = exrl_1 - exrl_2 - 2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = exrl_1 - exrl_2 - 2,
            .expected_arg = 0,
        },
    };

    return test_noexec(noexec_tests,
                       sizeof(noexec_tests) / sizeof(noexec_tests[0]));
}

View File

@ -10,6 +10,7 @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target
ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
X86_64_TESTS += vsyscall
X86_64_TESTS += noexec
TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
else
TESTS=$(MULTIARCH_TESTS)
@ -23,5 +24,5 @@ test-x86_64: LDFLAGS+=-lm -lc
test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
%: $(SRC_PATH)/tests/tcg/x86_64/%.c
$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)

75
tests/tcg/x86_64/noexec.c Normal file
View File

@ -0,0 +1,75 @@
#include "../multiarch/noexec.c.inc"
/* Extract the faulting %rip from the x86-64 machine context. */
static void *arch_mcontext_pc(const mcontext_t *ctx)
{
    return (void *)ctx->gregs[REG_RIP];
}
/*
 * Extract the test argument register (%rdi) from the machine context.
 * Marked static to match the forward declaration in noexec.c.inc and
 * the sibling arch implementations (internal linkage throughout).
 */
static int arch_mcontext_arg(const mcontext_t *ctx)
{
    return ctx->gregs[REG_RDI];
}
/* No-op: no explicit icache flush is performed for this target. */
static void arch_flush(void *p, int len)
{
}
extern char noexec_1[];
extern char noexec_2[];
extern char noexec_end[];
asm("noexec_1:\n"
" movq $1,%rdi\n" /* %rdi is 0 on entry, set 1. */
"noexec_2:\n"
" movq $2,%rdi\n" /* %rdi is 0/1; set 2. */
" ret\n"
"noexec_end:");
int main(void)
{
    /*
     * Offsets are relative to page_base (the protected second page);
     * negative offsets place code on the preceding, executable page.
     */
    struct noexec_test noexec_tests[] = {
        {
            /* Code entirely on the protected page; entry at noexec_1. */
            .name = "fallthrough",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2,
            .entry_ofs = noexec_1 - noexec_2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = 0,
            .expected_arg = 1,
        },
        {
            /* Jump straight to noexec_2 on the protected page. */
            .name = "jump",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2,
            .entry_ofs = 0,
            .expected_si_ofs = 0,
            .expected_pc_ofs = 0,
            .expected_arg = 0,
        },
        {
            /* Shift by 2 so an insn straddles the page boundary. */
            .name = "fallthrough [cross]",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2 - 2,
            .entry_ofs = noexec_1 - noexec_2 - 2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = -2,
            .expected_arg = 1,
        },
        {
            /* Jump directly to the boundary-crossing instruction. */
            .name = "jump [cross]",
            .test_code = noexec_1,
            .test_len = noexec_end - noexec_1,
            .page_ofs = noexec_1 - noexec_2 - 2,
            .entry_ofs = -2,
            .expected_si_ofs = 0,
            .expected_pc_ofs = -2,
            .expected_arg = 0,
        },
    };

    return test_noexec(noexec_tests,
                       sizeof(noexec_tests) / sizeof(noexec_tests[0]));
}