exec: Support watching memory region accesses

Matt Borgerson 2020-10-18 23:45:33 -07:00 committed by mborgerson
parent f8a632645c
commit 33728b060f
4 changed files with 181 additions and 13 deletions

accel/tcg/cputlb.c

@@ -909,6 +909,11 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                               TARGET_PAGE_SIZE);
+#ifdef XBOX
+    wp_flags |= mem_access_callback_address_matches(cpu,
+                                                    iotlb & TARGET_PAGE_MASK,
+                                                    TARGET_PAGE_SIZE);
+#endif
 
     index = tlb_index(env, mmu_idx, vaddr_page);
     te = tlb_entry(env, mmu_idx, vaddr_page);
@@ -1370,6 +1375,10 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
     if (flags & TLB_WATCHPOINT) {
         int wp_access = (access_type == MMU_DATA_STORE
                          ? BP_MEM_WRITE : BP_MEM_READ);
+#ifdef XBOX
+        mem_check_access_callback_vaddr(env_cpu(env), addr, size, wp_access,
+                                        iotlbentry);
+#endif
         cpu_check_watchpoint(env_cpu(env), addr, size,
                              iotlbentry->attrs, wp_access, retaddr);
     }
@@ -1603,6 +1612,11 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         /* Handle watchpoints. */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+#ifdef XBOX
+            mem_check_access_callback_vaddr(env_cpu(env), addr, size,
+                                            BP_MEM_READ, iotlbentry);
+#endif
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_READ, retaddr);
@@ -2054,6 +2068,11 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         /* Handle watchpoints. */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+#ifdef XBOX
+            mem_check_access_callback_vaddr(env_cpu(env), addr, size,
+                                            BP_MEM_WRITE, iotlbentry);
+#endif
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_WRITE, retaddr);
@@ -2109,19 +2128,26 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
          * is already guaranteed to be filled, and that the second page
          * cannot evict the first.
          */
-        page2 = (addr + size) & TARGET_PAGE_MASK;
-        size2 = (addr + size) & ~TARGET_PAGE_MASK;
-        index2 = tlb_index(env, mmu_idx, page2);
-        entry2 = tlb_entry(env, mmu_idx, page2);
-        tlb_addr2 = tlb_addr_write(entry2);
-        if (!tlb_hit_page(tlb_addr2, page2)) {
-            if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
-                tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
-                         mmu_idx, retaddr);
-                index2 = tlb_index(env, mmu_idx, page2);
-                entry2 = tlb_entry(env, mmu_idx, page2);
-            }
-            tlb_addr2 = tlb_addr_write(entry2);
-        }
+        // FIXME: Upstream patch for this
+        if ((addr & ~TARGET_PAGE_MASK) + size - 1 >= TARGET_PAGE_SIZE) {
+            page2 = (addr + size) & TARGET_PAGE_MASK;
+            size2 = (addr + size) & ~TARGET_PAGE_MASK;
+            index2 = tlb_index(env, mmu_idx, page2);
+            entry2 = tlb_entry(env, mmu_idx, page2);
+            tlb_addr2 = tlb_addr_write(entry2);
+            if (!tlb_hit_page(tlb_addr2, page2)) {
+                if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
+                    tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+                    index2 = tlb_index(env, mmu_idx, page2);
+                    entry2 = tlb_entry(env, mmu_idx, page2);
+                }
+                tlb_addr2 = tlb_addr_write(entry2);
+            }
+        } else {
+            /* The access happens on a single page */
+            size2 = 0;
+        }
 
         /*
@@ -2129,11 +2155,19 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
          * must happen before any store.
          */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+#ifdef XBOX
+            mem_check_access_callback_vaddr(env_cpu(env), addr, size - size2,
+                BP_MEM_WRITE, &env_tlb(env)->d[mmu_idx].iotlb[index]);
+#endif
             cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                                  env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                                  BP_MEM_WRITE, retaddr);
         }
-        if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
+        if (size2 > 0 && unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
+#ifdef XBOX
+            mem_check_access_callback_vaddr(env_cpu(env), page2, size2,
+                BP_MEM_WRITE, &env_tlb(env)->d[mmu_idx].iotlb[index2]);
+#endif
             cpu_check_watchpoint(env_cpu(env), page2, size2,
                                  env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                                  BP_MEM_WRITE, retaddr);
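
A note on the rewritten second-page setup above: as the new `else` branch's comment says, this path is also entered for unaligned stores that stay within a single page, and for those the old unconditional `size2 = (addr + size) & ~TARGET_PAGE_MASK` produced a nonzero size2, so the first watchpoint check covered `size - size2` (i.e. negative) bytes. Hence the cross-page guard, flagged for upstreaming. A minimal standalone sketch of the arithmetic, assuming a 4 KiB target page (this program is illustrative, not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_SIZE 0x1000ULL
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

    int main(void)
    {
        /* Unaligned store entirely inside one page: addr 0x1001, size 4. */
        uint64_t addr = 0x1001, size = 4;

        /* The old code computed this unconditionally... */
        uint64_t size2 = (addr + size) & ~TARGET_PAGE_MASK;   /* == 5 */

        /* ...so the first watchpoint check would have used a length of
         * size - size2 == -1. The new guard tests for a real crossing: */
        int crosses = (addr & ~TARGET_PAGE_MASK) + size - 1 >= TARGET_PAGE_SIZE;

        printf("crosses=%d old_size2=%llu\n",
               crosses, (unsigned long long)size2);  /* crosses=0 old_size2=5 */
        return 0;
    }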

exec.c

@@ -1092,6 +1092,90 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
     }
 }
+
+#ifdef XBOX
+static inline bool access_callback_address_matches(MemAccessCallback *cb,
+                                                   hwaddr addr, hwaddr len)
+{
+    hwaddr watch_end = cb->addr + cb->len - 1;
+    hwaddr access_end = addr + len - 1;
+
+    return !(addr > watch_end || cb->addr > access_end);
+}
+
+int mem_access_callback_address_matches(CPUState *cpu, hwaddr addr, hwaddr len)
+{
+    int ret = 0;
+    MemAccessCallback *cb;
+
+    QTAILQ_FOREACH(cb, &cpu->mem_access_callbacks, entry) {
+        if (access_callback_address_matches(cb, addr, len)) {
+            ret |= BP_MEM_READ | BP_MEM_WRITE;
+        }
+    }
+
+    return ret;
+}
+
+int mem_access_callback_insert(CPUState *cpu, MemoryRegion *mr, hwaddr offset,
+                               hwaddr len, MemAccessCallback **cb,
+                               MemAccessCallbackFunc func, void *opaque)
+{
+    assert(len > 0);
+
+    MemAccessCallback *cb_ = g_malloc(sizeof(*cb_));
+    cb_->mr = mr;
+    cb_->addr = memory_region_get_ram_addr(mr) + offset;
+    cb_->len = len;
+    cb_->func = func;
+    cb_->opaque = opaque;
+    QTAILQ_INSERT_TAIL(&cpu->mem_access_callbacks, cb_, entry);
+
+    if (cb) {
+        *cb = cb_;
+    }
+
+    // FIXME: flush only applicable pages
+    tlb_flush(cpu);
+
+    return 0;
+}
+
+void mem_access_callback_remove_by_ref(CPUState *cpu, MemAccessCallback *cb)
+{
+    QTAILQ_REMOVE(&cpu->mem_access_callbacks, cb, entry);
+    g_free(cb);
+
+    // FIXME: flush only applicable pages
+    tlb_flush(cpu);
+}
+
+void mem_check_access_callback_vaddr(CPUState *cpu,
+                                     vaddr addr, vaddr len, int flags,
+                                     void *iotlbentry)
+{
+    ram_addr_t ram_addr = (((CPUIOTLBEntry *)iotlbentry)->addr
+                           & TARGET_PAGE_MASK) + addr;
+    mem_check_access_callback_ramaddr(cpu, ram_addr, len, flags);
+}
+
+void mem_check_access_callback_ramaddr(CPUState *cpu,
+                                       hwaddr ram_addr, vaddr len, int flags)
+{
+    MemAccessCallback *cb;
+
+    QTAILQ_FOREACH(cb, &cpu->mem_access_callbacks, entry) {
+        if (access_callback_address_matches(cb, ram_addr, len)) {
+            ram_addr_t ram_addr_base = memory_region_get_ram_addr(cb->mr);
+            assert(ram_addr_base != RAM_ADDR_INVALID);
+            ram_addr_t hit_addr = MAX(ram_addr, cb->addr);
+            hwaddr mr_offset = hit_addr - ram_addr_base;
+            bool is_write = (flags & BP_MEM_WRITE) != 0;
+            cb->func(cb->opaque, cb->mr, mr_offset, len, is_write);
+        }
+    }
+}
+#endif // ifdef XBOX
 
 /* Return true if this watchpoint address matches the specified
  * access (ie the address range covered by the watchpoint overlaps
  * partially or completely with the address range covered by the
@@ -3166,6 +3250,12 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
     bool release_lock = false;
     const uint8_t *buf = ptr;
 
+#ifdef XBOX
+    CPUState *cpu = qemu_get_cpu(0);
+    ram_addr_t ram_addr = addr1 + memory_region_get_ram_addr(mr);
+    mem_check_access_callback_ramaddr(cpu, ram_addr, len, BP_MEM_WRITE);
+#endif
+
     for (;;) {
         if (!memory_access_is_direct(mr, true)) {
             release_lock |= prepare_mmio_access(mr);
@@ -3231,6 +3321,12 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
     bool release_lock = false;
     uint8_t *buf = ptr;
 
+#ifdef XBOX
+    CPUState *cpu = qemu_get_cpu(0);
+    ram_addr_t ram_addr = addr1 + memory_region_get_ram_addr(mr);
+    mem_check_access_callback_ramaddr(cpu, ram_addr, len, BP_MEM_READ);
+#endif
+
     for (;;) {
         if (!memory_access_is_direct(mr, false)) {
             /* I/O case */
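
Two details of the exec.c half are worth calling out. First, the flatview hooks above pass `qemu_get_cpu(0)`, so DMA-style accesses made through the memory API are checked against the callback list hanging off CPU 0; a reasonable shortcut for the single-CPU Xbox. Second, `access_callback_address_matches` is a closed-interval overlap test: both range ends are inclusive. A self-contained illustration of its boundary behavior (the `ranges_overlap` helper restates the predicate with plain integer types; the values are made up for the example):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Restatement of access_callback_address_matches: the ranges
     * [a, a + alen - 1] and [b, b + blen - 1] overlap unless one of
     * them begins past the other's last byte. */
    static bool ranges_overlap(uint64_t a, uint64_t alen,
                               uint64_t b, uint64_t blen)
    {
        uint64_t a_end = a + alen - 1;
        uint64_t b_end = b + blen - 1;
        return !(b > a_end || a > b_end);
    }

    int main(void)
    {
        /* Watching 0x100 bytes at 0x1000: the last watched byte is 0x10ff. */
        assert(ranges_overlap(0x1000, 0x100, 0x10ff, 1));   /* hits last byte */
        assert(!ranges_overlap(0x1000, 0x100, 0x1100, 1));  /* one byte past */
        assert(ranges_overlap(0x1000, 0x100, 0x0fff, 2));   /* straddles start */
        return 0;
    }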

hw/core/cpu.c

@@ -372,6 +372,7 @@ static void cpu_common_initfn(Object *obj)
     QSIMPLEQ_INIT(&cpu->work_list);
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
+    QTAILQ_INIT(&cpu->mem_access_callbacks);
 
     cpu_exec_initfn(cpu);
 }

include/hw/core/cpu.h

@@ -250,6 +250,19 @@ typedef struct CPUBreakpoint {
     QTAILQ_ENTRY(CPUBreakpoint) entry;
 } CPUBreakpoint;
 
+#ifdef XBOX
+typedef void (*MemAccessCallbackFunc)(void *opaque, MemoryRegion *mr, hwaddr addr, hwaddr len, bool write);
+
+typedef struct MemAccessCallback {
+    MemoryRegion *mr;
+    hwaddr addr;
+    hwaddr len;
+    MemAccessCallbackFunc func;
+    void *opaque;
+    QTAILQ_ENTRY(MemAccessCallback) entry;
+} MemAccessCallback;
+#endif
+
 struct CPUWatchpoint {
     vaddr vaddr;
     vaddr len;
@@ -412,6 +425,8 @@ struct CPUState {
     QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
     CPUWatchpoint *watchpoint_hit;
 
+    QTAILQ_HEAD(, MemAccessCallback) mem_access_callbacks;
+
     void *opaque;
 
     /* In order to avoid passing too many arguments to the MMIO helpers,
@@ -1110,6 +1125,28 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
  * If no watchpoint is registered for the range, the result is 0.
  */
 int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+
+#ifdef XBOX
+/**
+ * Access callbacks to facilitate lazy synchronization, specifically when
+ * emulating GPUs in a UMA system (e.g. Xbox).
+ *
+ * Note: Access to this watched memory can be slow: each access results in a
+ * callback. This can be made faster, but for now just accept that CPU blitting
+ * to a surface will be slower.
+ */
+int mem_access_callback_insert(CPUState *cpu, MemoryRegion *mr,
+                               hwaddr offset, hwaddr len,
+                               MemAccessCallback **cb,
+                               MemAccessCallbackFunc func, void *opaque);
+void mem_access_callback_remove_by_ref(CPUState *cpu, MemAccessCallback *cb);
+int mem_access_callback_address_matches(CPUState *cpu, hwaddr addr, hwaddr len);
+void mem_check_access_callback_ramaddr(CPUState *cpu,
+                                       hwaddr ram_addr, vaddr len, int flags);
+void mem_check_access_callback_vaddr(CPUState *cpu, vaddr addr, vaddr len,
+                                     int flags, void *iotlbentry);
+#endif
 #endif
 
 /**
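
Putting the pieces together, a device model would use the new API roughly as follows. This is a hedged sketch against the declarations above: `SurfaceState`, `surface_access_cb`, `surface_watch_start`/`surface_watch_stop`, and the `vram` region are hypothetical names for the example, not part of the commit.

    /* Illustrative per-surface state for a GPU model. */
    typedef struct SurfaceState {
        hwaddr surface_ofs;          /* offset of the surface within vram */
        hwaddr surface_len;
        MemAccessCallback *watch_cb;
        bool dirty;
    } SurfaceState;

    /* Hypothetical handler matching MemAccessCallbackFunc. `addr` is the
     * offset of the hit within `mr` (exec.c computes it from the region's
     * ram_addr base), and `write` distinguishes stores from loads. */
    static void surface_access_cb(void *opaque, MemoryRegion *mr,
                                  hwaddr addr, hwaddr len, bool write)
    {
        SurfaceState *s = opaque;
        if (write) {
            s->dirty = true;   /* e.g. schedule the surface for re-upload */
        }
    }

    /* Watch surface_len bytes at surface_ofs inside the RAM-backed vram
     * region. Note that insert and remove each do a full tlb_flush() for
     * now (see the FIXMEs in exec.c), so register sparingly. */
    static void surface_watch_start(CPUState *cpu, MemoryRegion *vram,
                                    SurfaceState *s)
    {
        mem_access_callback_insert(cpu, vram, s->surface_ofs, s->surface_len,
                                   &s->watch_cb, surface_access_cb, s);
    }

    /* Once the surface has been synchronized or destroyed: */
    static void surface_watch_stop(CPUState *cpu, SurfaceState *s)
    {
        mem_access_callback_remove_by_ref(cpu, s->watch_cb);
        s->watch_cb = NULL;
    }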