mirror of https://github.com/xqemu/xqemu.git
edgar/mmio-exec-v2.for-upstream
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJZUng2AAoJECnFlngPa8qDn0UH/0WiBnYpS2JnJ9LM6kVswHZS
gZm6j4ZCs+E2+Htbns0PQdSfCGoIe4ycS6r5M8oEMEHx65CNwLDvVW7tTE/tulof
2qrxlkuqbhbMouEziuX4jr5aDipBzqOxZLroex0iZ/iegUgh22wL21IWWxosMujB
ayiKOcfXxH4/fVp6OFxKu1DHt7LbTXL2xsnU51HdQKGAJHctsQhZBOwEB+UztOvq
b8Yz60FulPRm0FMUZagHth5R3Ljr10UFu4t3zLV/FQdbGsnBh0JHPF+LuMf0WwIc
uB7l2Zm/Rs5yVlOjPSTQcCGinwd9lxKz0NOQz9fS+GdRl/6HP9wrer++JviYv3Y=
=P86D
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/edgar/tags/edgar/mmio-exec-v2.for-upstream' into staging

edgar/mmio-exec-v2.for-upstream

# gpg: Signature made Tue 27 Jun 2017 16:22:30 BST
# gpg:                using RSA key 0x29C596780F6BCA83
# gpg: Good signature from "Edgar E. Iglesias (Xilinx key) <edgar.iglesias@xilinx.com>"
# gpg:                 aka "Edgar E. Iglesias <edgar.iglesias@gmail.com>"
# Primary key fingerprint: AC44 FEDC 14F7 F1EB EDBF 4151 29C5 9678 0F6B CA83

* remotes/edgar/tags/edgar/mmio-exec-v2.for-upstream:
  xilinx_spips: allow mmio execution
  exec: allow to get a pointer for some mmio memory region
  introduce mmio_interface
  qdev: add MemoryRegion property
  cputlb: fix the way get_page_addr_code fills the tlb
  cputlb: move get_page_addr_code
  cputlb: cleanup get_page_addr_code to use VICTIM_TLB_HIT

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 577caa2672
@@ -746,41 +746,6 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
     return ram_addr;
 }
 
-/* NOTE: this function can trigger an exception */
-/* NOTE2: the returned address is not exactly the physical address: it
- * is actually a ram_addr_t (in system mode; the user mode emulation
- * version of this function returns a guest virtual address).
- */
-tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
-{
-    int mmu_idx, page_index, pd;
-    void *p;
-    MemoryRegion *mr;
-    CPUState *cpu = ENV_GET_CPU(env1);
-    CPUIOTLBEntry *iotlbentry;
-
-    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    mmu_idx = cpu_mmu_index(env1, true);
-    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
-                 (addr & TARGET_PAGE_MASK))) {
-        cpu_ldub_code(env1, addr);
-    }
-    iotlbentry = &env1->iotlb[mmu_idx][page_index];
-    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
-    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
-    if (memory_region_is_unassigned(mr)) {
-        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
-        /* The CPU's unassigned access hook might have longjumped out
-         * with an exception. If it didn't (or there was no hook) then
-         * we can't proceed further.
-         */
-        report_bad_exec(cpu, addr);
-        exit(1);
-    }
-    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
-    return qemu_ram_addr_from_host_nofail(p);
-}
-
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          target_ulong addr, uintptr_t retaddr, int size)
 {
@@ -868,6 +833,53 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                    (ADDR) & TARGET_PAGE_MASK)
 
+/* NOTE: this function can trigger an exception */
+/* NOTE2: the returned address is not exactly the physical address: it
+ * is actually a ram_addr_t (in system mode; the user mode emulation
+ * version of this function returns a guest virtual address).
+ */
+tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
+{
+    int mmu_idx, index, pd;
+    void *p;
+    MemoryRegion *mr;
+    CPUState *cpu = ENV_GET_CPU(env);
+    CPUIOTLBEntry *iotlbentry;
+
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    mmu_idx = cpu_mmu_index(env, true);
+    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
+                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
+        if (!VICTIM_TLB_HIT(addr_read, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
+        }
+    }
+    iotlbentry = &env->iotlb[mmu_idx][index];
+    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
+    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
+    if (memory_region_is_unassigned(mr)) {
+        qemu_mutex_lock_iothread();
+        if (memory_region_request_mmio_ptr(mr, addr)) {
+            qemu_mutex_unlock_iothread();
+            /* A MemoryRegion is potentially added so re-run the
+             * get_page_addr_code.
+             */
+            return get_page_addr_code(env, addr);
+        }
+        qemu_mutex_unlock_iothread();
+
+        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
+        /* The CPU's unassigned access hook might have longjumped out
+         * with an exception. If it didn't (or there was no hook) then
+         * we can't proceed further.
+         */
+        report_bad_exec(cpu, addr);
+        exit(1);
+    }
+    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
+    return qemu_ram_addr_from_host_nofail(p);
+}
+
 /* Probe for whether the specified guest write access is permitted.
  * If it is not permitted then an exception will be taken in the same
  * way as if this were a real write access (and we will not return).
@@ -57,3 +57,4 @@ obj-$(CONFIG_EDU) += edu.o
 obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
 obj-$(CONFIG_AUX) += auxbus.o
 obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o
+obj-y += mmio_interface.o
@@ -0,0 +1,128 @@
+/*
+ * mmio_interface.c
+ *
+ * Copyright (C) 2017 : GreenSocs
+ * http://www.greensocs.com/ , email: info@greensocs.com
+ *
+ * Developed by :
+ * Frederic Konrad <fred.konrad@greensocs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option)any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/qdev-properties.h"
+#include "hw/misc/mmio_interface.h"
+#include "qapi/error.h"
+
+#ifndef DEBUG_MMIO_INTERFACE
+#define DEBUG_MMIO_INTERFACE 0
+#endif
+
+static uint64_t mmio_interface_counter;
+
+#define DPRINTF(fmt, ...) do {                                                 \
+    if (DEBUG_MMIO_INTERFACE) {                                                \
+        qemu_log("mmio_interface: 0x%" PRIX64 ": " fmt, s->id, ## __VA_ARGS__);\
+    }                                                                          \
+} while (0);
+
+static void mmio_interface_init(Object *obj)
+{
+    MMIOInterface *s = MMIO_INTERFACE(obj);
+
+    if (DEBUG_MMIO_INTERFACE) {
+        s->id = mmio_interface_counter++;
+    }
+
+    DPRINTF("interface created\n");
+    s->host_ptr = 0;
+    s->subregion = 0;
+}
+
+static void mmio_interface_realize(DeviceState *dev, Error **errp)
+{
+    MMIOInterface *s = MMIO_INTERFACE(dev);
+
+    DPRINTF("realize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer"
+            " %p\n", s->start, s->end, s->host_ptr);
+
+    if (!s->host_ptr) {
+        error_setg(errp, "host_ptr property must be set");
+    }
+
+    if (!s->subregion) {
+        error_setg(errp, "subregion property must be set");
+    }
+
+    memory_region_init_ram_ptr(&s->ram_mem, OBJECT(s), "ram",
+                               s->end - s->start + 1, s->host_ptr);
+    memory_region_set_readonly(&s->ram_mem, s->ro);
+    memory_region_add_subregion(s->subregion, s->start, &s->ram_mem);
+}
+
+static void mmio_interface_unrealize(DeviceState *dev, Error **errp)
+{
+    MMIOInterface *s = MMIO_INTERFACE(dev);
+
+    DPRINTF("unrealize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer"
+            " %p\n", s->start, s->end, s->host_ptr);
+    memory_region_del_subregion(s->subregion, &s->ram_mem);
+}
+
+static void mmio_interface_finalize(Object *obj)
+{
+    MMIOInterface *s = MMIO_INTERFACE(obj);
+
+    DPRINTF("finalize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer"
+            " %p\n", s->start, s->end, s->host_ptr);
+    object_unparent(OBJECT(&s->ram_mem));
+}
+
+static Property mmio_interface_properties[] = {
+    DEFINE_PROP_UINT64("start", MMIOInterface, start, 0),
+    DEFINE_PROP_UINT64("end", MMIOInterface, end, 0),
+    DEFINE_PROP_PTR("host_ptr", MMIOInterface, host_ptr),
+    DEFINE_PROP_BOOL("ro", MMIOInterface, ro, false),
+    DEFINE_PROP_MEMORY_REGION("subregion", MMIOInterface, subregion),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void mmio_interface_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    dc->realize = mmio_interface_realize;
+    dc->unrealize = mmio_interface_unrealize;
+    dc->props = mmio_interface_properties;
+}
+
+static const TypeInfo mmio_interface_info = {
+    .name = TYPE_MMIO_INTERFACE,
+    .parent = TYPE_DEVICE,
+    .instance_size = sizeof(MMIOInterface),
+    .instance_init = mmio_interface_init,
+    .instance_finalize = mmio_interface_finalize,
+    .class_init = mmio_interface_class_init,
+};
+
+static void mmio_interface_register_types(void)
+{
+    type_register_static(&mmio_interface_info);
+}
+
+type_init(mmio_interface_register_types)
@@ -496,6 +496,18 @@ static const MemoryRegionOps spips_ops = {
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
+static void xilinx_qspips_invalidate_mmio_ptr(XilinxQSPIPS *q)
+{
+    XilinxSPIPS *s = &q->parent_obj;
+
+    if (q->lqspi_cached_addr != ~0ULL) {
+        /* Invalidate the current mapped mmio */
+        memory_region_invalidate_mmio_ptr(&s->mmlqspi, q->lqspi_cached_addr,
+                                          LQSPI_CACHE_SIZE);
+        q->lqspi_cached_addr = ~0ULL;
+    }
+}
+
 static void xilinx_qspips_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned size)
 {
@@ -505,7 +517,7 @@ static void xilinx_qspips_write(void *opaque, hwaddr addr,
     addr >>= 2;
 
     if (addr == R_LQSPI_CFG) {
-        q->lqspi_cached_addr = ~0ULL;
+        xilinx_qspips_invalidate_mmio_ptr(q);
     }
 }
 
@@ -517,27 +529,20 @@ static const MemoryRegionOps qspips_ops = {
 
 #define LQSPI_CACHE_SIZE 1024
 
-static uint64_t
-lqspi_read(void *opaque, hwaddr addr, unsigned int size)
+static void lqspi_load_cache(void *opaque, hwaddr addr)
 {
-    int i;
     XilinxQSPIPS *q = opaque;
     XilinxSPIPS *s = opaque;
-    uint32_t ret;
-
-    if (addr >= q->lqspi_cached_addr &&
-            addr <= q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) {
-        uint8_t *retp = &q->lqspi_buf[addr - q->lqspi_cached_addr];
-        ret = cpu_to_le32(*(uint32_t *)retp);
-        DB_PRINT_L(1, "addr: %08x, data: %08x\n", (unsigned)addr,
-                   (unsigned)ret);
-        return ret;
-    } else {
-        int flash_addr = (addr / num_effective_busses(s));
-        int slave = flash_addr >> LQSPI_ADDRESS_BITS;
-        int cache_entry = 0;
-        uint32_t u_page_save = s->regs[R_LQSPI_STS] & ~LQSPI_CFG_U_PAGE;
+    int i;
+    int flash_addr = ((addr & ~(LQSPI_CACHE_SIZE - 1))
+                   / num_effective_busses(s));
+    int slave = flash_addr >> LQSPI_ADDRESS_BITS;
+    int cache_entry = 0;
+    uint32_t u_page_save = s->regs[R_LQSPI_STS] & ~LQSPI_CFG_U_PAGE;
+
+    if (addr < q->lqspi_cached_addr ||
+        addr > q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) {
+        xilinx_qspips_invalidate_mmio_ptr(q);
         s->regs[R_LQSPI_STS] &= ~LQSPI_CFG_U_PAGE;
         s->regs[R_LQSPI_STS] |= slave ? LQSPI_CFG_U_PAGE : 0;
 
@@ -589,12 +594,43 @@ lqspi_read(void *opaque, hwaddr addr, unsigned int size)
         xilinx_spips_update_cs_lines(s);
 
         q->lqspi_cached_addr = flash_addr * num_effective_busses(s);
     }
 }
 
+static void *lqspi_request_mmio_ptr(void *opaque, hwaddr addr, unsigned *size,
+                                    unsigned *offset)
+{
+    XilinxQSPIPS *q = opaque;
+    hwaddr offset_within_the_region = addr & ~(LQSPI_CACHE_SIZE - 1);
+
+    lqspi_load_cache(opaque, offset_within_the_region);
+    *size = LQSPI_CACHE_SIZE;
+    *offset = offset_within_the_region;
+    return q->lqspi_buf;
+}
+
+static uint64_t
+lqspi_read(void *opaque, hwaddr addr, unsigned int size)
+{
+    XilinxQSPIPS *q = opaque;
+    uint32_t ret;
+
+    if (addr >= q->lqspi_cached_addr &&
+            addr <= q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) {
+        uint8_t *retp = &q->lqspi_buf[addr - q->lqspi_cached_addr];
+        ret = cpu_to_le32(*(uint32_t *)retp);
+        DB_PRINT_L(1, "addr: %08x, data: %08x\n", (unsigned)addr,
+                   (unsigned)ret);
+        return ret;
+    } else {
+        lqspi_load_cache(opaque, addr);
+        return lqspi_read(opaque, addr, size);
+    }
+}
+
 static const MemoryRegionOps lqspi_ops = {
     .read = lqspi_read,
+    .request_ptr = lqspi_request_mmio_ptr,
     .endianness = DEVICE_NATIVE_ENDIAN,
     .valid = {
         .min_access_size = 1,
@@ -137,6 +137,15 @@ struct MemoryRegionOps {
                       uint64_t data,
                       unsigned size,
                       MemTxAttrs attrs);
+    /* Instruction execution pre-callback:
+     * @addr is the address of the access relative to the @mr.
+     * @size is the size of the area returned by the callback.
+     * @offset is the location of the pointer inside @mr.
+     *
+     * Returns a pointer to a location which contains guest code.
+     */
+    void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
+                         unsigned *offset);
 
     enum device_endian endianness;
     /* Guest-visible constraints: */
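
Editorial note: a minimal sketch of a device implementing this callback, assuming a hypothetical device with a pre-filled cache buffer. The mydev_* names are illustrative only; the in-tree user added by this series is the lqspi_* code in the xilinx_spips hunks above.

#include "qemu/osdep.h"
#include "exec/memory.h"

#define MYDEV_WINDOW_SIZE 1024

typedef struct MyDevState {
    MemoryRegion iomem;
    uint8_t cache[MYDEV_WINDOW_SIZE];  /* host copy of one executable window */
} MyDevState;

/* Called by the memory core when a CPU tries to execute from this MMIO
 * region.  Fill the cache, report which window it covers, and return a
 * host pointer; the core then maps a read-only RAM subregion over it. */
static void *mydev_request_mmio_ptr(void *opaque, hwaddr addr,
                                    unsigned *size, unsigned *offset)
{
    MyDevState *s = opaque;
    hwaddr window = addr & ~(MYDEV_WINDOW_SIZE - 1);

    /* ... refill s->cache for the aligned window here ... */
    *size = MYDEV_WINDOW_SIZE;
    *offset = window;
    return s->cache;
}

static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;  /* ordinary MMIO data reads still come through here */
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .request_ptr = mydev_request_mmio_ptr,
    .endianness = DEVICE_NATIVE_ENDIAN,
};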
@@ -1362,6 +1371,32 @@ void memory_global_dirty_log_stop(void);
 
 void mtree_info(fprintf_function mon_printf, void *f, bool flatview);
 
+/**
+ * memory_region_request_mmio_ptr: request a pointer to an mmio
+ * MemoryRegion. If it is possible map a RAM MemoryRegion with this pointer.
+ * When the device wants to invalidate the pointer it will call
+ * memory_region_invalidate_mmio_ptr.
+ *
+ * @mr: #MemoryRegion to check
+ * @addr: address within that region
+ *
+ * Returns true on success, false otherwise.
+ */
+bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
+
+/**
+ * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio
+ * previously requested.
+ * In the end that means that if something wants to execute from this area it
+ * will need to request the pointer again.
+ *
+ * @mr: #MemoryRegion associated to the pointer.
+ * @addr: address within that region
+ * @size: size of that area.
+ */
+void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
+                                       unsigned size);
+
 /**
  * memory_region_dispatch_read: perform a read directly to the specified
  * MemoryRegion.
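
Editorial note: when the data behind a previously handed-out pointer changes, the device is expected to invalidate it so the next fetch goes through request_ptr again. A sketch for the same hypothetical device as above; the in-tree equivalent is xilinx_qspips_invalidate_mmio_ptr in the xilinx_spips hunk.

/* Drop the executable mapping for one window, e.g. after a guest write
 * changed the device state that the cached contents depend on. */
static void mydev_invalidate_window(MyDevState *s, hwaddr window)
{
    /* Tears down the RAM subregion the core mapped over this window;
     * the next instruction fetch from it triggers request_ptr again. */
    memory_region_invalidate_mmio_ptr(&s->iomem, window, MYDEV_WINDOW_SIZE);
}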
@@ -0,0 +1,49 @@
+/*
+ * mmio_interface.h
+ *
+ * Copyright (C) 2017 : GreenSocs
+ * http://www.greensocs.com/ , email: info@greensocs.com
+ *
+ * Developed by :
+ * Frederic Konrad <fred.konrad@greensocs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option)any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef MMIO_INTERFACE_H
+#define MMIO_INTERFACE_H
+
+#include "exec/memory.h"
+
+#define TYPE_MMIO_INTERFACE "mmio_interface"
+#define MMIO_INTERFACE(obj) OBJECT_CHECK(MMIOInterface, (obj), \
+                                         TYPE_MMIO_INTERFACE)
+
+typedef struct MMIOInterface {
+    DeviceState parent_obj;
+
+    MemoryRegion *subregion;
+    MemoryRegion ram_mem;
+    uint64_t start;
+    uint64_t end;
+    bool ro;
+    uint64_t id;
+    void *host_ptr;
+} MMIOInterface;
+
+void mmio_interface_map(MMIOInterface *s);
+void mmio_interface_unmap(MMIOInterface *s);
+
+#endif /* MMIO_INTERFACE_H */
@@ -177,6 +177,8 @@ extern PropertyInfo qdev_prop_arraylen;
     DEFINE_PROP_UNSIGNED(_n, _s, _f, 0, qdev_prop_blocksize, uint16_t)
 #define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \
     DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress)
+#define DEFINE_PROP_MEMORY_REGION(_n, _s, _f) \
+    DEFINE_PROP(_n, _s, _f, qdev_prop_ptr, MemoryRegion *)
 
 #define DEFINE_PROP_END_OF_LIST() \
     {}
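
Editorial note: with DEFINE_PROP_MEMORY_REGION any qdev device can take a MemoryRegion pointer as a property, which is how the mmio_interface device above receives the container it maps itself into. A hypothetical use, assuming the device state has a "MemoryRegion *container" member:

#include "hw/qdev-properties.h"

static Property mydev_properties[] = {
    /* set by whoever creates the device, e.g. via qdev_prop_set_ptr() */
    DEFINE_PROP_MEMORY_REGION("container", MyDevState, container),
    DEFINE_PROP_END_OF_LIST(),
};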
memory.c
@@ -30,6 +30,8 @@
 #include "exec/ram_addr.h"
 #include "sysemu/kvm.h"
 #include "sysemu/sysemu.h"
+#include "hw/misc/mmio_interface.h"
+#include "hw/qdev-properties.h"
 
 //#define DEBUG_UNASSIGNED
 
@@ -2430,6 +2432,115 @@ void memory_listener_unregister(MemoryListener *listener)
     listener->address_space = NULL;
 }
 
+bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
+{
+    void *host;
+    unsigned size = 0;
+    unsigned offset = 0;
+    Object *new_interface;
+
+    if (!mr || !mr->ops->request_ptr) {
+        return false;
+    }
+
+    /*
+     * Avoid an update if the request_ptr call
+     * memory_region_invalidate_mmio_ptr which seems to be likely when we use
+     * a cache.
+     */
+    memory_region_transaction_begin();
+
+    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
+
+    if (!host || !size) {
+        memory_region_transaction_commit();
+        return false;
+    }
+
+    new_interface = object_new("mmio_interface");
+    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
+    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
+    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
+    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
+    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
+    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
+
+    memory_region_transaction_commit();
+    return true;
+}
+
+typedef struct MMIOPtrInvalidate {
+    MemoryRegion *mr;
+    hwaddr offset;
+    unsigned size;
+    int busy;
+    int allocated;
+} MMIOPtrInvalidate;
+
+#define MAX_MMIO_INVALIDATE 10
+static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
+
+static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
+                                                 run_on_cpu_data data)
+{
+    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
+    MemoryRegion *mr = invalidate_data->mr;
+    hwaddr offset = invalidate_data->offset;
+    unsigned size = invalidate_data->size;
+    MemoryRegionSection section = memory_region_find(mr, offset, size);
+
+    qemu_mutex_lock_iothread();
+
+    /* Reset dirty so this doesn't happen later. */
+    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
+
+    if (section.mr != mr) {
+        /* memory_region_find add a ref on section.mr */
+        memory_region_unref(section.mr);
+        if (MMIO_INTERFACE(section.mr->owner)) {
+            /* We found the interface just drop it. */
+            object_property_set_bool(section.mr->owner, false, "realized",
+                                     NULL);
+            object_unref(section.mr->owner);
+            object_unparent(section.mr->owner);
+        }
+    }
+
+    qemu_mutex_unlock_iothread();
+
+    if (invalidate_data->allocated) {
+        g_free(invalidate_data);
+    } else {
+        invalidate_data->busy = 0;
+    }
+}
+
+void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
+                                       unsigned size)
+{
+    size_t i;
+    MMIOPtrInvalidate *invalidate_data = NULL;
+
+    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
+        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
+            invalidate_data = &mmio_ptr_invalidate_list[i];
+            break;
+        }
+    }
+
+    if (!invalidate_data) {
+        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
+        invalidate_data->allocated = 1;
+    }
+
+    invalidate_data->mr = mr;
+    invalidate_data->offset = offset;
+    invalidate_data->size = size;
+
+    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
+                          RUN_ON_CPU_HOST_PTR(invalidate_data));
+}
+
 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
 {
     memory_region_ref(root);