mirror of https://github.com/xemu-project/xemu.git
exec.c: Add new address_space_ld*/st* functions
Add new address_space_ld*/st* functions which allow transaction
attributes and error reporting for basic loads and stores. These are
named to be in line with the address_space_read/write/rw buffer
operations.

The existing ld/st*_phys functions are now wrappers around the new
functions.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
parent 5c9eb0286c
commit 500131154d

exec.c | 297 lines changed
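For context only (not part of this commit), here is a minimal sketch of how a caller might use one of the new accessors once this change is applied, checking the returned MemTxResult instead of silently ignoring bus errors. The helper name and register offset below are made up for illustration; the accessor signature is the one added by this patch, assuming the declarations are pulled in via the usual memory API header:

    /* Hypothetical helper: read a little-endian 32-bit status register
     * through a bus master's AddressSpace and report transaction failure. */
    static uint32_t mydev_read_status(AddressSpace *as, hwaddr regbase)
    {
        MemTxResult res;
        uint32_t val;

        /* New-style accessor: transaction attributes in, error code out. */
        val = address_space_ldl_le(as, regbase + 0x04,
                                   MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            /* Decode or device error: return all-ones to the caller. */
            return 0xffffffff;
        }
        return val;
    }

The legacy ldl_le_phys(as, addr) call remains available and, after this patch, simply forwards to address_space_ldl_le() with MEMTXATTRS_UNSPECIFIED and a NULL result pointer.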
exec.c:

@@ -2679,20 +2679,22 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
 }
 
 /* warning: addr must be aligned */
-static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
-                                         enum device_endian endian)
+static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
+                                                  MemTxAttrs attrs,
+                                                  MemTxResult *result,
+                                                  enum device_endian endian)
 {
     uint8_t *ptr;
     uint64_t val;
     MemoryRegion *mr;
     hwaddr l = 4;
     hwaddr addr1;
+    MemTxResult r;
 
     mr = address_space_translate(as, addr, &addr1, &l, false);
     if (l < 4 || !memory_access_is_direct(mr, false)) {
         /* I/O case */
-        memory_region_dispatch_read(mr, addr1, &val, 4,
-                                    MEMTXATTRS_UNSPECIFIED);
+        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
 #if defined(TARGET_WORDS_BIGENDIAN)
         if (endian == DEVICE_LITTLE_ENDIAN) {
             val = bswap32(val);
@@ -2718,41 +2720,68 @@ static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
             val = ldl_p(ptr);
             break;
         }
+        r = MEMTX_OK;
     }
+    if (result) {
+        *result = r;
+    }
     return val;
 }
 
+uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
+                           MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_ldl_internal(as, addr, attrs, result,
+                                      DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_ldl_internal(as, addr, attrs, result,
+                                      DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_ldl_internal(as, addr, attrs, result,
+                                      DEVICE_BIG_ENDIAN);
+}
+
 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
+    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
+    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
+    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* warning: addr must be aligned */
-static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
-                                         enum device_endian endian)
+static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
+                                                  MemTxAttrs attrs,
+                                                  MemTxResult *result,
+                                                  enum device_endian endian)
 {
     uint8_t *ptr;
     uint64_t val;
     MemoryRegion *mr;
     hwaddr l = 8;
     hwaddr addr1;
+    MemTxResult r;
 
     mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 8 || !memory_access_is_direct(mr, false)) {
         /* I/O case */
-        memory_region_dispatch_read(mr, addr1, &val, 8,
-                                    MEMTXATTRS_UNSPECIFIED);
+        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
 #if defined(TARGET_WORDS_BIGENDIAN)
         if (endian == DEVICE_LITTLE_ENDIAN) {
             val = bswap64(val);
@@ -2778,49 +2807,88 @@ static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
             val = ldq_p(ptr);
             break;
         }
+        r = MEMTX_OK;
     }
+    if (result) {
+        *result = r;
+    }
     return val;
 }
 
+uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
+                           MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_ldq_internal(as, addr, attrs, result,
+                                      DEVICE_NATIVE_ENDIAN);
+}
+
+uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_ldq_internal(as, addr, attrs, result,
+                                      DEVICE_LITTLE_ENDIAN);
+}
+
+uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_ldq_internal(as, addr, attrs, result,
+                                      DEVICE_BIG_ENDIAN);
+}
+
 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
+    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
+    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
+    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* XXX: optimize */
-uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
+uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
+                            MemTxAttrs attrs, MemTxResult *result)
 {
     uint8_t val;
-    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, &val, 1, 0);
+    MemTxResult r;
+
+    r = address_space_rw(as, addr, attrs, &val, 1, 0);
+    if (result) {
+        *result = r;
+    }
     return val;
 }
 
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
+{
+    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
 /* warning: addr must be aligned */
-static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
-                                          enum device_endian endian)
+static inline uint32_t address_space_lduw_internal(AddressSpace *as,
+                                                   hwaddr addr,
+                                                   MemTxAttrs attrs,
+                                                   MemTxResult *result,
+                                                   enum device_endian endian)
 {
     uint8_t *ptr;
     uint64_t val;
     MemoryRegion *mr;
     hwaddr l = 2;
     hwaddr addr1;
+    MemTxResult r;
 
     mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 2 || !memory_access_is_direct(mr, false)) {
         /* I/O case */
-        memory_region_dispatch_read(mr, addr1, &val, 2,
-                                    MEMTXATTRS_UNSPECIFIED);
+        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
 #if defined(TARGET_WORDS_BIGENDIAN)
         if (endian == DEVICE_LITTLE_ENDIAN) {
             val = bswap16(val);
@@ -2846,40 +2914,66 @@ static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
             val = lduw_p(ptr);
             break;
         }
+        r = MEMTX_OK;
     }
+    if (result) {
+        *result = r;
+    }
     return val;
 }
 
+uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
+                            MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_lduw_internal(as, addr, attrs, result,
+                                       DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
+                               MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_lduw_internal(as, addr, attrs, result,
+                                       DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
+                               MemTxAttrs attrs, MemTxResult *result)
+{
+    return address_space_lduw_internal(as, addr, attrs, result,
+                                       DEVICE_BIG_ENDIAN);
+}
+
 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
 {
-    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
+    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
 {
-    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
+    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
 {
-    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
+    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* warning: addr must be aligned. The ram page is not masked as dirty
    and the code inside is not invalidated. It is useful if the dirty
    bits are used to track modified PTEs */
-void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
+                                MemTxAttrs attrs, MemTxResult *result)
 {
     uint8_t *ptr;
     MemoryRegion *mr;
     hwaddr l = 4;
     hwaddr addr1;
+    MemTxResult r;
 
     mr = address_space_translate(as, addr, &addr1, &l,
                                  true);
     if (l < 4 || !memory_access_is_direct(mr, true)) {
-        memory_region_dispatch_write(mr, addr1, val, 4,
-                                     MEMTXATTRS_UNSPECIFIED);
+        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
     } else {
         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
         ptr = qemu_get_ram_ptr(addr1);
@@ -2893,18 +2987,30 @@ void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
                 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
             }
         }
+        r = MEMTX_OK;
     }
+    if (result) {
+        *result = r;
+    }
 }
+
+void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+}
 
 /* warning: addr must be aligned */
-static inline void stl_phys_internal(AddressSpace *as,
-                                     hwaddr addr, uint32_t val,
-                                     enum device_endian endian)
+static inline void address_space_stl_internal(AddressSpace *as,
+                                              hwaddr addr, uint32_t val,
+                                              MemTxAttrs attrs,
+                                              MemTxResult *result,
+                                              enum device_endian endian)
 {
     uint8_t *ptr;
     MemoryRegion *mr;
     hwaddr l = 4;
     hwaddr addr1;
+    MemTxResult r;
 
     mr = address_space_translate(as, addr, &addr1, &l,
                                  true);
@@ -2918,8 +3024,7 @@ static inline void stl_phys_internal(AddressSpace *as,
             val = bswap32(val);
         }
 #endif
-        memory_region_dispatch_write(mr, addr1, val, 4,
-                                     MEMTXATTRS_UNSPECIFIED);
+        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
     } else {
         /* RAM case */
         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
@@ -2936,40 +3041,79 @@ static inline void stl_phys_internal(AddressSpace *as,
             break;
         }
         invalidate_and_set_dirty(addr1, 4);
+        r = MEMTX_OK;
     }
+    if (result) {
+        *result = r;
+    }
 }
 
+void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
+                       MemTxAttrs attrs, MemTxResult *result)
+{
+    address_space_stl_internal(as, addr, val, attrs, result,
+                               DEVICE_NATIVE_ENDIAN);
+}
+
+void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result)
+{
+    address_space_stl_internal(as, addr, val, attrs, result,
+                               DEVICE_LITTLE_ENDIAN);
+}
+
+void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result)
+{
+    address_space_stl_internal(as, addr, val, attrs, result,
+                               DEVICE_BIG_ENDIAN);
+}
+
 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
 {
-    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
+    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
 {
-    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
+    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
 {
-    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
+    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* XXX: optimize */
-void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
+void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
+                       MemTxAttrs attrs, MemTxResult *result)
 {
     uint8_t v = val;
-    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, &v, 1, 1);
+    MemTxResult r;
+
+    r = address_space_rw(as, addr, attrs, &v, 1, 1);
+    if (result) {
+        *result = r;
+    }
+}
+
+void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* warning: addr must be aligned */
-static inline void stw_phys_internal(AddressSpace *as,
-                                     hwaddr addr, uint32_t val,
-                                     enum device_endian endian)
+static inline void address_space_stw_internal(AddressSpace *as,
+                                              hwaddr addr, uint32_t val,
+                                              MemTxAttrs attrs,
+                                              MemTxResult *result,
+                                              enum device_endian endian)
 {
     uint8_t *ptr;
     MemoryRegion *mr;
     hwaddr l = 2;
     hwaddr addr1;
+    MemTxResult r;
 
     mr = address_space_translate(as, addr, &addr1, &l, true);
     if (l < 2 || !memory_access_is_direct(mr, true)) {
@@ -2982,8 +3126,7 @@ static inline void stw_phys_internal(AddressSpace *as,
             val = bswap16(val);
         }
 #endif
-        memory_region_dispatch_write(mr, addr1, val, 2,
-                                     MEMTXATTRS_UNSPECIFIED);
+        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
     } else {
         /* RAM case */
         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
@@ -3000,41 +3143,95 @@ static inline void stw_phys_internal(AddressSpace *as,
             break;
         }
         invalidate_and_set_dirty(addr1, 2);
+        r = MEMTX_OK;
     }
+    if (result) {
+        *result = r;
+    }
 }
 
+void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
+                       MemTxAttrs attrs, MemTxResult *result)
+{
+    address_space_stw_internal(as, addr, val, attrs, result,
+                               DEVICE_NATIVE_ENDIAN);
+}
+
+void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result)
+{
+    address_space_stw_internal(as, addr, val, attrs, result,
+                               DEVICE_LITTLE_ENDIAN);
+}
+
+void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result)
+{
+    address_space_stw_internal(as, addr, val, attrs, result,
+                               DEVICE_BIG_ENDIAN);
+}
+
 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
 {
-    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
+    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
 {
-    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
+    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
 {
-    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
+    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* XXX: optimize */
+void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
+                       MemTxAttrs attrs, MemTxResult *result)
+{
+    MemTxResult r;
+    val = tswap64(val);
+    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+    if (result) {
+        *result = r;
+    }
+}
+
+void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
+                          MemTxAttrs attrs, MemTxResult *result)
+{
+    MemTxResult r;
+    val = cpu_to_le64(val);
+    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+    if (result) {
+        *result = r;
+    }
+}
+void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
+                          MemTxAttrs attrs, MemTxResult *result)
+{
+    MemTxResult r;
+    val = cpu_to_be64(val);
+    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+    if (result) {
+        *result = r;
+    }
+}
+
 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
 {
-    val = tswap64(val);
-    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, (void *) &val, 8, 1);
+    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
 {
-    val = cpu_to_le64(val);
-    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, (void *) &val, 8, 1);
+    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
 {
-    val = cpu_to_be64(val);
-    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, (void *) &val, 8, 1);
+    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 /* virtual memory access for debug (includes writing to ROM) */
include/exec/memory.h:

@@ -1153,6 +1153,73 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                uint8_t *buf, int len);
 
+/**
+ * address_space_ld*: load from an address space
+ * address_space_st*: store to an address space
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address within the AddressSpace.
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @as #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ *   if NULL, this information is discarded
+ */
+uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
+                            MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
+                               MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
+                               MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
+                              MemTxAttrs attrs, MemTxResult *result);
+void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
+                       MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
+                          MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
+                          MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
+                          MemTxAttrs attrs, MemTxResult *result);
+
+#ifdef NEED_CPU_H
+uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
+                            MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
+                           MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
+                           MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
+                                MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
+                       MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
+                       MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
+                       MemTxAttrs attrs, MemTxResult *result);
+#endif
+
 /* address_space_translate: translate an address range into an address space
  * into a MemoryRegion and an address range into that section
  *
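As an aside (again, not part of the commit), a device model that wants to propagate store failures could use the explicitly-sized, explicit-endianness store variants declared above in the same way; the helper name and the "done" constant below are invented for illustration:

    /* Hypothetical DMA completion writeback: store a 32-bit "done" flag
     * into guest memory with the CPU-agnostic little-endian accessor and
     * hand the transaction result back to the caller. */
    static MemTxResult mydev_dma_complete(AddressSpace *as, hwaddr desc_addr)
    {
        MemTxResult res;

        address_space_stl_le(as, desc_addr, 0x1 /* done */,
                             MEMTXATTRS_UNSPECIFIED, &res);
        return res;
    }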
|
||||
|
|
Loading…
Reference in New Issue