system/physmem: Propagate AddressSpace to MapClient helpers

Propagate the AddressSpace argument to the following helpers:
- register_map_client()
- unregister_map_client()
- notify_map_clients[_locked]()

Rename them using the 'address_space_' prefix instead of 'cpu_'.

The AddressSpace argument will be used in the next commit.

Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Mattias Nissler <mnissler@rivosinc.com>
Message-ID: <20240507094210.300566-2-mnissler@rivosinc.com>
[PMD: Split patch, part 1/2]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Author:    Mattias Nissler, 2023-09-07 06:04:23 -07:00
Committer: Philippe Mathieu-Daudé
Commit:    5c62719710 (parent: d5e268197a)
Stats:     4 changed files, 38 additions(+), 18 deletions(-)
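
For quick reference, the rename amounts to the following prototype change (copied from the hunks below; the new AddressSpace parameter is carried through but only used in the follow-up commit):

    /* Old API, removed in the first hunk: */
    void cpu_register_map_client(QEMUBH *bh);
    void cpu_unregister_map_client(QEMUBH *bh);

    /* New API introduced by this patch: */
    void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
    void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);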

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h

@@ -147,8 +147,6 @@ void *cpu_physical_memory_map(hwaddr addr,
                               bool is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                bool is_write, hwaddr access_len);
-void cpu_register_map_client(QEMUBH *bh);
-void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);

diff --git a/include/exec/memory.h b/include/exec/memory.h

@@ -2946,8 +2946,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
  * May return %NULL and set *@plen to zero(0), if resources needed to perform
  * the mapping are exhausted.
  * Use only for reads OR writes - not for read-modify-write operations.
- * Use cpu_register_map_client() to know when retrying the map operation is
- * likely to succeed.
+ * Use address_space_register_map_client() to know when retrying the map
+ * operation is likely to succeed.
  *
  * @as: #AddressSpace to be accessed
  * @addr: address within that address space
@@ -2972,6 +2972,28 @@ void *address_space_map(AddressSpace *as, hwaddr addr,
 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                          bool is_write, hwaddr access_len);
+/*
+ * address_space_register_map_client: Register a callback to invoke when
+ * resources for address_space_map() are available again.
+ *
+ * address_space_map may fail when there are not enough resources available,
+ * such as when bounce buffer memory would exceed the limit. The callback can
+ * be used to retry the address_space_map operation. Note that the callback
+ * gets automatically removed after firing.
+ *
+ * @as: #AddressSpace to be accessed
+ * @bh: callback to invoke when address_space_map() retry is appropriate
+ */
+void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
+
+/*
+ * address_space_unregister_map_client: Unregister a callback that has
+ * previously been registered and not fired yet.
+ *
+ * @as: #AddressSpace to be accessed
+ * @bh: callback to unregister
+ */
+void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
 
 /* Internal functions, part of the implementation of address_space_read. */
 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
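
As an aside (not part of this patch), the retry pattern these helpers enable looks roughly as follows. This is a minimal sketch against the new API; MyDMAState, its fields, and my_dma_try_map() are hypothetical names invented for illustration, while the QEMU calls (address_space_map(), aio_bh_new(), qemu_bh_delete(), qemu_get_aio_context()) are existing APIs:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* qemu_get_aio_context(), bottom-half helpers */
#include "exec/memory.h"      /* address_space_map() and friends */

/* Hypothetical per-request state, for illustration only. */
typedef struct MyDMAState {
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
    QEMUBH *retry_bh;
} MyDMAState;

static void my_dma_try_map(void *opaque)
{
    MyDMAState *s = opaque;
    hwaddr plen = s->len;
    void *buf;

    if (s->retry_bh) {
        /*
         * The map client entry was already dropped when the BH fired;
         * the BH itself is still ours to delete.
         */
        qemu_bh_delete(s->retry_bh);
        s->retry_bh = NULL;
    }

    buf = address_space_map(s->as, s->addr, &plen, true, MEMTXATTRS_UNSPECIFIED);
    if (!buf) {
        /*
         * Resources (e.g. the bounce buffer) are busy: register a callback
         * that re-enters this function once mapping may succeed again.
         */
        s->retry_bh = aio_bh_new(qemu_get_aio_context(), my_dma_try_map, s);
        address_space_register_map_client(s->as, s->retry_bh);
        return;
    }

    /* ... access buf[0..plen) ... */
    address_space_unmap(s->as, buf, plen, true, plen);
}

The dma-helpers hunks below follow the same shape: dma_blk_cb() registers reschedule_dma() as the bottom half, and dbs->sg->as now supplies the AddressSpace.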

diff --git a/system/dma-helpers.c b/system/dma-helpers.c

@@ -169,7 +169,7 @@ static void dma_blk_cb(void *opaque, int ret)
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
         dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
-        cpu_register_map_client(dbs->bh);
+        address_space_register_map_client(dbs->sg->as, dbs->bh);
         return;
     }
 
@@ -197,7 +197,7 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     }
 
     if (dbs->bh) {
-        cpu_unregister_map_client(dbs->bh);
+        address_space_unregister_map_client(dbs->sg->as, dbs->bh);
         qemu_bh_delete(dbs->bh);
         dbs->bh = NULL;
     }

diff --git a/system/physmem.c b/system/physmem.c

@@ -3066,24 +3066,24 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client_do(MapClient *client)
+static void address_space_unregister_map_client_do(MapClient *client)
 {
     QLIST_REMOVE(client, link);
     g_free(client);
 }
 
-static void cpu_notify_map_clients_locked(void)
+static void address_space_notify_map_clients_locked(AddressSpace *as)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
         qemu_bh_schedule(client->bh);
-        cpu_unregister_map_client_do(client);
+        address_space_unregister_map_client_do(client);
     }
 }
 
-void cpu_register_map_client(QEMUBH *bh)
+void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
@@ -3093,7 +3093,7 @@ void cpu_register_map_client(QEMUBH *bh)
     /* Write map_client_list before reading in_use. */
     smp_mb();
     if (!qatomic_read(&bounce.in_use)) {
-        cpu_notify_map_clients_locked();
+        address_space_notify_map_clients_locked(as);
     }
 }
@@ -3113,23 +3113,23 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-void cpu_unregister_map_client(QEMUBH *bh)
+void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh)
 {
     MapClient *client;
 
     QEMU_LOCK_GUARD(&map_client_list_lock);
     QLIST_FOREACH(client, &map_client_list, link) {
         if (client->bh == bh) {
-            cpu_unregister_map_client_do(client);
+            address_space_unregister_map_client_do(client);
             break;
         }
     }
 }
 
-static void cpu_notify_map_clients(void)
+static void address_space_notify_map_clients(AddressSpace *as)
 {
     QEMU_LOCK_GUARD(&map_client_list_lock);
-    cpu_notify_map_clients_locked();
+    address_space_notify_map_clients_locked(as);
 }
 
 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
@@ -3196,8 +3196,8 @@ flatview_extend_translation(FlatView *fv, hwaddr addr,
  * May map a subset of the requested range, given by and returned in *plen.
  * May return NULL if resources needed to perform the mapping are exhausted.
  * Use only for reads OR writes - not for read-modify-write operations.
- * Use cpu_register_map_client() to know when retrying the map operation is
- * likely to succeed.
+ * Use address_space_register_map_client() to know when retrying the map
+ * operation is likely to succeed.
  */
 void *address_space_map(AddressSpace *as,
                         hwaddr addr,
@@ -3280,7 +3280,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     memory_region_unref(bounce.mr);
     /* Clear in_use before reading map_client_list. */
     qatomic_set_mb(&bounce.in_use, false);
-    cpu_notify_map_clients();
+    address_space_notify_map_clients(as);
 }
 
 void *cpu_physical_memory_map(hwaddr addr,