mcpx: Use new bql_[un]lock functions

Matt Borgerson 2025-01-06 04:08:41 -07:00
parent 5cb65d1791
commit 3106ea97e5
5 changed files with 29 additions and 28 deletions

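The change below tracks upstream QEMU's rename of the Big QEMU Lock (BQL) helpers: each qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() call becomes bql_lock()/bql_unlock(), which are declared in "qemu/main-loop.h" (hence the new include in the first file). As a minimal sketch of the recurring pattern in these hunks, assuming a QEMU tree that already provides bql_lock()/bql_unlock(), and using a hypothetical helper and lock names rather than the actual nv2a state:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* declares bql_lock() / bql_unlock() */
#include "qemu/thread.h"      /* QemuMutex, QemuCond */

/* Hypothetical helper loosely mirroring nv2a_lock_fifo() below: release the
 * BQL before blocking on a worker-thread condition variable, then retake it
 * once the worker has signalled. Names here are illustrative only. */
static void wait_for_worker_idle(QemuMutex *worker_lock, QemuCond *idle_cond)
{
    qemu_mutex_lock(worker_lock);
    bql_unlock();                            /* was qemu_mutex_unlock_iothread() */
    qemu_cond_wait(idle_cond, worker_lock);  /* sleep with the BQL released */
    bql_lock();                              /* was qemu_mutex_lock_iothread() */
    qemu_mutex_unlock(worker_lock);
}

Dropping the BQL around the blocking wait matters because the thread being waited on may itself need the BQL (for example to raise an interrupt), so holding it across qemu_cond_wait() could deadlock.
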
View File

@@ -20,6 +20,7 @@
*/
#include "hw/xbox/nv2a/nv2a_int.h"
#include "qemu/main-loop.h"
void nv2a_update_irq(NV2AState *d)
{
@@ -244,9 +245,9 @@ static void nv2a_lock_fifo(NV2AState *d)
{
qemu_mutex_lock(&d->pfifo.lock);
qemu_cond_broadcast(&d->pfifo.fifo_cond);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_cond_wait(&d->pfifo.fifo_idle_cond, &d->pfifo.lock);
-qemu_mutex_lock_iothread();
+bql_lock();
qemu_mutex_lock(&d->pgraph.lock);
}
@@ -267,9 +268,9 @@ static void nv2a_reset(NV2AState *d)
qemu_event_reset(&d->pgraph.flush_complete);
qatomic_set(&d->pgraph.flush_pending, true);
nv2a_unlock_fifo(d);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_event_wait(&d->pgraph.flush_complete);
-qemu_mutex_lock_iothread();
+bql_lock();
nv2a_lock_fifo(d);
if (!halted) {
qatomic_set(&d->pfifo.halt, false);
@@ -366,9 +367,9 @@ static void nv2a_vm_state_change(void *opaque, bool running, RunState state)
qatomic_set(&d->pfifo.halt, true);
pgraph_pre_savevm_trigger(d);
nv2a_unlock_fifo(d);
-qemu_mutex_unlock_iothread();
+bql_unlock();
pgraph_pre_savevm_wait(d);
-qemu_mutex_lock_iothread();
+bql_lock();
nv2a_lock_fifo(d);
} else if (state == RUN_STATE_RESTORE_VM) {
nv2a_lock_fifo(d);
@@ -382,9 +383,9 @@ static void nv2a_vm_state_change(void *opaque, bool running, RunState state)
nv2a_lock_fifo(d);
pgraph_pre_shutdown_trigger(d);
nv2a_unlock_fifo(d);
-qemu_mutex_unlock_iothread();
+bql_unlock();
pgraph_pre_shutdown_wait(d);
-qemu_mutex_lock_iothread();
+bql_lock();
}
}

View File

@@ -207,12 +207,12 @@ static ssize_t pfifo_run_puller(NV2AState *d, uint32_t method_entry,
/* methods that take objects.
* TODO: Check this range is correct for the nv2a */
if (method >= 0x180 && method < 0x200) {
-//qemu_mutex_lock_iothread();
+//bql_lock();
RAMHTEntry entry = ramht_lookup(d, parameter);
assert(entry.valid);
// assert(entry.channel_id == state->channel_id);
parameter = entry.instance;
-//qemu_mutex_unlock_iothread();
+//bql_unlock();
}
enum FIFOEngine engine = GET_MASK(*engine_reg, 3 << (4*subchannel));

View File

@@ -482,12 +482,12 @@ static SurfaceBinding *surface_put(NV2AState *d, hwaddr addr,
if (tcg_enabled()) {
qemu_mutex_unlock(&d->pgraph.lock);
-qemu_mutex_lock_iothread();
+bql_lock();
mem_access_callback_insert(qemu_get_cpu(0),
d->vram, surface_out->vram_addr, surface_out->size,
&surface_out->access_cb, &surface_access_callback,
surface_out);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&d->pgraph.lock);
}
@@ -545,9 +545,9 @@ void pgraph_gl_surface_invalidate(NV2AState *d, SurfaceBinding *surface)
if (tcg_enabled()) {
qemu_mutex_unlock(&d->pgraph.lock);
-qemu_mutex_lock_iothread();
+bql_lock();
mem_access_callback_remove_by_ref(qemu_get_cpu(0), surface->access_cb);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&d->pgraph.lock);
}

View File

@@ -197,10 +197,10 @@ void pgraph_context_switch(NV2AState *d, unsigned int channel_id)
pg->waiting_for_context_switch = true;
qemu_mutex_unlock(&pg->lock);
-qemu_mutex_lock_iothread();
+bql_lock();
pg->pending_interrupts |= NV_PGRAPH_INTR_CONTEXT_SWITCH;
nv2a_update_irq(d);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&pg->lock);
}
}
@@ -381,13 +381,13 @@ void nv2a_set_surface_scale_factor(unsigned int scale)
{
NV2AState *d = g_nv2a;
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&d->pgraph.renderer_lock);
if (d->pgraph.renderer->ops.set_surface_scale_factor) {
d->pgraph.renderer->ops.set_surface_scale_factor(d, scale);
}
qemu_mutex_unlock(&d->pgraph.renderer_lock);
-qemu_mutex_lock_iothread();
+bql_lock();
}
unsigned int nv2a_get_surface_scale_factor(void)
@@ -395,13 +395,13 @@ unsigned int nv2a_get_surface_scale_factor(void)
NV2AState *d = g_nv2a;
int s = 1;
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&d->pgraph.renderer_lock);
if (d->pgraph.renderer->ops.get_surface_scale_factor) {
s = d->pgraph.renderer->ops.get_surface_scale_factor(d);
}
qemu_mutex_unlock(&d->pgraph.renderer_lock);
-qemu_mutex_lock_iothread();
+bql_lock();
return s;
}
@@ -846,9 +846,9 @@ DEF_METHOD(NV097, NO_OPERATION)
pg->waiting_for_nop = true;
qemu_mutex_unlock(&pg->lock);
-qemu_mutex_lock_iothread();
+bql_lock();
nv2a_update_irq(d);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&pg->lock);
}
@@ -2649,7 +2649,7 @@ DEF_METHOD(NV097, BACK_END_WRITE_SEMAPHORE_RELEASE)
d->pgraph.renderer->ops.surface_update(d, false, true, true);
//qemu_mutex_unlock(&d->pgraph.lock);
-//qemu_mutex_lock_iothread();
+//bql_lock();
uint32_t semaphore_offset = pgraph_reg_r(pg, NV_PGRAPH_SEMAPHOREOFFSET);
@@ -2662,7 +2662,7 @@ DEF_METHOD(NV097, BACK_END_WRITE_SEMAPHORE_RELEASE)
stl_le_p((uint32_t*)semaphore_data, parameter);
//qemu_mutex_lock(&d->pgraph.lock);
-//qemu_mutex_unlock_iothread();
+//bql_unlock();
}
DEF_METHOD(NV097, SET_ZMIN_MAX_CONTROL)

View File

@@ -546,12 +546,12 @@ static void register_cpu_access_callback(NV2AState *d, SurfaceBinding *surface)
{
if (tcg_enabled()) {
qemu_mutex_unlock(&d->pgraph.lock);
-qemu_mutex_lock_iothread();
+bql_lock();
mem_access_callback_insert(qemu_get_cpu(0),
d->vram, surface->vram_addr, surface->size,
&surface->access_cb, &surface_access_callback,
surface);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&d->pgraph.lock);
}
}
@@ -561,9 +561,9 @@ static void unregister_cpu_access_callback(NV2AState *d,
{
if (tcg_enabled()) {
qemu_mutex_unlock(&d->pgraph.lock);
-qemu_mutex_lock_iothread();
+bql_lock();
mem_access_callback_remove_by_ref(qemu_get_cpu(0), surface->access_cb);
-qemu_mutex_unlock_iothread();
+bql_unlock();
qemu_mutex_lock(&d->pgraph.lock);
}
}