mirror of https://github.com/xemu-project/xemu.git
nv2a: Writeback dirty surfaces on scale change, VM load
parent 4c5eb60262
commit c31cbf9f4e
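Context for the diff below: the change routes heavy work (dirty-surface writeback, flushes, GL sync) through the pfifo worker thread. A requesting thread sets a pending flag, kicks the worker, and blocks on a QemuEvent until the worker signals completion. The following is a minimal sketch of that request/complete handshake, written with C11 atomics and pthreads rather than xemu's qatomic_*/QemuEvent/pfifo_kick primitives; worker_state, request_flush, and worker_loop are invented names for illustration, not part of the xemu source.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the pending flag and the completion event. */
typedef struct {
    atomic_bool flush_pending;   /* request flag (cf. pgraph.flush_pending) */
    pthread_mutex_t lock;        /* protects done/done_cond (cf. QemuEvent) */
    pthread_cond_t done_cond;
    bool done;
} worker_state;

/* Worker side: notice the flag, do the work, clear it, signal completion. */
static void *worker_loop(void *arg)
{
    worker_state *w = arg;
    for (;;) {
        if (atomic_load(&w->flush_pending)) {
            printf("worker: flushing\n");            /* stands in for the flush work */
            atomic_store(&w->flush_pending, false);
            pthread_mutex_lock(&w->lock);
            w->done = true;                          /* cf. qemu_event_set() */
            pthread_cond_broadcast(&w->done_cond);
            pthread_mutex_unlock(&w->lock);
        }
        /* the real worker sleeps until it is kicked; this sketch just spins */
    }
    return NULL;
}

/* Requester side: arm the event, raise the flag, wait for completion. */
static void request_flush(worker_state *w)
{
    pthread_mutex_lock(&w->lock);
    w->done = false;                                 /* cf. qemu_event_reset() */
    pthread_mutex_unlock(&w->lock);

    atomic_store(&w->flush_pending, true);           /* cf. qatomic_set(flush_pending) */

    pthread_mutex_lock(&w->lock);                    /* cf. qemu_event_wait() */
    while (!w->done) {
        pthread_cond_wait(&w->done_cond, &w->lock);
    }
    pthread_mutex_unlock(&w->lock);
}

int main(void)
{
    static worker_state w = { .lock = PTHREAD_MUTEX_INITIALIZER,
                              .done_cond = PTHREAD_COND_INITIALIZER };
    pthread_t tid;

    pthread_create(&tid, NULL, worker_loop, &w);
    request_flush(&w);
    printf("requester: flush complete\n");
    return 0;
}

In the actual diff, the same shape appears in nv2a_reset(), nv2a_vm_state_change(), and nv2a_set_surface_scale_factor(): reset the completion event, set the pending flag, release the locks (including the iothread lock) so the worker can run, then wait on the event.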
@@ -287,6 +287,20 @@ static void nv2a_unlock_fifo(NV2AState *d)
static void nv2a_reset(NV2AState *d)
{
    nv2a_lock_fifo(d);
    bool halted = qatomic_read(&d->pfifo.halt);
    if (!halted) {
        qatomic_set(&d->pfifo.halt, true);
    }
    qemu_event_reset(&d->pgraph.flush_complete);
    qatomic_set(&d->pgraph.flush_pending, true);
    nv2a_unlock_fifo(d);
    qemu_mutex_unlock_iothread();
    qemu_event_wait(&d->pgraph.flush_complete);
    qemu_mutex_lock_iothread();
    nv2a_lock_fifo(d);
    if (!halted) {
        qatomic_set(&d->pfifo.halt, false);
    }

    memset(d->pfifo.regs, 0, sizeof(d->pfifo.regs));
    memset(d->pgraph.regs, 0, sizeof(d->pgraph.regs));
@@ -306,7 +320,6 @@ static void nv2a_reset(NV2AState *d)
    d->pgraph.waiting_for_nop = false;
    d->pgraph.waiting_for_flip = false;
    d->pgraph.waiting_for_context_switch = false;
    d->pgraph.flush_pending = true;

    d->pmc.pending_interrupts = 0;
    d->pfifo.pending_interrupts = 0;
@@ -381,16 +394,26 @@ static void nv2a_vm_state_change(void *opaque, bool running, RunState state)
{
    NV2AState *d = opaque;
    if (state == RUN_STATE_SAVE_VM) {
        // FIXME: writeback all surfaces to RAM before snapshot
        nv2a_lock_fifo(d);
        qatomic_set(&d->pfifo.halt, true);
        qatomic_set(&d->pgraph.download_dirty_surfaces_pending, true);
        qemu_event_reset(&d->pgraph.dirty_surfaces_download_complete);
        nv2a_unlock_fifo(d);
        qemu_mutex_unlock_iothread();
        qemu_event_wait(&d->pgraph.dirty_surfaces_download_complete);
        qemu_mutex_lock_iothread();
        nv2a_lock_fifo(d);
    } else if (state == RUN_STATE_RESTORE_VM) {
        nv2a_reset(d); // Early reset to avoid changing any state during load
        nv2a_lock_fifo(d);
        qatomic_set(&d->pfifo.halt, true);
        nv2a_unlock_fifo(d);
    }
}

static int nv2a_post_save(void *opaque)
{
    NV2AState *d = opaque;
    qatomic_set(&d->pfifo.halt, false);
    nv2a_unlock_fifo(d);
    return 0;
}
@@ -405,7 +428,8 @@ static int nv2a_pre_load(void *opaque)
static int nv2a_post_load(void *opaque, int version_id)
{
    NV2AState *d = opaque;
    d->pgraph.flush_pending = true;
    qatomic_set(&d->pfifo.halt, false);
    qatomic_set(&d->pgraph.flush_pending, true);
    nv2a_unlock_fifo(d);
    return 0;
}
@@ -272,8 +272,6 @@ typedef struct PGRAPHState {
        int width;
        int height;
    } surface_binding_dim; // FIXME: Refactor
    bool downloads_pending;
    QemuEvent downloads_complete;

    hwaddr dma_a, dma_b;
    Lru texture_cache;
@@ -368,9 +366,15 @@ typedef struct PGRAPHState {
    bool waiting_for_nop;
    bool waiting_for_flip;
    bool waiting_for_context_switch;
    bool downloads_pending;
    bool download_dirty_surfaces_pending;
    bool flush_pending;
    bool gl_sync_pending;
    QemuEvent downloads_complete;
    QemuEvent dirty_surfaces_download_complete;
    QemuEvent flush_complete;
    QemuEvent gl_sync_complete;

    unsigned int surface_scale_factor;
    uint8_t *scale_buf;
} PGRAPHState;
@@ -410,6 +414,7 @@ typedef struct NV2AState {
        QemuCond fifo_cond;
        QemuCond fifo_idle_cond;
        bool fifo_kick;
        bool halt;
    } pfifo;

    struct {
@@ -511,6 +516,8 @@ int pgraph_method(NV2AState *d, unsigned int subchannel, unsigned int method,
                  size_t num_words_available, size_t max_lookahead_words);
void pgraph_gl_sync(NV2AState *d);
void pgraph_process_pending_downloads(NV2AState *d);
void pgraph_download_dirty_surfaces(NV2AState *d);
void pgraph_flush(NV2AState *d);

void *pfifo_thread(void *arg);
void pfifo_kick(NV2AState *d);
@@ -446,6 +446,31 @@ static void pfifo_run_pusher(NV2AState *d)
    }
}

static void process_requests(NV2AState *d)
{
    if (qatomic_read(&d->pgraph.downloads_pending) ||
        qatomic_read(&d->pgraph.download_dirty_surfaces_pending) ||
        qatomic_read(&d->pgraph.gl_sync_pending) ||
        qatomic_read(&d->pgraph.flush_pending)) {
        qemu_mutex_unlock(&d->pfifo.lock);
        qemu_mutex_lock(&d->pgraph.lock);
        if (qatomic_read(&d->pgraph.downloads_pending)) {
            pgraph_process_pending_downloads(d);
        }
        if (qatomic_read(&d->pgraph.download_dirty_surfaces_pending)) {
            pgraph_download_dirty_surfaces(d);
        }
        if (qatomic_read(&d->pgraph.gl_sync_pending)) {
            pgraph_gl_sync(d);
        }
        if (qatomic_read(&d->pgraph.flush_pending)) {
            pgraph_flush(d);
        }
        qemu_mutex_unlock(&d->pgraph.lock);
        qemu_mutex_lock(&d->pfifo.lock);
    }
}

void *pfifo_thread(void *arg)
{
    NV2AState *d = (NV2AState *)arg;
@@ -457,18 +482,12 @@ void *pfifo_thread(void *arg)
    while (true) {
        d->pfifo.fifo_kick = false;

        if (qatomic_read(&d->pgraph.downloads_pending)) {
            pgraph_process_pending_downloads(d);
            qatomic_set(&d->pgraph.downloads_pending, false);
        }
        process_requests(d);

        if (qatomic_read(&d->pgraph.gl_sync_pending)) {
            pgraph_gl_sync(d);
            qatomic_set(&d->pgraph.gl_sync_pending, false);
        if (!d->pfifo.halt) {
            pfifo_run_pusher(d);
        }

        pfifo_run_pusher(d);

        if (!d->pfifo.fifo_kick) {
            qemu_cond_broadcast(&d->pfifo.fifo_idle_cond);
@@ -613,11 +613,13 @@ void pgraph_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size)
    qemu_mutex_unlock(&d->pfifo.lock);
}

static void pgraph_flush(NV2AState *d)
void pgraph_flush(NV2AState *d)
{
    PGRAPHState *pg = &d->pgraph;

    // Clear last surface shape to force recreation of buffers at next draw
    bool update_surface = (pg->color_binding || pg->zeta_binding);

    /* Clear last surface shape to force recreation of buffers at next draw */
    pg->surface_color.draw_dirty = false;
    pg->surface_zeta.draw_dirty = false;
    memset(&pg->last_surface_shape, 0, sizeof(pg->last_surface_shape));
@@ -631,13 +633,20 @@ static void pgraph_flush(NV2AState *d)

    pgraph_mark_textures_possibly_dirty(d, 0, memory_region_size(d->vram));

    // Sync all RAM
    /* Sync all RAM */
    glBindBuffer(GL_ARRAY_BUFFER, d->pgraph.gl_memory_buffer);
    glBufferSubData(GL_ARRAY_BUFFER, 0, memory_region_size(d->vram), d->vram_ptr);

    // FIXME: Flush more?
    /* FIXME: Flush more? */

    pgraph_reload_surface_scale_factor(d);

    if (update_surface) {
        pgraph_update_surface(d, true, true, true);
    }

    qatomic_set(&d->pgraph.flush_pending, false);
    qemu_event_set(&d->pgraph.flush_complete);
}

#define METHOD_ADDR(gclass, name) \
@@ -744,11 +753,6 @@ int pgraph_method(NV2AState *d, unsigned int subchannel,

    PGRAPHState *pg = &d->pgraph;

    if (pg->flush_pending) {
        pgraph_flush(d);
        pg->flush_pending = false;
    }

    bool channel_valid =
        d->pgraph.regs[NV_PGRAPH_CTX_CONTROL] & NV_PGRAPH_CTX_CONTROL_CHID;
    assert(channel_valid);
@@ -3391,15 +3395,42 @@ void nv2a_gl_context_init(void)

void nv2a_set_surface_scale_factor(unsigned int scale)
{
    PGRAPHState *pg = &g_nv2a->pgraph;
    NV2AState *d = g_nv2a;

    xemu_settings_set_int(XEMU_SETTINGS_DISPLAY_RENDER_SCALE,
                          scale < 1 ? 1 : scale);
    xemu_settings_save();

    qemu_mutex_lock(&pg->lock);
    pg->flush_pending = true;
    qemu_mutex_unlock(&pg->lock);
    qemu_mutex_unlock_iothread();

    qemu_mutex_lock(&d->pfifo.lock);
    qatomic_set(&d->pfifo.halt, true);
    qemu_mutex_unlock(&d->pfifo.lock);

    qemu_mutex_lock(&d->pgraph.lock);
    qemu_event_reset(&d->pgraph.dirty_surfaces_download_complete);
    qatomic_set(&d->pgraph.download_dirty_surfaces_pending, true);
    qemu_mutex_unlock(&d->pgraph.lock);
    qemu_mutex_lock(&d->pfifo.lock);
    pfifo_kick(d);
    qemu_mutex_unlock(&d->pfifo.lock);
    qemu_event_wait(&d->pgraph.dirty_surfaces_download_complete);

    qemu_mutex_lock(&d->pgraph.lock);
    qemu_event_reset(&d->pgraph.flush_complete);
    qatomic_set(&d->pgraph.flush_pending, true);
    qemu_mutex_unlock(&d->pgraph.lock);
    qemu_mutex_lock(&d->pfifo.lock);
    pfifo_kick(d);
    qemu_mutex_unlock(&d->pfifo.lock);
    qemu_event_wait(&d->pgraph.flush_complete);

    qemu_mutex_lock(&d->pfifo.lock);
    qatomic_set(&d->pfifo.halt, false);
    pfifo_kick(d);
    qemu_mutex_unlock(&d->pfifo.lock);

    qemu_mutex_lock_iothread();
}

unsigned int nv2a_get_surface_scale_factor(void)
@@ -3430,6 +3461,8 @@ void pgraph_init(NV2AState *d)
    qemu_mutex_init(&pg->lock);
    qemu_event_init(&pg->gl_sync_complete, false);
    qemu_event_init(&pg->downloads_complete, false);
    qemu_event_init(&pg->dirty_surfaces_download_complete, false);
    qemu_event_init(&pg->flush_complete, false);

    /* fire up opengl */
    glo_set_current(g_nv2a_context_render);
@@ -4630,6 +4663,7 @@ void pgraph_gl_sync(NV2AState *d)
    /* Switch back to original context */
    glo_set_current(g_nv2a_context_render);

    qatomic_set(&d->pgraph.gl_sync_pending, false);
    qemu_event_set(&d->pgraph.gl_sync_complete);
}
@@ -5065,9 +5099,22 @@ void pgraph_process_pending_downloads(NV2AState *d)
        pgraph_download_surface_data(d, surface, false);
    }

    qatomic_set(&d->pgraph.downloads_pending, false);
    qemu_event_set(&d->pgraph.downloads_complete);
}

void pgraph_download_dirty_surfaces(NV2AState *d)
{
    SurfaceBinding *surface;
    QTAILQ_FOREACH(surface, &d->pgraph.surfaces, entry) {
        pgraph_download_surface_data_if_dirty(d, surface);
    }

    qatomic_set(&d->pgraph.download_dirty_surfaces_pending, false);
    qemu_event_set(&d->pgraph.dirty_surfaces_download_complete);
}


static void surface_copy_expand_row(uint8_t *out, uint8_t *in,
                                    unsigned int width,
                                    unsigned int bytes_per_pixel,