From 20f46806b3858b92e9d1b5cf586558d62bd5a913 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Fri, 3 Mar 2023 13:12:50 +0100
Subject: [PATCH] tb-maint: do not use mb_read/mb_set

The load side can use a relaxed load, which will surely happen before
the work item is run by async_safe_run_on_cpu() or before double-checking
under mmap_lock.  The store side can use an atomic RMW operation.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 accel/tcg/tb-maint.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index cb1f806f00..0dd173fbf0 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -746,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     tcg_region_reset_all();
 
     /* XXX: flush processor icache at this point if cache flush is expensive */
-    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
+    qatomic_inc(&tb_ctx.tb_flush_count);
 
 done:
     mmap_unlock();
@@ -758,7 +758,7 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
+        unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
 
         if (cpu_in_exclusive_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
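
The ordering argument in the commit message can be reproduced outside of QEMU.
The sketch below uses plain C11 atomics rather than QEMU's qatomic_* wrappers
(qatomic_read() corresponds to a relaxed load, qatomic_inc() to a sequentially
consistent fetch-and-add); the names flush_count, state_lock, flush_locked()
and request_flush() are hypothetical stand-ins for tb_flush_count, mmap_lock
and do_tb_flush(), so this is only an illustration of the pattern, not QEMU
code.

/*
 * Illustration only -- not QEMU code.  C11 atomics stand in for QEMU's
 * qatomic_* wrappers: qatomic_read() ~ a relaxed load, qatomic_inc() ~ a
 * seq-cst fetch-and-add.  flush_count, state_lock, flush_locked() and
 * request_flush() are hypothetical stand-ins for tb_flush_count,
 * mmap_lock and do_tb_flush().
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_uint flush_count;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Store side: an atomic RMW replaces the old store-with-barrier. */
static void flush_locked(unsigned seen_count)
{
    pthread_mutex_lock(&state_lock);
    /*
     * Double-check under the lock: if another thread already flushed,
     * the counter has moved on and this request becomes a no-op.
     */
    if (atomic_load_explicit(&flush_count, memory_order_relaxed) == seen_count) {
        /* ... the actual flush work would go here ... */
        atomic_fetch_add(&flush_count, 1);      /* like qatomic_inc() */
    }
    pthread_mutex_unlock(&state_lock);
}

/*
 * Load side: a relaxed load suffices, because the snapshot is only
 * acted upon after re-checking it under state_lock.
 */
static void request_flush(void)
{
    unsigned snapshot =
        atomic_load_explicit(&flush_count, memory_order_relaxed);
    flush_locked(snapshot);
}

int main(void)
{
    request_flush();
    request_flush();    /* re-checks under the lock, flushes again */
    printf("flush_count = %u\n",
           atomic_load_explicit(&flush_count, memory_order_relaxed));
    return 0;
}

The key point matches the commit message: the relaxed load may observe a stale
count, but the snapshot is only used after re-checking it under the lock (or,
in QEMU, inside the work item scheduled by async_safe_run_on_cpu()), so a
stale read at worst turns the request into a harmless no-op rather than a
correctness problem.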