mirror of https://github.com/xemu-project/xemu.git
tb-maint: do not use mb_read/mb_set
The load side can use a relaxed load, which will surely happen before the work item is run by async_safe_run_on_cpu() or before double-checking under mmap_lock. The store side can use an atomic RMW operation.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
8f593ba9c5
commit
20f46806b3
|
@@ -746,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
|
||||||
|
|
||||||
tcg_region_reset_all();
|
tcg_region_reset_all();
|
||||||
/* XXX: flush processor icache at this point if cache flush is expensive */
|
/* XXX: flush processor icache at this point if cache flush is expensive */
|
||||||
qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
|
qatomic_inc(&tb_ctx.tb_flush_count);
|
||||||
|
|
||||||
done:
|
done:
|
||||||
mmap_unlock();
|
mmap_unlock();
|
||||||
|
@@ -758,7 +758,7 @@ done:
|
||||||
void tb_flush(CPUState *cpu)
|
void tb_flush(CPUState *cpu)
|
||||||
{
|
{
|
||||||
if (tcg_enabled()) {
|
if (tcg_enabled()) {
|
||||||
unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
|
unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
|
||||||
|
|
||||||
if (cpu_in_exclusive_context(cpu)) {
|
if (cpu_in_exclusive_context(cpu)) {
|
||||||
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
|
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
|
||||||
|
|
Loading…
Reference in New Issue