rcu: make memory barriers more explicit
Prepare for introducing smp_mb_placeholder() and smp_mb_global(). The new smp_mb() in synchronize_rcu() is not strictly necessary, since the first atomic_mb_set for rcu_gp_ctr provides the required ordering. However, synchronize_rcu is not performance critical, and it *will* be necessary to introduce a smp_mb_global before calling wait_for_readers().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 729c0ddd3c
commit 77a8b8462b
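For context on the names in the message: the placeholder/global pair replaces the reader-side smp_mb() with a compiler-only barrier and has the writer force a real barrier on every thread instead. A minimal sketch of that idea, using the Linux membarrier(2) syscall; this illustrates the direction the message describes, not the code this commit adds:

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Reader hot path: only a compiler barrier; the CPU fence is elided. */
    static inline void smp_mb_placeholder(void)
    {
        __asm__ __volatile__("" ::: "memory");
    }

    /* Writer slow path: force a full memory barrier on all running threads,
     * turning any concurrently executing placeholder into a real barrier.
     */
    static inline void smp_mb_global(void)
    {
        syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0);
    }

This only pays off if the barriers on both sides are explicit statements that a later patch can swap out, which is what the hunks below arrange.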
include/qemu/rcu.h (13 additions, 2 deletions)

--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -79,7 +79,10 @@ static inline void rcu_read_lock(void)
     }
 
     ctr = atomic_read(&rcu_gp_ctr);
-    atomic_xchg(&p_rcu_reader->ctr, ctr);
+    atomic_set(&p_rcu_reader->ctr, ctr);
+
+    /* Write p_rcu_reader->ctr before reading RCU-protected pointers.  */
+    smp_mb();
 }
 
 static inline void rcu_read_unlock(void)
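This hunk trades the implicit full barrier of atomic_xchg() for a plain store followed by an explicit smp_mb(). The ordering is unchanged, but the barrier is now a visible, separately replaceable statement. A hedged C11 model of the two forms (stdatomic names, not QEMU's atomic.h macros):

    #include <stdatomic.h>

    static _Atomic unsigned ctr_cell;

    /* Old form: the exchange is sequentially consistent, so a full
     * barrier is implied but hidden inside the operation.
     */
    static void publish_implicit(unsigned ctr)
    {
        atomic_exchange(&ctr_cell, ctr);
    }

    /* New form: a relaxed store plus an explicit full fence.  Same
     * ordering, but the fence is a statement that a later patch can
     * replace (e.g. with smp_mb_placeholder()).
     */
    static void publish_explicit(unsigned ctr)
    {
        atomic_store_explicit(&ctr_cell, ctr, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
    }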
@@ -91,7 +94,15 @@ static inline void rcu_read_unlock(void)
         return;
     }
 
-    atomic_xchg(&p_rcu_reader->ctr, 0);
+    /* Ensure that the critical section is seen to precede the
+     * store to p_rcu_reader->ctr.  Together with the following
+     * smp_mb(), this ensures writes to p_rcu_reader->ctr
+     * are sequentially consistent.
+     */
+    atomic_store_release(&p_rcu_reader->ctr, 0);
+
+    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting.  */
+    smp_mb();
     if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
         atomic_set(&p_rcu_reader->waiting, false);
         qemu_event_set(&rcu_gp_event);
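On the unlock side the xchg is split in two: a release store keeps every write of the critical section ordered before the ctr update, and the full fence then orders the ctr store before the load of the waiting flag. A C11 model of the resulting sequence (sketch; QEMU's atomic_store_release/smp_mb map roughly onto these builtins):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long ctr;
    static atomic_bool waiting;

    static void reader_unlock_model(void)
    {
        /* Release store: everything the critical section wrote becomes
         * visible no later than the ctr update.
         */
        atomic_store_explicit(&ctr, 0, memory_order_release);

        /* Full fence: the ctr store is ordered before the waiting load,
         * closing the store-buffering window against wait_for_readers().
         */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load_explicit(&waiting, memory_order_relaxed)) {
            atomic_store_explicit(&waiting, false, memory_order_relaxed);
            /* ... wake the writer, qemu_event_set() in QEMU ... */
        }
    }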
util/rcu.c (9 additions, 3 deletions)

--- a/util/rcu.c
+++ b/util/rcu.c
@@ -92,8 +92,9 @@ static void wait_for_readers(void)
             atomic_set(&index->waiting, true);
         }
 
-        /* Here, order the stores to index->waiting before the
-         * loads of index->ctr.
+        /* Here, order the stores to index->waiting before the loads of
+         * index->ctr.  Pairs with smp_mb() in rcu_read_unlock(),
+         * ensuring that the loads of index->ctr are sequentially consistent.
          */
         smp_mb();
 
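The new "pairs with" comment refers to the classic store-buffering pattern: the writer stores waiting and loads ctr, while the reader stores ctr and loads waiting. A sketch of the two sides in C11; with a full fence between each store/load pair, at least one side must observe the other's store:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long reader_ctr = 1;  /* nonzero: in critical section */
    static atomic_bool reader_waiting = false;

    static bool writer_sees_reader_gone(void)    /* models wait_for_readers() */
    {
        atomic_store_explicit(&reader_waiting, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* this smp_mb() */
        return atomic_load_explicit(&reader_ctr, memory_order_relaxed) == 0;
    }

    static bool reader_sees_writer_waiting(void) /* models rcu_read_unlock() */
    {
        atomic_store_explicit(&reader_ctr, 0, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() in unlock */
        return atomic_load_explicit(&reader_waiting, memory_order_relaxed);
    }

With both fences present, the outcome where both functions return false is forbidden: either the writer sees ctr == 0, or the reader sees waiting and wakes it. Drop either fence and the writer can wait forever for a reader that has already left its critical section.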
@@ -142,8 +143,13 @@ static void wait_for_readers(void)
 void synchronize_rcu(void)
 {
     qemu_mutex_lock(&rcu_sync_lock);
-    qemu_mutex_lock(&rcu_registry_lock);
 
+    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
+     * Pairs with smp_mb() in rcu_read_lock().
+     */
+    smp_mb();
+
+    qemu_mutex_lock(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
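The smp_mb() added here orders the publication of RCU-protected pointers before synchronize_rcu() reads any reader's ctr, pairing with the smp_mb() in rcu_read_lock(). A typical update-side pattern this protects, sketched against QEMU's RCU API (atomic_rcu_set/atomic_rcu_read are the usual helpers from qemu/atomic.h; struct config and the two functions are made up for illustration):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/rcu.h"

    struct config { int value; };            /* hypothetical example type */
    static struct config *current_config;

    /* Update side: publish the new pointer, wait one grace period, free
     * the old one.  The smp_mb() now inside synchronize_rcu() orders the
     * publish before the reads of each reader's ctr.
     */
    static void update_config(struct config *newcfg)
    {
        struct config *old = atomic_rcu_read(&current_config);

        atomic_rcu_set(&current_config, newcfg);
        synchronize_rcu();
        g_free(old);                         /* no reader still holds it */
    }

    /* Read side: the smp_mb() in rcu_read_lock() pairs with the one above. */
    static int read_config_value(void)
    {
        int val;

        rcu_read_lock();
        val = atomic_rcu_read(&current_config)->value;
        rcu_read_unlock();
        return val;
    }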