srcu: Document __srcu_read_{,un}lock_fast() implicit RCU readers
This commit documents the implicit RCU readers that are implied by the this_cpu_inc() and atomic_long_inc() operations in __srcu_read_lock_fast() and __srcu_read_unlock_fast(). While in the area, fix the documentation of the memory pairing of atomic_long_inc() in __srcu_read_lock_fast(). [ paulmck: Apply Joel Fernandes feedback. ] Signed-off-by: Paul E. McKenney <paulmck@kernel.org> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: <bpf@vger.kernel.org>
This commit is contained in:
parent
cacadb6303
commit
be975448a4
@ -232,9 +232,27 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
|
||||
* srcu_read_unlock_fast().
|
||||
*
|
||||
* Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
|
||||
* critical sections either because they disable interrupts, because they
|
||||
* are a single instruction, or because they are a read-modify-write atomic
|
||||
* operation, depending on the whims of the architecture.
|
||||
* critical sections either because they disable interrupts, because
|
||||
* they are a single instruction, or because they are read-modify-write
|
||||
* atomic operations, depending on the whims of the architecture.
|
||||
* This matters because the SRCU-fast grace-period mechanism uses either
|
||||
* synchronize_rcu() or synchronize_rcu_expedited(), that is, RCU,
|
||||
* *not* SRCU, in order to eliminate the need for the read-side smp_mb()
|
||||
* invocations that are used by srcu_read_lock() and srcu_read_unlock().
|
||||
* The __srcu_read_unlock_fast() function also relies on this same RCU
|
||||
* (again, *not* SRCU) trick to eliminate the need for smp_mb().
|
||||
*
|
||||
* The key point behind this RCU trick is that if any part of a given
|
||||
* RCU reader precedes the beginning of a given RCU grace period, then
|
||||
* the entirety of that RCU reader and everything preceding it happens
|
||||
* before the end of that same RCU grace period. Similarly, if any part
|
||||
* of a given RCU reader follows the end of a given RCU grace period,
|
||||
* then the entirety of that RCU reader and everything following it
|
||||
* happens after the beginning of that same RCU grace period. Therefore,
|
||||
* the operations labeled Y in __srcu_read_lock_fast() and those labeled Z
|
||||
* in __srcu_read_unlock_fast() are ordered against the corresponding SRCU
|
||||
* read-side critical section from the viewpoint of the SRCU grace period.
|
||||
* This is all the ordering that is required, hence no calls to smp_mb().
|
||||
*
|
||||
* This means that __srcu_read_lock_fast() is not all that fast
|
||||
* on architectures that support NMIs but do not supply NMI-safe
|
||||
@ -245,9 +263,9 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
|
||||
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
|
||||
this_cpu_inc(scp->srcu_locks.counter); /* Y */
|
||||
this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
|
||||
else
|
||||
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); /* Z */
|
||||
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
|
||||
barrier(); /* Avoid leaking the critical section. */
|
||||
return scp;
|
||||
}
|
||||
@ -258,23 +276,17 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
|
||||
* different CPU than that which was incremented by the corresponding
|
||||
* srcu_read_lock_fast(), but it must be within the same task.
|
||||
*
|
||||
* Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
|
||||
* critical sections either because they disable interrupts, because they
|
||||
* are a single instruction, or because they are a read-modify-write atomic
|
||||
* operation, depending on the whims of the architecture.
|
||||
*
|
||||
* This means that __srcu_read_unlock_fast() is not all that fast
|
||||
* on architectures that support NMIs but do not supply NMI-safe
|
||||
* implementations of this_cpu_inc().
|
||||
* Please see the __srcu_read_lock_fast() function's header comment for
|
||||
* information on implicit RCU readers and NMI safety.
|
||||
*/
|
||||
static inline void notrace
|
||||
__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
|
||||
{
|
||||
barrier(); /* Avoid leaking the critical section. */
|
||||
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
|
||||
this_cpu_inc(scp->srcu_unlocks.counter); /* Z */
|
||||
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
|
||||
else
|
||||
atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); /* Z */
|
||||
atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
|
||||
}
|
||||
|
||||
void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
|
||||
|
||||
Loading…
Reference in New Issue
Block a user