From 6aa9e78d6e9878b8053c93a829ffd24d5d9ccb07 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 18 Jan 2022 13:54:19 -0800
Subject: [PATCH] FROMGIT: rcu: Allow expedited RCU grace periods on incoming
 CPUs

Although it is usually safe to invoke synchronize_rcu_expedited() from a
preemption-enabled CPU-hotplug notifier, if it is invoked from a notifier
between CPUHP_AP_RCUTREE_ONLINE and CPUHP_AP_ACTIVE, its attempts to
invoke a workqueue handler will hang due to RCU waiting on a CPU that the
scheduler is not paying attention to.  This commit therefore expands use
of the existing workqueue-independent synchronize_rcu_expedited() from
early boot to also include CPUs that are being hotplugged.

Bug: 216238044
Link: https://lore.kernel.org/lkml/7359f994-8aaf-3cea-f5cf-c0d3929689d6@quicinc.com/
Reported-by: Mukesh Ojha
Cc: Tejun Heo
Signed-off-by: Paul E. McKenney
(cherry picked from commit 710f460c395af6b81df1c81043308aaa60d5e25c
 https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git rcu/next)
Change-Id: I3f81dee6deaf6a4504aec31e058785dc8cee6a3f
Signed-off-by: Mukesh Ojha
---
 kernel/rcu/tree_exp.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 9fb0f0916374..cc5f51fa2b28 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -812,7 +812,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
-	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+	bool no_wq;
 	struct rcu_exp_work rew;
 	struct rcu_node *rnp;
 	unsigned long s;
@@ -837,9 +837,15 @@ void synchronize_rcu_expedited(void)
 	if (exp_funnel_lock(s))
 		return;  /* Someone else did our work for us. */
 
+	/* Don't use workqueue during boot or from an incoming CPU. */
+	preempt_disable();
+	no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
+		!cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
+	preempt_enable();
+
 	/* Ensure that load happens before action based on it. */
-	if (unlikely(boottime)) {
-		/* Direct call during scheduler init and early_initcalls(). */
+	if (unlikely(no_wq)) {
+		/* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
 		rcu_exp_sel_wait_wake(s);
 	} else {
 		/* Marshall arguments & schedule the expedited grace period. */
@@ -857,7 +863,7 @@ void synchronize_rcu_expedited(void)
 	/* Let the next expedited grace period start. */
 	mutex_unlock(&rcu_state.exp_mutex);
 
-	if (likely(!boottime))
+	if (likely(!no_wq))
 		destroy_work_on_stack(&rew.rew_work);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);