rcu: Add READ_ONCE() to rcu_do_batch() access to rcu_divisor
authorPaul E. McKenney <paulmck@kernel.org>
Wed, 24 Jun 2020 00:09:27 +0000 (17:09 -0700)
committerPaul E. McKenney <paulmck@kernel.org>
Tue, 25 Aug 2020 01:36:06 +0000 (18:36 -0700)
Given that sysfs can change the value of rcu_divisor at any time, this
commit adds a READ_ONCE() to the sole access to that variable.  While in
the area, this commit also adds bounds checking, clamping the value to
a shift that makes sense for a signed long.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c

index acc926f..1dca14c 100644 (file)
@@ -2362,6 +2362,7 @@ int rcutree_dead_cpu(unsigned int cpu)
  */
 static void rcu_do_batch(struct rcu_data *rdp)
 {
+       int div;
        unsigned long flags;
        const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
                               rcu_segcblist_is_offloaded(&rdp->cblist);
@@ -2390,7 +2391,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
        rcu_nocb_lock(rdp);
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
        pending = rcu_segcblist_n_cbs(&rdp->cblist);
-       bl = max(rdp->blimit, pending >> rcu_divisor);
+       div = READ_ONCE(rcu_divisor);
+       div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
+       bl = max(rdp->blimit, pending >> div);
        if (unlikely(bl > 100))
                tlimit = local_clock() + rcu_resched_ns;
        trace_rcu_batch_start(rcu_state.name,