root/kernel/rcu/tree_exp.h


DEFINITIONS

This source file includes the following definitions.
  1. rcu_exp_gp_seq_start
  2. rcu_exp_gp_seq_endval
  3. rcu_exp_gp_seq_end
  4. rcu_exp_gp_seq_snap
  5. rcu_exp_gp_seq_done
  6. sync_exp_reset_tree_hotplug
  7. sync_exp_reset_tree
  8. sync_rcu_preempt_exp_done
  9. sync_rcu_preempt_exp_done_unlocked
  10. __rcu_report_exp_rnp
  11. rcu_report_exp_rnp
  12. rcu_report_exp_cpu_mult
  13. rcu_report_exp_rdp
  14. sync_exp_work_done
  15. exp_funnel_lock
  16. sync_rcu_exp_select_node_cpus
  17. sync_rcu_exp_select_cpus
  18. synchronize_sched_expedited_wait
  19. rcu_exp_wait_wake
  20. rcu_exp_sel_wait_wake
  21. wait_rcu_exp_gp
  22. rcu_exp_handler
  23. sync_sched_exp_online_cleanup
  24. rcu_print_task_exp_stall
  25. rcu_exp_need_qs
  26. rcu_exp_handler
  27. sync_sched_exp_online_cleanup
  28. rcu_print_task_exp_stall
  29. synchronize_rcu_expedited

   1 /* SPDX-License-Identifier: GPL-2.0+ */
   2 /*
   3  * RCU expedited grace periods
   4  *
   5  * Copyright IBM Corporation, 2016
   6  *
   7  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8  */
   9 
  10 #include <linux/lockdep.h>
  11 
  12 static void rcu_exp_handler(void *unused);
  13 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
  14 
  15 /*
  16  * Record the start of an expedited grace period.
  17  */
  18 static void rcu_exp_gp_seq_start(void)
  19 {
  20         rcu_seq_start(&rcu_state.expedited_sequence);
  21 }
  22 
  23 /*
  24  * Return the value that the expedited-grace-period counter will have
  25  * at the end of the current grace period.
  26  */
  27 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
  28 {
  29         return rcu_seq_endval(&rcu_state.expedited_sequence);
  30 }
  31 
  32 /*
  33  * Record the end of an expedited grace period.
  34  */
  35 static void rcu_exp_gp_seq_end(void)
  36 {
  37         rcu_seq_end(&rcu_state.expedited_sequence);
  38         smp_mb(); /* Ensure that consecutive grace periods serialize. */
  39 }
  40 
  41 /*
  42  * Take a snapshot of the expedited-grace-period counter.
  43  */
  44 static unsigned long rcu_exp_gp_seq_snap(void)
  45 {
  46         unsigned long s;
  47 
  48         smp_mb(); /* Caller's modifications seen first by other CPUs. */
  49         s = rcu_seq_snap(&rcu_state.expedited_sequence);
  50         trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
  51         return s;
  52 }
  53 
  54 /*
  55  * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
  56  * if a full expedited grace period has elapsed since that snapshot
  57  * was taken.
  58  */
  59 static bool rcu_exp_gp_seq_done(unsigned long s)
  60 {
  61         return rcu_seq_done(&rcu_state.expedited_sequence, s);
  62 }
  63 
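/*
 * Illustrative sketch of how the snapshot/done pair above is used,
 * mirroring sync_exp_work_done() and exp_funnel_lock() later in this
 * file: take a snapshot before starting, then any later check of that
 * snapshot says whether a full expedited grace period has elapsed in
 * the meantime.
 *
 *	unsigned long s = rcu_exp_gp_seq_snap();
 *
 *	...		// try to start or piggy-back on an expedited GP
 *
 *	if (rcu_exp_gp_seq_done(s))
 *		return;	// an expedited GP already covered us
 */
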
  64 /*
  65  * Reset the ->expmaskinit values in the rcu_node tree to reflect any
  66  * recent CPU-online activity.  Note that these masks are not cleared
  67  * when CPUs go offline, so they reflect the union of all CPUs that have
  68  * ever been online.  This means that this function normally takes its
  69  * no-work-to-do fastpath.
  70  */
  71 static void sync_exp_reset_tree_hotplug(void)
  72 {
  73         bool done;
  74         unsigned long flags;
  75         unsigned long mask;
  76         unsigned long oldmask;
  77         int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
  78         struct rcu_node *rnp;
  79         struct rcu_node *rnp_up;
  80 
  81         /* If no new CPUs onlined since last time, nothing to do. */
  82         if (likely(ncpus == rcu_state.ncpus_snap))
  83                 return;
  84         rcu_state.ncpus_snap = ncpus;
  85 
  86         /*
  87          * Each pass through the following loop propagates newly onlined
  88          * CPUs for the current rcu_node structure up the rcu_node tree.
  89          */
  90         rcu_for_each_leaf_node(rnp) {
  91                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
  92                 if (rnp->expmaskinit == rnp->expmaskinitnext) {
  93                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  94                         continue;  /* No new CPUs, nothing to do. */
  95                 }
  96 
  97                 /* Update this node's mask, track old value for propagation. */
  98                 oldmask = rnp->expmaskinit;
  99                 rnp->expmaskinit = rnp->expmaskinitnext;
 100                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 101 
 102                 /* If was already nonzero, nothing to propagate. */
 103                 if (oldmask)
 104                         continue;
 105 
 106                 /* Propagate the new CPU up the tree. */
 107                 mask = rnp->grpmask;
 108                 rnp_up = rnp->parent;
 109                 done = false;
 110                 while (rnp_up) {
 111                         raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
 112                         if (rnp_up->expmaskinit)
 113                                 done = true;
 114                         rnp_up->expmaskinit |= mask;
 115                         raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 116                         if (done)
 117                                 break;
 118                         mask = rnp_up->grpmask;
 119                         rnp_up = rnp_up->parent;
 120                 }
 121         }
 122 }
 123 
 124 /*
 125  * Reset the ->expmask values in the rcu_node tree in preparation for
 126  * a new expedited grace period.
 127  */
 128 static void __maybe_unused sync_exp_reset_tree(void)
 129 {
 130         unsigned long flags;
 131         struct rcu_node *rnp;
 132 
 133         sync_exp_reset_tree_hotplug();
 134         rcu_for_each_node_breadth_first(rnp) {
 135                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
 136                 WARN_ON_ONCE(rnp->expmask);
 137                 WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
 138                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 139         }
 140 }
 141 
 142 /*
 143  * Return non-zero if there is no RCU expedited grace period in progress
 144  * for the specified rcu_node structure, in other words, if all CPUs and
 145  * tasks covered by the specified rcu_node structure have done their bit
 146  * for the current expedited grace period.  Works only for preemptible
 147  * RCU -- other RCU implementations use other means.
 148  *
 149  * Caller must hold the specified rcu_node structure's ->lock.
 150  */
 151 static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 152 {
 153         raw_lockdep_assert_held_rcu_node(rnp);
 154 
 155         return rnp->exp_tasks == NULL &&
 156                READ_ONCE(rnp->expmask) == 0;
 157 }
 158 
 159 /*
 160  * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 161  * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
 162  * itself.
 163  */
 164 static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
 165 {
 166         unsigned long flags;
 167         bool ret;
 168 
 169         raw_spin_lock_irqsave_rcu_node(rnp, flags);
 170         ret = sync_rcu_preempt_exp_done(rnp);
 171         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 172 
 173         return ret;
 174 }
 175 
 176 
 177 /*
 178  * Report the exit from RCU read-side critical section for the last task
 179  * that queued itself during or before the current expedited preemptible-RCU
 180  * grace period.  This event is reported either to the rcu_node structure on
 181  * which the task was queued or to one of that rcu_node structure's ancestors,
 182  * recursively up the tree.  (Calm down, calm down, we do the recursion
 183  * iteratively!)
 184  *
 185  * Caller must hold the specified rcu_node structure's ->lock.
 186  */
 187 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 188                                  bool wake, unsigned long flags)
 189         __releases(rnp->lock)
 190 {
 191         unsigned long mask;
 192 
 193         for (;;) {
 194                 if (!sync_rcu_preempt_exp_done(rnp)) {
 195                         if (!rnp->expmask)
 196                                 rcu_initiate_boost(rnp, flags);
 197                         else
 198                                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 199                         break;
 200                 }
 201                 if (rnp->parent == NULL) {
 202                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 203                         if (wake) {
 204                                 smp_mb(); /* EGP done before wake_up(). */
 205                                 swake_up_one(&rcu_state.expedited_wq);
 206                         }
 207                         break;
 208                 }
 209                 mask = rnp->grpmask;
 210                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 211                 rnp = rnp->parent;
 212                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 213                 WARN_ON_ONCE(!(rnp->expmask & mask));
 214                 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
 215         }
 216 }
 217 
 218 /*
 219  * Report expedited quiescent state for specified node.  This is a
 220  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 221  */
 222 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
 223 {
 224         unsigned long flags;
 225 
 226         raw_spin_lock_irqsave_rcu_node(rnp, flags);
 227         __rcu_report_exp_rnp(rnp, wake, flags);
 228 }
 229 
 230 /*
 231  * Report expedited quiescent state for multiple CPUs, all covered by the
 232  * specified leaf rcu_node structure.
 233  */
 234 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
 235                                     unsigned long mask, bool wake)
 236 {
 237         unsigned long flags;
 238 
 239         raw_spin_lock_irqsave_rcu_node(rnp, flags);
 240         if (!(rnp->expmask & mask)) {
 241                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 242                 return;
 243         }
 244         WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
 245         __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
 246 }
 247 
 248 /*
 249  * Report expedited quiescent state for specified rcu_data (CPU).
 250  */
 251 static void rcu_report_exp_rdp(struct rcu_data *rdp)
 252 {
 253         WRITE_ONCE(rdp->exp_deferred_qs, false);
 254         rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 255 }
 256 
 257 /* Common code for work-done checking. */
 258 static bool sync_exp_work_done(unsigned long s)
 259 {
 260         if (rcu_exp_gp_seq_done(s)) {
 261                 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
 262                 smp_mb(); /* Ensure test happens before caller kfree(). */
 263                 return true;
 264         }
 265         return false;
 266 }
 267 
 268 /*
 269  * Funnel-lock acquisition for expedited grace periods.  Returns true
 270  * if some other task completed an expedited grace period that this task
 271  * can piggy-back on, and with no mutex held.  Otherwise, returns false
 272  * with the mutex held, indicating that the caller must actually do the
 273  * expedited grace period.
 274  */
 275 static bool exp_funnel_lock(unsigned long s)
 276 {
 277         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 278         struct rcu_node *rnp = rdp->mynode;
 279         struct rcu_node *rnp_root = rcu_get_root();
 280 
 281         /* Low-contention fastpath. */
 282         if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
 283             (rnp == rnp_root ||
 284              ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
 285             mutex_trylock(&rcu_state.exp_mutex))
 286                 goto fastpath;
 287 
 288         /*
 289          * Each pass through the following loop works its way up
 290          * the rcu_node tree, returning if others have done the work or
 291  * otherwise falling through to acquire ->exp_mutex.  The mapping
 292          * from CPU to rcu_node structure can be inexact, as it is just
 293          * promoting locality and is not strictly needed for correctness.
 294          */
 295         for (; rnp != NULL; rnp = rnp->parent) {
 296                 if (sync_exp_work_done(s))
 297                         return true;
 298 
 299                 /* Work not done, either wait here or go up. */
 300                 spin_lock(&rnp->exp_lock);
 301                 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
 302 
 303                         /* Someone else doing GP, so wait for them. */
 304                         spin_unlock(&rnp->exp_lock);
 305                         trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
 306                                                   rnp->grplo, rnp->grphi,
 307                                                   TPS("wait"));
 308                         wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 309                                    sync_exp_work_done(s));
 310                         return true;
 311                 }
 312                 rnp->exp_seq_rq = s; /* Followers can wait on us. */
 313                 spin_unlock(&rnp->exp_lock);
 314                 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
 315                                           rnp->grplo, rnp->grphi, TPS("nxtlvl"));
 316         }
 317         mutex_lock(&rcu_state.exp_mutex);
 318 fastpath:
 319         if (sync_exp_work_done(s)) {
 320                 mutex_unlock(&rcu_state.exp_mutex);
 321                 return true;
 322         }
 323         rcu_exp_gp_seq_start();
 324         trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
 325         return false;
 326 }
 327 
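/*
 * Caller-side sketch of the funnel-lock contract, loosely following the
 * mid-boot direct-call path of synchronize_rcu_expedited() below: a true
 * return means a concurrent expedited grace period already covered us
 * (and no mutex is held), while a false return means we now hold
 * ->exp_mutex and must drive the grace period ourselves.
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;				// piggy-backed, nothing to do
 *	rcu_exp_sel_wait_wake(s);		// select CPUs, wait, wake waiters
 *	mutex_unlock(&rcu_state.exp_mutex);	// allow the next expedited GP
 */
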
 328 /*
 329  * Select the CPUs within the specified rcu_node that the upcoming
 330  * expedited grace period needs to wait for.
 331  */
 332 static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 333 {
 334         int cpu;
 335         unsigned long flags;
 336         unsigned long mask_ofl_test;
 337         unsigned long mask_ofl_ipi;
 338         int ret;
 339         struct rcu_exp_work *rewp =
 340                 container_of(wp, struct rcu_exp_work, rew_work);
 341         struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
 342 
 343         raw_spin_lock_irqsave_rcu_node(rnp, flags);
 344 
 345         /* Each pass checks a CPU for identity, offline, and idle. */
 346         mask_ofl_test = 0;
 347         for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 348                 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
 349                 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 350                 int snap;
 351 
 352                 if (raw_smp_processor_id() == cpu ||
 353                     !(rnp->qsmaskinitnext & mask)) {
 354                         mask_ofl_test |= mask;
 355                 } else {
 356                         snap = rcu_dynticks_snap(rdp);
 357                         if (rcu_dynticks_in_eqs(snap))
 358                                 mask_ofl_test |= mask;
 359                         else
 360                                 rdp->exp_dynticks_snap = snap;
 361                 }
 362         }
 363         mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
 364 
 365         /*
 366          * Need to wait for any blocked tasks as well.  Note that
 367          * additional blocking tasks will also block the expedited GP
 368          * until such time as the ->expmask bits are cleared.
 369          */
 370         if (rcu_preempt_has_tasks(rnp))
 371                 rnp->exp_tasks = rnp->blkd_tasks.next;
 372         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 373 
 374         /* IPI the remaining CPUs for expedited quiescent state. */
 375         for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
 376                 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
 377                 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 378 
 379 retry_ipi:
 380                 if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
 381                         mask_ofl_test |= mask;
 382                         continue;
 383                 }
 384                 if (get_cpu() == cpu) {
 385                         put_cpu();
 386                         continue;
 387                 }
 388                 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
 389                 put_cpu();
 390                 if (!ret) {
 391                         mask_ofl_ipi &= ~mask;
 392                         continue;
 393                 }
 394                 /* Failed, raced with CPU hotplug operation. */
 395                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
 396                 if ((rnp->qsmaskinitnext & mask) &&
 397                     (rnp->expmask & mask)) {
 398                         /* Online, so delay for a bit and try again. */
 399                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 400                         trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
 401                         schedule_timeout_uninterruptible(1);
 402                         goto retry_ipi;
 403                 }
 404                 /* CPU really is offline, so we can ignore it. */
 405                 if (!(rnp->expmask & mask))
 406                         mask_ofl_ipi &= ~mask;
 407                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 408         }
 409         /* Report quiescent states for those that went offline. */
 410         mask_ofl_test |= mask_ofl_ipi;
 411         if (mask_ofl_test)
 412                 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
 413 }
 414 
 415 /*
 416  * Select the nodes that the upcoming expedited grace period needs
 417  * to wait for.
 418  */
 419 static void sync_rcu_exp_select_cpus(void)
 420 {
 421         int cpu;
 422         struct rcu_node *rnp;
 423 
 424         trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
 425         sync_exp_reset_tree();
 426         trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
 427 
 428         /* Schedule work for each leaf rcu_node structure. */
 429         rcu_for_each_leaf_node(rnp) {
 430                 rnp->exp_need_flush = false;
 431                 if (!READ_ONCE(rnp->expmask))
 432                         continue; /* Avoid early boot non-existent wq. */
 433                 if (!READ_ONCE(rcu_par_gp_wq) ||
 434                     rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
 435                     rcu_is_last_leaf_node(rnp)) {
 436                         /* No workqueues yet or last leaf, do direct call. */
 437                         sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
 438                         continue;
 439                 }
 440                 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
 441                 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
 442                 /* If all offline, queue the work on an unbound CPU. */
 443                 if (unlikely(cpu > rnp->grphi - rnp->grplo))
 444                         cpu = WORK_CPU_UNBOUND;
 445                 else
 446                         cpu += rnp->grplo;
 447                 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
 448                 rnp->exp_need_flush = true;
 449         }
 450 
 451         /* Wait for workqueue jobs (if any) to complete. */
 452         rcu_for_each_leaf_node(rnp)
 453                 if (rnp->exp_need_flush)
 454                         flush_work(&rnp->rew.rew_work);
 455 }
 456 
 457 static void synchronize_sched_expedited_wait(void)
 458 {
 459         int cpu;
 460         unsigned long jiffies_stall;
 461         unsigned long jiffies_start;
 462         unsigned long mask;
 463         int ndetected;
 464         struct rcu_node *rnp;
 465         struct rcu_node *rnp_root = rcu_get_root();
 466         int ret;
 467 
 468         trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
 469         jiffies_stall = rcu_jiffies_till_stall_check();
 470         jiffies_start = jiffies;
 471 
 472         for (;;) {
 473                 ret = swait_event_timeout_exclusive(
 474                                 rcu_state.expedited_wq,
 475                                 sync_rcu_preempt_exp_done_unlocked(rnp_root),
 476                                 jiffies_stall);
 477                 if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
 478                         return;
 479                 WARN_ON(ret < 0);  /* workqueues should not be signaled. */
 480                 if (rcu_cpu_stall_suppress)
 481                         continue;
 482                 panic_on_rcu_stall();
 483                 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 484                        rcu_state.name);
 485                 ndetected = 0;
 486                 rcu_for_each_leaf_node(rnp) {
 487                         ndetected += rcu_print_task_exp_stall(rnp);
 488                         for_each_leaf_node_possible_cpu(rnp, cpu) {
 489                                 struct rcu_data *rdp;
 490 
 491                                 mask = leaf_node_cpu_bit(rnp, cpu);
 492                                 if (!(READ_ONCE(rnp->expmask) & mask))
 493                                         continue;
 494                                 ndetected++;
 495                                 rdp = per_cpu_ptr(&rcu_data, cpu);
 496                                 pr_cont(" %d-%c%c%c", cpu,
 497                                         "O."[!!cpu_online(cpu)],
 498                                         "o."[!!(rdp->grpmask & rnp->expmaskinit)],
 499                                         "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
 500                         }
 501                 }
 502                 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
 503                         jiffies - jiffies_start, rcu_state.expedited_sequence,
 504                         READ_ONCE(rnp_root->expmask),
 505                         ".T"[!!rnp_root->exp_tasks]);
 506                 if (ndetected) {
 507                         pr_err("blocking rcu_node structures:");
 508                         rcu_for_each_node_breadth_first(rnp) {
 509                                 if (rnp == rnp_root)
 510                                         continue; /* printed unconditionally */
 511                                 if (sync_rcu_preempt_exp_done_unlocked(rnp))
 512                                         continue;
 513                                 pr_cont(" l=%u:%d-%d:%#lx/%c",
 514                                         rnp->level, rnp->grplo, rnp->grphi,
 515                                         READ_ONCE(rnp->expmask),
 516                                         ".T"[!!rnp->exp_tasks]);
 517                         }
 518                         pr_cont("\n");
 519                 }
 520                 rcu_for_each_leaf_node(rnp) {
 521                         for_each_leaf_node_possible_cpu(rnp, cpu) {
 522                                 mask = leaf_node_cpu_bit(rnp, cpu);
 523                                 if (!(READ_ONCE(rnp->expmask) & mask))
 524                                         continue;
 525                                 dump_cpu_task(cpu);
 526                         }
 527                 }
 528                 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
 529         }
 530 }
 531 
 532 /*
 533  * Wait for the current expedited grace period to complete, and then
 534  * wake up everyone who piggybacked on the just-completed expedited
 535  * grace period.  Also update all the ->exp_seq_rq counters as needed
 536  * in order to avoid counter-wrap problems.
 537  */
 538 static void rcu_exp_wait_wake(unsigned long s)
 539 {
 540         struct rcu_node *rnp;
 541 
 542         synchronize_sched_expedited_wait();
 543 
 544         // Switch over to wakeup mode, allowing the next GP to proceed.
 545         // End the previous grace period only after acquiring the mutex
 546         // to ensure that only one GP runs concurrently with wakeups.
 547         mutex_lock(&rcu_state.exp_wake_mutex);
 548         rcu_exp_gp_seq_end();
 549         trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
 550 
 551         rcu_for_each_node_breadth_first(rnp) {
 552                 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
 553                         spin_lock(&rnp->exp_lock);
 554                         /* Recheck, avoid hang in case someone just arrived. */
 555                         if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
 556                                 rnp->exp_seq_rq = s;
 557                         spin_unlock(&rnp->exp_lock);
 558                 }
 559                 smp_mb(); /* All above changes before wakeup. */
 560                 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
 561         }
 562         trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
 563         mutex_unlock(&rcu_state.exp_wake_mutex);
 564 }
 565 
 566 /*
 567  * Common code to drive an expedited grace period forward, used by
 568  * workqueues and mid-boot-time tasks.
 569  */
 570 static void rcu_exp_sel_wait_wake(unsigned long s)
 571 {
 572         /* Initialize the rcu_node tree in preparation for the wait. */
 573         sync_rcu_exp_select_cpus();
 574 
 575         /* Wait and clean up, including waking everyone. */
 576         rcu_exp_wait_wake(s);
 577 }
 578 
 579 /*
 580  * Work-queue handler to drive an expedited grace period forward.
 581  */
 582 static void wait_rcu_exp_gp(struct work_struct *wp)
 583 {
 584         struct rcu_exp_work *rewp;
 585 
 586         rewp = container_of(wp, struct rcu_exp_work, rew_work);
 587         rcu_exp_sel_wait_wake(rewp->rew_s);
 588 }
 589 
 590 #ifdef CONFIG_PREEMPT_RCU
 591 
 592 /*
 593  * Remote handler for smp_call_function_single().  If there is an
 594  * RCU read-side critical section in effect, request that the
 595  * next rcu_read_unlock() record the quiescent state up the
 596  * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 597  * report the quiescent state.
 598  */
 599 static void rcu_exp_handler(void *unused)
 600 {
 601         unsigned long flags;
 602         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 603         struct rcu_node *rnp = rdp->mynode;
 604         struct task_struct *t = current;
 605 
 606         /*
 607          * First, the common case of not being in an RCU read-side
 608          * critical section.  If also enabled or idle, immediately
 609          * report the quiescent state, otherwise defer.
 610          */
 611         if (!t->rcu_read_lock_nesting) {
 612                 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 613                     rcu_dynticks_curr_cpu_in_eqs()) {
 614                         rcu_report_exp_rdp(rdp);
 615                 } else {
 616                         rdp->exp_deferred_qs = true;
 617                         set_tsk_need_resched(t);
 618                         set_preempt_need_resched();
 619                 }
 620                 return;
 621         }
 622 
 623         /*
 624          * Second, the less-common case of being in an RCU read-side
 625          * critical section.  In this case we can count on a future
 626          * rcu_read_unlock().  However, this rcu_read_unlock() might
 627          * execute on some other CPU, but in that case there will be
 628          * a future context switch.  Either way, if the expedited
 629          * grace period is still waiting on this CPU, set ->deferred_qs
 630          * so that the eventual quiescent state will be reported.
 631          * Note that there is a large group of race conditions that
 632          * can have caused this quiescent state to already have been
 633          * reported, so we really do need to check ->expmask.
 634          */
 635         if (t->rcu_read_lock_nesting > 0) {
 636                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
 637                 if (rnp->expmask & rdp->grpmask) {
 638                         rdp->exp_deferred_qs = true;
 639                         t->rcu_read_unlock_special.b.exp_hint = true;
 640                 }
 641                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 642                 return;
 643         }
 644 
 645         /*
 646          * The final and least likely case is where the interrupted
 647          * code was just about to or just finished exiting the RCU-preempt
 648          * read-side critical section, and no, we can't tell which.
 649          * So either way, set ->deferred_qs to flag later code that
 650          * a quiescent state is required.
 651          *
 652          * If the CPU is fully enabled (or if some buggy RCU-preempt
 653          * read-side critical section is being used from idle), just
 654          * invoke rcu_preempt_deferred_qs() to immediately report the
 655          * quiescent state.  We cannot use rcu_read_unlock_special()
 656          * because we are in an interrupt handler, which will cause that
 657          * function to take an early exit without doing anything.
 658          *
 659          * Otherwise, force a context switch after the CPU enables everything.
 660          */
 661         rdp->exp_deferred_qs = true;
 662         if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 663             WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
 664                 rcu_preempt_deferred_qs(t);
 665         } else {
 666                 set_tsk_need_resched(t);
 667                 set_preempt_need_resched();
 668         }
 669 }
 670 
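/*
 * Illustrative reader-side view of the handler above: when the IPI
 * interrupts a preemptible-RCU read-side critical section, the handler
 * only leaves hints behind, and the expedited quiescent state is
 * reported when the outermost rcu_read_unlock() runs.
 *
 *	rcu_read_lock();
 *	...			// IPI lands here: ->exp_deferred_qs and
 *				// .exp_hint are set, nothing reported yet
 *	rcu_read_unlock();	// deferred expedited QS reported from the
 *				// rcu_read_unlock() slow path
 */
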
 671 /* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
 672 static void sync_sched_exp_online_cleanup(int cpu)
 673 {
 674 }
 675 
 676 /*
 677  * Scan the current list of tasks blocked within RCU read-side critical
 678  * sections, printing out the tid of each that is blocking the current
 679  * expedited grace period.
 680  */
 681 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 682 {
 683         struct task_struct *t;
 684         int ndetected = 0;
 685 
 686         if (!rnp->exp_tasks)
 687                 return 0;
 688         t = list_entry(rnp->exp_tasks->prev,
 689                        struct task_struct, rcu_node_entry);
 690         list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 691                 pr_cont(" P%d", t->pid);
 692                 ndetected++;
 693         }
 694         return ndetected;
 695 }
 696 
 697 #else /* #ifdef CONFIG_PREEMPT_RCU */
 698 
 699 /* Request an expedited quiescent state. */
 700 static void rcu_exp_need_qs(void)
 701 {
 702         __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 703         /* Store .exp before .rcu_urgent_qs. */
 704         smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
 705         set_tsk_need_resched(current);
 706         set_preempt_need_resched();
 707 }
 708 
 709 /* Invoked on each online non-idle CPU for expedited quiescent state. */
 710 static void rcu_exp_handler(void *unused)
 711 {
 712         struct rcu_data *rdp;
 713         struct rcu_node *rnp;
 714 
 715         rdp = this_cpu_ptr(&rcu_data);
 716         rnp = rdp->mynode;
 717         if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 718             __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 719                 return;
 720         if (rcu_is_cpu_rrupt_from_idle()) {
 721                 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 722                 return;
 723         }
 724         rcu_exp_need_qs();
 725 }
 726 
 727 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
 728 static void sync_sched_exp_online_cleanup(int cpu)
 729 {
 730         unsigned long flags;
 731         int my_cpu;
 732         struct rcu_data *rdp;
 733         int ret;
 734         struct rcu_node *rnp;
 735 
 736         rdp = per_cpu_ptr(&rcu_data, cpu);
 737         rnp = rdp->mynode;
 738         my_cpu = get_cpu();
 739         /* Quiescent state either not needed or already requested, leave. */
 740         if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 741             __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
 742                 put_cpu();
 743                 return;
 744         }
 745         /* Quiescent state needed on current CPU, so set it up locally. */
 746         if (my_cpu == cpu) {
 747                 local_irq_save(flags);
 748                 rcu_exp_need_qs();
 749                 local_irq_restore(flags);
 750                 put_cpu();
 751                 return;
 752         }
 753         /* Quiescent state needed on some other CPU, send IPI. */
 754         ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
 755         put_cpu();
 756         WARN_ON_ONCE(ret);
 757 }
 758 
 759 /*
 760  * Because preemptible RCU does not exist, we never have to check for
 761  * tasks blocked within RCU read-side critical sections that are
 762  * blocking the current expedited grace period.
 763  */
 764 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 765 {
 766         return 0;
 767 }
 768 
 769 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 770 
 771 /**
 772  * synchronize_rcu_expedited - Brute-force RCU grace period
 773  *
 774  * Wait for an RCU grace period, but expedite it.  The basic idea is to
 775  * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 776  * the CPU is in an RCU critical section, and if so, it sets a flag that
 777  * causes the outermost rcu_read_unlock() to report the quiescent state
 778  * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 779  * other hand, if the CPU is not in an RCU read-side critical section,
 780  * the IPI handler reports the quiescent state immediately.
 781  *
 782  * Although this is a great improvement over previous expedited
 783  * implementations, it is still unfriendly to real-time workloads, and is
 784  * thus not recommended for any sort of common-case code.  In fact, if
 785  * you are using synchronize_rcu_expedited() in a loop, please restructure
 786  * your code to batch your updates, and then use a single synchronize_rcu()
 787  * instead.
 788  *
 789  * This has the same semantics as (but is more brutal than) synchronize_rcu().
 790  */
 791 void synchronize_rcu_expedited(void)
 792 {
 793         bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
 794         struct rcu_exp_work rew;
 795         struct rcu_node *rnp;
 796         unsigned long s;
 797 
 798         RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 799                          lock_is_held(&rcu_lock_map) ||
 800                          lock_is_held(&rcu_sched_lock_map),
 801                          "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
 802 
 803         /* Is the state such that the call is a grace period? */
 804         if (rcu_blocking_is_gp())
 805                 return;
 806 
 807         /* If expedited grace periods are prohibited, fall back to normal. */
 808         if (rcu_gp_is_normal()) {
 809                 wait_rcu_gp(call_rcu);
 810                 return;
 811         }
 812 
 813         /* Take a snapshot of the sequence number.  */
 814         s = rcu_exp_gp_seq_snap();
 815         if (exp_funnel_lock(s))
 816                 return;  /* Someone else did our work for us. */
 817 
 818         /* Ensure that load happens before action based on it. */
 819         if (unlikely(boottime)) {
 820                 /* Direct call during scheduler init and early_initcalls(). */
 821                 rcu_exp_sel_wait_wake(s);
 822         } else {
 823                 /* Marshall arguments & schedule the expedited grace period. */
 824                 rew.rew_s = s;
 825                 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
 826                 queue_work(rcu_gp_wq, &rew.rew_work);
 827         }
 828 
 829         /* Wait for expedited grace period to complete. */
 830         rnp = rcu_get_root();
 831         wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 832                    sync_exp_work_done(s));
 833         smp_mb(); /* Workqueue actions happen before return. */
 834 
 835         /* Let the next expedited grace period start. */
 836         mutex_unlock(&rcu_state.exp_mutex);
 837 
 838         if (likely(!boottime))
 839                 destroy_work_on_stack(&rew.rew_work);
 840 }
 841 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
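
/*
 * Hypothetical update-side usage sketch (my_lock, my_gp, and struct foo
 * are caller-provided names, not part of this file): publish a new
 * version, wait for all pre-existing readers with an expedited grace
 * period, and only then free the old version.
 *
 *	struct foo *old, *newp;
 *
 *	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *	...			// initialize *newp
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_gp, newp);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu_expedited();	// all readers of "old" have finished
 *	kfree(old);
 *
 * Per the kernel-doc above, if many such updates are issued back to back,
 * batch them so that a single grace period (ideally a plain
 * synchronize_rcu()) covers the whole batch.
 */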
