linux/kernel/rcu/tree_exp.h
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
        rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
        return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
        rcu_seq_end(&rcu_state.expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rcu_state.expedited_sequence);
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
        return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
        return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
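
/*
 * Like the rcu_seq_*() helpers in kernel/rcu/rcu.h that implement the
 * above, the expedited counter keeps the grace-period count in its upper
 * bits and a phase in its low-order bits (zero when idle, nonzero while a
 * grace period is in progress).  Assuming the two-bit phase field used by
 * those helpers, a counter idle at 0x100 snapshots to 0x104 (wait for one
 * full grace period), while a counter at 0x101 (grace period underway)
 * snapshots to 0x108, because the in-progress grace period might have
 * started too early to cover the caller's updates.
 */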

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rcu_state.ncpus_snap))
                return;
        rcu_state.ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If it was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}
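
/*
 * For example, consider a two-level tree in which leaf rcu_node A covers
 * CPUs 0-15 and has never had a CPU online.  When CPU 3 comes online, the
 * CPU-hotplug code sets CPU 3's bit in A's ->expmaskinitnext.  The next
 * expedited grace period copies that bit into A's ->expmaskinit above,
 * and because A's old ->expmaskinit was zero, A's ->grpmask must also be
 * newly set in the root's ->expmaskinit.  Had the old value been nonzero,
 * the root would already know about A, which is why the loop stops once
 * it reaches an ancestor that already had a nonzero ->expmaskinit.
 */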

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug();
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        return READ_ONCE(rnp->exp_tasks) == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        raw_lockdep_assert_held_rcu_node(rnp);
        for (;;) {
                if (!sync_rcu_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up_one(&rcu_state.expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        }
}
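
/*
 * Note that __rcu_report_exp_rnp() never clears bits in the rcu_node
 * structure it is handed: callers such as rcu_report_exp_cpu_mult() do
 * that first.  The loop above then clears this node's bit in each
 * successive ancestor whose entire subtree has quiesced, stopping at the
 * first ancestor still waiting on some other subtree (or waking the task
 * driving the expedited grace period once the root itself is clear).
 */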

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
                        continue;
                rdp->rcu_forced_tick_exp = false;
                tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
        }
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->exp_deferred_qs, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
        if (rcu_exp_gp_seq_done(s)) {
                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
                smp_mb(); /* Ensure test happens before caller kfree(). */
                return true;
        }
        return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root();

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rcu_state.exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work, or
         * otherwise falling through to acquire ->exp_mutex.  The mapping
         * from CPU to rcu_node structure can be inexact, as it is just
         * promoting locality and is not strictly needed for correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(s));
                        return true;
                }
                WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rcu_state.exp_mutex);
fastpath:
        if (sync_exp_work_done(s)) {
                mutex_unlock(&rcu_state.exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
        return false;
}
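
/*
 * To illustrate the funnel: a task that starts an expedited grace period
 * with snapshot s records s in ->exp_seq_rq at each level it visits on
 * the way to ->exp_mutex.  A later task whose snapshot is covered by a
 * recorded value sleeps at its first such rcu_node structure on the
 * ->exp_wq[] entry selected by the low-order bits of the grace-period
 * count, to be awakened by rcu_exp_wait_wake().  Contention on
 * ->exp_mutex is thus largely limited to tasks that actually need a new
 * grace period.
 */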

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks whether a CPU is the current CPU, offline, or idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                unsigned long mask = rdp->grpmask;
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(rdp);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                unsigned long mask = rdp->grpmask;

retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
                if (get_cpu() == cpu) {
                        put_cpu();
                        continue;
                }
                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                put_cpu();
                /* The CPU will report the QS in response to the IPI. */
                if (!ret)
                        continue;

                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
                        schedule_timeout_idle(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we must report its QS. */
                if (rnp->expmask & mask)
                        mask_ofl_test |= mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
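
/*
 * The IPI loop above must tolerate racing CPU-hotplug operations: a CPU
 * that had its ->expmask bit set can have gone offline before the IPI is
 * delivered.  A failed smp_call_function_single() therefore rechecks
 * ->qsmaskinitnext under the rcu_node lock.  If the CPU still appears to
 * be online, the failure was a transient hotplug race, so the IPI is
 * retried after a short sleep.  If it really is offline, its quiescent
 * state is instead reported on its behalf via mask_ofl_test.
 */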

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
        int cpu;
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
        sync_exp_reset_tree();
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                if (!READ_ONCE(rcu_par_gp_wq) ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
                        /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
                cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
                /* If all offline, queue the work on an unbound CPU. */
                if (unlikely(cpu > rnp->grphi - rnp->grplo))
                        cpu = WORK_CPU_UNBOUND;
                else
                        cpu += rnp->grplo;
                queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
                rnp->exp_need_flush = true;
        }

        /* Wait for workqueue jobs (if any) to complete. */
        rcu_for_each_leaf_node(rnp)
                if (rnp->exp_need_flush)
                        flush_work(&rnp->rew.rew_work);
}
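
/*
 * Queueing one work item per leaf rcu_node structure lets the dynticks
 * snapshots and IPIs for different leaves proceed in parallel on large
 * systems.  The direct calls above handle the cases where that
 * parallelism is unavailable (no workqueues during early boot) or where
 * the initiating task, which would otherwise just wait in flush_work(),
 * might as well scan the final leaf itself.
 */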

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
        int t;
        struct rcu_node *rnp_root = rcu_get_root();

        t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
                                          sync_rcu_exp_done_unlocked(rnp_root),
                                          tlimit);
        // Workqueues should not be signaled.
        if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
                return true;
        WARN_ON(t < 0);  /* workqueues should not be signaled. */
        return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
        int cpu;
        unsigned long j;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;
        if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
                if (synchronize_rcu_expedited_wait_once(1))
                        return;
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                if (rdp->rcu_forced_tick_exp)
                                        continue;
                                rdp->rcu_forced_tick_exp = true;
                                tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
                        }
                }
                j = READ_ONCE(jiffies_till_first_fqs);
                if (synchronize_rcu_expedited_wait_once(j + HZ))
                        return;
                WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
        }

        for (;;) {
                if (synchronize_rcu_expedited_wait_once(jiffies_stall))
                        return;
                if (rcu_stall_is_suppressed())
                        continue;
                panic_on_rcu_stall();
                trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rcu_state.name);
                ndetected = 0;
                rcu_for_each_leaf_node(rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
                        data_race(rnp_root->expmask),
                        ".T"[!!data_race(rnp_root->exp_tasks)]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures (internal RCU debug):");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        data_race(rnp->expmask),
                                        ".T"[!!data_race(rnp->exp_tasks)]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}
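
/*
 * In the stall printout above, each stalled CPU carries three flag
 * characters generated with the "X."[!!cond] idiom, which indexes a
 * two-character string: the letter is printed when cond is false and
 * '.' when it is true.  Thus 'O' marks a CPU that is not currently
 * online, 'o' one whose bit is clear in ->expmaskinit, and 'N' one whose
 * bit is clear in ->expmaskinitnext, with '.' in each position otherwise.
 */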

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_rcu_expedited_wait();

        // Switch over to wakeup mode, allowing the next GP to proceed.
        // End the previous grace period only after acquiring the mutex
        // to ensure that only one GP runs concurrently with wakeups.
        mutex_lock(&rcu_state.exp_wake_mutex);
        rcu_exp_gp_seq_end();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                WRITE_ONCE(rnp->exp_seq_rq, s);
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
}
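
/*
 * Advancing each ->exp_seq_rq to s above keeps long-idle rcu_node
 * structures from holding values so stale that later exp_funnel_lock()
 * comparisons misbehave once the counter wraps.  Note also that the
 * wake_up_all() targets only the ->exp_wq[] entry corresponding to the
 * just-completed grace period, so waiters already queued for a later
 * grace period, which hash to a different entry, are left asleep.
 */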

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus();

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
        int depth = rcu_preempt_depth();
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        struct task_struct *t = current;

        /*
         * First, the common case of not being in an RCU read-side
         * critical section.  If preemption and softirqs are also enabled
         * or the CPU is idle, immediately report the quiescent state;
         * otherwise defer.
         */
        if (!depth) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
                } else {
                        rdp->exp_deferred_qs = true;
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
                return;
        }

        /*
         * Second, the less-common case of being in an RCU read-side
         * critical section.  In this case we can count on a future
         * rcu_read_unlock().  However, this rcu_read_unlock() might
         * execute on some other CPU, but in that case there will be
         * a future context switch.  Either way, if the expedited
         * grace period is still waiting on this CPU, set ->exp_deferred_qs
         * so that the eventual quiescent state will be reported.
         * Note that there is a large group of race conditions that
         * can have caused this quiescent state to already have been
         * reported, so we really do need to check ->expmask.
         */
        if (depth > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        rdp->exp_deferred_qs = true;
                        t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }

        // Finally, negative nesting depth should not happen.
        WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        unsigned long flags;
        int ndetected = 0;
        struct task_struct *t;

        if (!READ_ONCE(rnp->exp_tasks))
                return 0;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        t = list_entry(rnp->exp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;

        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
                return;
        }
        rcu_exp_need_qs();
}
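
/*
 * In other words, without preemptible RCU, interrupting the idle loop is
 * itself a quiescent state and can be reported on the spot.  In all other
 * cases the handler merely records that an expedited quiescent state is
 * needed and nudges the scheduler; the actual report happens later, once
 * this CPU passes through a quiescent state and notices .cpu_no_qs.b.exp.
 */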

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        unsigned long flags;
        int my_cpu;
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;

        rdp = per_cpu_ptr(&rcu_data, cpu);
        rnp = rdp->mynode;
        my_cpu = get_cpu();
        /* Quiescent state either not needed or already requested, leave. */
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
                put_cpu();
                return;
        }
        /* Quiescent state needed on current CPU, so set it up locally. */
        if (my_cpu == cpu) {
                local_irq_save(flags);
                rcu_exp_need_qs();
                local_irq_restore(flags);
                put_cpu();
                return;
        }
        /* Quiescent state needed on some other CPU, send IPI. */
        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        put_cpu();
        WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
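 *
 * For example, rather than doing something like this (an illustrative
 * sketch only, in which update_foo() stands in for whatever per-element
 * update the caller performs):
 *
 *        list_for_each_entry(p, &mylist, node) {
 *                update_foo(p);
 *                synchronize_rcu_expedited();
 *        }
 *
 * batch the updates and then wait just once, preferably without
 * expediting:
 *
 *        list_for_each_entry(p, &mylist, node)
 *                update_foo(p);
 *        synchronize_rcu();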
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* Is the state such that the call is a grace period? */
        if (rcu_blocking_is_gp())
                return;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(boottime)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshal arguments & schedule the expedited grace period. */
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);

        if (likely(!boottime))
                destroy_work_on_stack(&rew.rew_work);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);