linux/kernel/rcu/tree_exp.h
   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/*
   3 * RCU expedited grace periods
   4 *
   5 * Copyright IBM Corporation, 2016
   6 *
   7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8 */
   9
  10#include <linux/lockdep.h>
  11
  12static void rcu_exp_handler(void *unused);
  13static int rcu_print_task_exp_stall(struct rcu_node *rnp);
  14
  15/*
  16 * Record the start of an expedited grace period.
  17 */
  18static void rcu_exp_gp_seq_start(void)
  19{
  20        rcu_seq_start(&rcu_state.expedited_sequence);
  21}
  22
  23/*
   24 * Return the value that the expedited-grace-period counter will have
  25 * at the end of the current grace period.
  26 */
  27static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
  28{
  29        return rcu_seq_endval(&rcu_state.expedited_sequence);
  30}
  31
  32/*
  33 * Record the end of an expedited grace period.
  34 */
  35static void rcu_exp_gp_seq_end(void)
  36{
  37        rcu_seq_end(&rcu_state.expedited_sequence);
  38        smp_mb(); /* Ensure that consecutive grace periods serialize. */
  39}
  40
  41/*
  42 * Take a snapshot of the expedited-grace-period counter.
  43 */
  44static unsigned long rcu_exp_gp_seq_snap(void)
  45{
  46        unsigned long s;
  47
  48        smp_mb(); /* Caller's modifications seen first by other CPUs. */
  49        s = rcu_seq_snap(&rcu_state.expedited_sequence);
  50        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
  51        return s;
  52}
  53
  54/*
  55 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
  56 * if a full expedited grace period has elapsed since that snapshot
  57 * was taken.
  58 */
  59static bool rcu_exp_gp_seq_done(unsigned long s)
  60{
  61        return rcu_seq_done(&rcu_state.expedited_sequence, s);
  62}
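
/*
 * Illustrative sketch, not part of the original file: the helpers above
 * compose into the snapshot/piggyback pattern used further down by
 * sync_exp_work_done() and exp_funnel_lock().  The function name below
 * is hypothetical.
 */
static void __maybe_unused example_exp_seq_pattern(void)
{
        unsigned long s;

        s = rcu_exp_gp_seq_snap();      /* Value ->expedited_sequence must reach. */
        if (rcu_exp_gp_seq_done(s))
                return;                 /* Another task's GP already sufficed. */
        rcu_exp_gp_seq_start();         /* Otherwise run one expedited GP... */
        /* ... force quiescent states on all CPUs this GP must wait for ... */
        rcu_exp_gp_seq_end();           /* ...and mark it complete. */
        WARN_ON_ONCE(!rcu_exp_gp_seq_done(s));  /* The snapshot is now satisfied. */
}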
  63
  64/*
  65 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
  66 * recent CPU-online activity.  Note that these masks are not cleared
  67 * when CPUs go offline, so they reflect the union of all CPUs that have
  68 * ever been online.  This means that this function normally takes its
  69 * no-work-to-do fastpath.
  70 */
  71static void sync_exp_reset_tree_hotplug(void)
  72{
  73        bool done;
  74        unsigned long flags;
  75        unsigned long mask;
  76        unsigned long oldmask;
  77        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
  78        struct rcu_node *rnp;
  79        struct rcu_node *rnp_up;
  80
  81        /* If no new CPUs onlined since last time, nothing to do. */
  82        if (likely(ncpus == rcu_state.ncpus_snap))
  83                return;
  84        rcu_state.ncpus_snap = ncpus;
  85
  86        /*
  87         * Each pass through the following loop propagates newly onlined
  88         * CPUs for the current rcu_node structure up the rcu_node tree.
  89         */
  90        rcu_for_each_leaf_node(rnp) {
  91                raw_spin_lock_irqsave_rcu_node(rnp, flags);
  92                if (rnp->expmaskinit == rnp->expmaskinitnext) {
  93                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  94                        continue;  /* No new CPUs, nothing to do. */
  95                }
  96
  97                /* Update this node's mask, track old value for propagation. */
  98                oldmask = rnp->expmaskinit;
  99                rnp->expmaskinit = rnp->expmaskinitnext;
 100                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 101
  102                /* If it was already nonzero, nothing to propagate. */
 103                if (oldmask)
 104                        continue;
 105
 106                /* Propagate the new CPU up the tree. */
 107                mask = rnp->grpmask;
 108                rnp_up = rnp->parent;
 109                done = false;
 110                while (rnp_up) {
 111                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
 112                        if (rnp_up->expmaskinit)
 113                                done = true;
 114                        rnp_up->expmaskinit |= mask;
 115                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 116                        if (done)
 117                                break;
 118                        mask = rnp_up->grpmask;
 119                        rnp_up = rnp_up->parent;
 120                }
 121        }
 122}
 123
 124/*
 125 * Reset the ->expmask values in the rcu_node tree in preparation for
 126 * a new expedited grace period.
 127 */
 128static void __maybe_unused sync_exp_reset_tree(void)
 129{
 130        unsigned long flags;
 131        struct rcu_node *rnp;
 132
 133        sync_exp_reset_tree_hotplug();
 134        rcu_for_each_node_breadth_first(rnp) {
 135                raw_spin_lock_irqsave_rcu_node(rnp, flags);
 136                WARN_ON_ONCE(rnp->expmask);
 137                rnp->expmask = rnp->expmaskinit;
 138                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 139        }
 140}
 141
 142/*
 143 * Return non-zero if there is no RCU expedited grace period in progress
 144 * for the specified rcu_node structure, in other words, if all CPUs and
 145 * tasks covered by the specified rcu_node structure have done their bit
 146 * for the current expedited grace period.  Works only for preemptible
  147 * RCU -- other RCU implementations use other means.
 148 *
  149 * Caller must hold the specified rcu_node structure's ->lock.
 150 */
 151static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 152{
 153        raw_lockdep_assert_held_rcu_node(rnp);
 154
 155        return rnp->exp_tasks == NULL &&
 156               READ_ONCE(rnp->expmask) == 0;
 157}
 158
 159/*
 160 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 161 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
  162 * itself.
 163 */
 164static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
 165{
 166        unsigned long flags;
 167        bool ret;
 168
 169        raw_spin_lock_irqsave_rcu_node(rnp, flags);
 170        ret = sync_rcu_preempt_exp_done(rnp);
 171        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 172
 173        return ret;
 174}
 175
 176
 177/*
 178 * Report the exit from RCU read-side critical section for the last task
 179 * that queued itself during or before the current expedited preemptible-RCU
 180 * grace period.  This event is reported either to the rcu_node structure on
 181 * which the task was queued or to one of that rcu_node structure's ancestors,
 182 * recursively up the tree.  (Calm down, calm down, we do the recursion
 183 * iteratively!)
 184 *
 185 * Caller must hold the specified rcu_node structure's ->lock.
 186 */
 187static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 188                                 bool wake, unsigned long flags)
 189        __releases(rnp->lock)
 190{
 191        unsigned long mask;
 192
 193        for (;;) {
 194                if (!sync_rcu_preempt_exp_done(rnp)) {
 195                        if (!rnp->expmask)
 196                                rcu_initiate_boost(rnp, flags);
 197                        else
 198                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 199                        break;
 200                }
 201                if (rnp->parent == NULL) {
 202                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 203                        if (wake) {
 204                                smp_mb(); /* EGP done before wake_up(). */
 205                                swake_up_one(&rcu_state.expedited_wq);
 206                        }
 207                        break;
 208                }
 209                mask = rnp->grpmask;
 210                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 211                rnp = rnp->parent;
 212                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 213                WARN_ON_ONCE(!(rnp->expmask & mask));
 214                rnp->expmask &= ~mask;
 215        }
 216}
 217
 218/*
 219 * Report expedited quiescent state for specified node.  This is a
 220 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 221 */
 222static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
 223{
 224        unsigned long flags;
 225
 226        raw_spin_lock_irqsave_rcu_node(rnp, flags);
 227        __rcu_report_exp_rnp(rnp, wake, flags);
 228}
 229
 230/*
 231 * Report expedited quiescent state for multiple CPUs, all covered by the
 232 * specified leaf rcu_node structure.
 233 */
 234static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
 235                                    unsigned long mask, bool wake)
 236{
 237        unsigned long flags;
 238
 239        raw_spin_lock_irqsave_rcu_node(rnp, flags);
 240        if (!(rnp->expmask & mask)) {
 241                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 242                return;
 243        }
 244        rnp->expmask &= ~mask;
 245        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
 246}
 247
 248/*
 249 * Report expedited quiescent state for specified rcu_data (CPU).
 250 */
 251static void rcu_report_exp_rdp(struct rcu_data *rdp)
 252{
 253        WRITE_ONCE(rdp->deferred_qs, false);
 254        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 255}
 256
 257/* Common code for work-done checking. */
 258static bool sync_exp_work_done(unsigned long s)
 259{
 260        if (rcu_exp_gp_seq_done(s)) {
 261                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
 262                /* Ensure test happens before caller kfree(). */
 263                smp_mb__before_atomic(); /* ^^^ */
 264                return true;
 265        }
 266        return false;
 267}
 268
 269/*
 270 * Funnel-lock acquisition for expedited grace periods.  Returns true
 271 * if some other task completed an expedited grace period that this task
 272 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 273 * with the mutex held, indicating that the caller must actually do the
 274 * expedited grace period.
 275 */
 276static bool exp_funnel_lock(unsigned long s)
 277{
 278        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 279        struct rcu_node *rnp = rdp->mynode;
 280        struct rcu_node *rnp_root = rcu_get_root();
 281
 282        /* Low-contention fastpath. */
 283        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
 284            (rnp == rnp_root ||
 285             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
 286            mutex_trylock(&rcu_state.exp_mutex))
 287                goto fastpath;
 288
 289        /*
 290         * Each pass through the following loop works its way up
  291         * the rcu_node tree, returning if others have done the work, or
  292         * otherwise falling through to acquire ->exp_mutex.  The mapping
 293         * from CPU to rcu_node structure can be inexact, as it is just
 294         * promoting locality and is not strictly needed for correctness.
 295         */
 296        for (; rnp != NULL; rnp = rnp->parent) {
 297                if (sync_exp_work_done(s))
 298                        return true;
 299
 300                /* Work not done, either wait here or go up. */
 301                spin_lock(&rnp->exp_lock);
 302                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
 303
 304                        /* Someone else doing GP, so wait for them. */
 305                        spin_unlock(&rnp->exp_lock);
 306                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
 307                                                  rnp->grplo, rnp->grphi,
 308                                                  TPS("wait"));
 309                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 310                                   sync_exp_work_done(s));
 311                        return true;
 312                }
 313                rnp->exp_seq_rq = s; /* Followers can wait on us. */
 314                spin_unlock(&rnp->exp_lock);
 315                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
 316                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
 317        }
 318        mutex_lock(&rcu_state.exp_mutex);
 319fastpath:
 320        if (sync_exp_work_done(s)) {
 321                mutex_unlock(&rcu_state.exp_mutex);
 322                return true;
 323        }
 324        rcu_exp_gp_seq_start();
 325        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
 326        return false;
 327}
 328
 329/*
 330 * Select the CPUs within the specified rcu_node that the upcoming
 331 * expedited grace period needs to wait for.
 332 */
 333static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 334{
 335        int cpu;
 336        unsigned long flags;
 337        unsigned long mask_ofl_test;
 338        unsigned long mask_ofl_ipi;
 339        int ret;
 340        struct rcu_exp_work *rewp =
 341                container_of(wp, struct rcu_exp_work, rew_work);
 342        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
 343
 344        raw_spin_lock_irqsave_rcu_node(rnp, flags);
 345
 346        /* Each pass checks a CPU for identity, offline, and idle. */
 347        mask_ofl_test = 0;
 348        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 349                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
 350                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 351                int snap;
 352
 353                if (raw_smp_processor_id() == cpu ||
 354                    !(rnp->qsmaskinitnext & mask)) {
 355                        mask_ofl_test |= mask;
 356                } else {
 357                        snap = rcu_dynticks_snap(rdp);
 358                        if (rcu_dynticks_in_eqs(snap))
 359                                mask_ofl_test |= mask;
 360                        else
 361                                rdp->exp_dynticks_snap = snap;
 362                }
 363        }
 364        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
 365
 366        /*
 367         * Need to wait for any blocked tasks as well.  Note that
 368         * additional blocking tasks will also block the expedited GP
 369         * until such time as the ->expmask bits are cleared.
 370         */
 371        if (rcu_preempt_has_tasks(rnp))
 372                rnp->exp_tasks = rnp->blkd_tasks.next;
 373        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 374
 375        /* IPI the remaining CPUs for expedited quiescent state. */
 376        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 377                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
 378                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 379
 380                if (!(mask_ofl_ipi & mask))
 381                        continue;
 382retry_ipi:
 383                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
 384                        mask_ofl_test |= mask;
 385                        continue;
 386                }
 387                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
 388                if (!ret) {
 389                        mask_ofl_ipi &= ~mask;
 390                        continue;
 391                }
 392                /* Failed, raced with CPU hotplug operation. */
 393                raw_spin_lock_irqsave_rcu_node(rnp, flags);
 394                if ((rnp->qsmaskinitnext & mask) &&
 395                    (rnp->expmask & mask)) {
 396                        /* Online, so delay for a bit and try again. */
 397                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 398                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
 399                        schedule_timeout_uninterruptible(1);
 400                        goto retry_ipi;
 401                }
 402                /* CPU really is offline, so we can ignore it. */
 403                if (!(rnp->expmask & mask))
 404                        mask_ofl_ipi &= ~mask;
 405                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 406        }
 407        /* Report quiescent states for those that went offline. */
 408        mask_ofl_test |= mask_ofl_ipi;
 409        if (mask_ofl_test)
 410                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
 411}
 412
 413/*
 414 * Select the nodes that the upcoming expedited grace period needs
 415 * to wait for.
 416 */
 417static void sync_rcu_exp_select_cpus(void)
 418{
 419        int cpu;
 420        struct rcu_node *rnp;
 421
 422        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
 423        sync_exp_reset_tree();
 424        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
 425
 426        /* Schedule work for each leaf rcu_node structure. */
 427        rcu_for_each_leaf_node(rnp) {
 428                rnp->exp_need_flush = false;
 429                if (!READ_ONCE(rnp->expmask))
 430                        continue; /* Avoid early boot non-existent wq. */
 431                if (!READ_ONCE(rcu_par_gp_wq) ||
 432                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
 433                    rcu_is_last_leaf_node(rnp)) {
 434                        /* No workqueues yet or last leaf, do direct call. */
 435                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
 436                        continue;
 437                }
 438                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
 439                cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
 440                /* If all offline, queue the work on an unbound CPU. */
 441                if (unlikely(cpu > rnp->grphi - rnp->grplo))
 442                        cpu = WORK_CPU_UNBOUND;
 443                else
 444                        cpu += rnp->grplo;
 445                queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
 446                rnp->exp_need_flush = true;
 447        }
 448
 449        /* Wait for workqueue jobs (if any) to complete. */
 450        rcu_for_each_leaf_node(rnp)
 451                if (rnp->exp_need_flush)
 452                        flush_work(&rnp->rew.rew_work);
 453}
 454
 455static void synchronize_sched_expedited_wait(void)
 456{
 457        int cpu;
 458        unsigned long jiffies_stall;
 459        unsigned long jiffies_start;
 460        unsigned long mask;
 461        int ndetected;
 462        struct rcu_node *rnp;
 463        struct rcu_node *rnp_root = rcu_get_root();
 464        int ret;
 465
 466        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
 467        jiffies_stall = rcu_jiffies_till_stall_check();
 468        jiffies_start = jiffies;
 469
 470        for (;;) {
 471                ret = swait_event_timeout_exclusive(
 472                                rcu_state.expedited_wq,
 473                                sync_rcu_preempt_exp_done_unlocked(rnp_root),
 474                                jiffies_stall);
 475                if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
 476                        return;
 477                WARN_ON(ret < 0);  /* workqueues should not be signaled. */
 478                if (rcu_cpu_stall_suppress)
 479                        continue;
 480                panic_on_rcu_stall();
 481                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 482                       rcu_state.name);
 483                ndetected = 0;
 484                rcu_for_each_leaf_node(rnp) {
 485                        ndetected += rcu_print_task_exp_stall(rnp);
 486                        for_each_leaf_node_possible_cpu(rnp, cpu) {
 487                                struct rcu_data *rdp;
 488
 489                                mask = leaf_node_cpu_bit(rnp, cpu);
 490                                if (!(rnp->expmask & mask))
 491                                        continue;
 492                                ndetected++;
 493                                rdp = per_cpu_ptr(&rcu_data, cpu);
 494                                pr_cont(" %d-%c%c%c", cpu,
 495                                        "O."[!!cpu_online(cpu)],
 496                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
 497                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
 498                        }
 499                }
 500                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
 501                        jiffies - jiffies_start, rcu_state.expedited_sequence,
 502                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
 503                if (ndetected) {
 504                        pr_err("blocking rcu_node structures:");
 505                        rcu_for_each_node_breadth_first(rnp) {
 506                                if (rnp == rnp_root)
 507                                        continue; /* printed unconditionally */
 508                                if (sync_rcu_preempt_exp_done_unlocked(rnp))
 509                                        continue;
 510                                pr_cont(" l=%u:%d-%d:%#lx/%c",
 511                                        rnp->level, rnp->grplo, rnp->grphi,
 512                                        rnp->expmask,
 513                                        ".T"[!!rnp->exp_tasks]);
 514                        }
 515                        pr_cont("\n");
 516                }
 517                rcu_for_each_leaf_node(rnp) {
 518                        for_each_leaf_node_possible_cpu(rnp, cpu) {
 519                                mask = leaf_node_cpu_bit(rnp, cpu);
 520                                if (!(rnp->expmask & mask))
 521                                        continue;
 522                                dump_cpu_task(cpu);
 523                        }
 524                }
 525                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
 526        }
 527}
 528
 529/*
 530 * Wait for the current expedited grace period to complete, and then
 531 * wake up everyone who piggybacked on the just-completed expedited
 532 * grace period.  Also update all the ->exp_seq_rq counters as needed
 533 * in order to avoid counter-wrap problems.
 534 */
 535static void rcu_exp_wait_wake(unsigned long s)
 536{
 537        struct rcu_node *rnp;
 538
 539        synchronize_sched_expedited_wait();
 540        rcu_exp_gp_seq_end();
 541        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
 542
 543        /*
 544         * Switch over to wakeup mode, allowing the next GP, but -only- the
 545         * next GP, to proceed.
 546         */
 547        mutex_lock(&rcu_state.exp_wake_mutex);
 548
 549        rcu_for_each_node_breadth_first(rnp) {
 550                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
 551                        spin_lock(&rnp->exp_lock);
 552                        /* Recheck, avoid hang in case someone just arrived. */
 553                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
 554                                rnp->exp_seq_rq = s;
 555                        spin_unlock(&rnp->exp_lock);
 556                }
 557                smp_mb(); /* All above changes before wakeup. */
 558                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
 559        }
 560        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
 561        mutex_unlock(&rcu_state.exp_wake_mutex);
 562}
 563
 564/*
 565 * Common code to drive an expedited grace period forward, used by
 566 * workqueues and mid-boot-time tasks.
 567 */
 568static void rcu_exp_sel_wait_wake(unsigned long s)
 569{
 570        /* Initialize the rcu_node tree in preparation for the wait. */
 571        sync_rcu_exp_select_cpus();
 572
 573        /* Wait and clean up, including waking everyone. */
 574        rcu_exp_wait_wake(s);
 575}
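
/*
 * Illustrative sketch, not part of the original file: how exp_funnel_lock()
 * and rcu_exp_sel_wait_wake() compose, condensed from the mid-boot direct
 * path of synchronize_rcu_expedited() below.  The function name is
 * hypothetical.
 */
static void __maybe_unused example_drive_expedited_gp(void)
{
        unsigned long s = rcu_exp_gp_seq_snap();

        if (exp_funnel_lock(s))
                return; /* Piggybacked on another task's expedited GP. */

        /* We hold rcu_state.exp_mutex, so we must drive the GP ourselves. */
        rcu_exp_sel_wait_wake(s);

        /* Release ->exp_mutex so the next expedited GP can start. */
        mutex_unlock(&rcu_state.exp_mutex);
}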
 576
 577/*
 578 * Work-queue handler to drive an expedited grace period forward.
 579 */
 580static void wait_rcu_exp_gp(struct work_struct *wp)
 581{
 582        struct rcu_exp_work *rewp;
 583
 584        rewp = container_of(wp, struct rcu_exp_work, rew_work);
 585        rcu_exp_sel_wait_wake(rewp->rew_s);
 586}
 587
 588#ifdef CONFIG_PREEMPT_RCU
 589
 590/*
 591 * Remote handler for smp_call_function_single().  If there is an
 592 * RCU read-side critical section in effect, request that the
 593 * next rcu_read_unlock() record the quiescent state up the
 594 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 595 * report the quiescent state.
 596 */
 597static void rcu_exp_handler(void *unused)
 598{
 599        unsigned long flags;
 600        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 601        struct rcu_node *rnp = rdp->mynode;
 602        struct task_struct *t = current;
 603
 604        /*
 605         * First, the common case of not being in an RCU read-side
 606         * critical section.  If also enabled or idle, immediately
 607         * report the quiescent state, otherwise defer.
 608         */
 609        if (!t->rcu_read_lock_nesting) {
 610                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 611                    rcu_dynticks_curr_cpu_in_eqs()) {
 612                        rcu_report_exp_rdp(rdp);
 613                } else {
 614                        rdp->deferred_qs = true;
 615                        set_tsk_need_resched(t);
 616                        set_preempt_need_resched();
 617                }
 618                return;
 619        }
 620
 621        /*
 622         * Second, the less-common case of being in an RCU read-side
 623         * critical section.  In this case we can count on a future
 624         * rcu_read_unlock().  However, this rcu_read_unlock() might
 625         * execute on some other CPU, but in that case there will be
 626         * a future context switch.  Either way, if the expedited
 627         * grace period is still waiting on this CPU, set ->deferred_qs
 628         * so that the eventual quiescent state will be reported.
 629         * Note that there is a large group of race conditions that
 630         * can have caused this quiescent state to already have been
 631         * reported, so we really do need to check ->expmask.
 632         */
 633        if (t->rcu_read_lock_nesting > 0) {
 634                raw_spin_lock_irqsave_rcu_node(rnp, flags);
 635                if (rnp->expmask & rdp->grpmask) {
 636                        rdp->deferred_qs = true;
 637                        t->rcu_read_unlock_special.b.exp_hint = true;
 638                }
 639                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 640                return;
 641        }
 642
 643        /*
 644         * The final and least likely case is where the interrupted
 645         * code was just about to or just finished exiting the RCU-preempt
 646         * read-side critical section, and no, we can't tell which.
 647         * So either way, set ->deferred_qs to flag later code that
 648         * a quiescent state is required.
 649         *
 650         * If the CPU is fully enabled (or if some buggy RCU-preempt
 651         * read-side critical section is being used from idle), just
 652         * invoke rcu_preempt_deferred_qs() to immediately report the
 653         * quiescent state.  We cannot use rcu_read_unlock_special()
 654         * because we are in an interrupt handler, which will cause that
 655         * function to take an early exit without doing anything.
 656         *
 657         * Otherwise, force a context switch after the CPU enables everything.
 658         */
 659        rdp->deferred_qs = true;
 660        if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 661            WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
 662                rcu_preempt_deferred_qs(t);
 663        } else {
 664                set_tsk_need_resched(t);
 665                set_preempt_need_resched();
 666        }
 667}
 668
 669/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
 670static void sync_sched_exp_online_cleanup(int cpu)
 671{
 672}
 673
 674/*
 675 * Scan the current list of tasks blocked within RCU read-side critical
 676 * sections, printing out the tid of each that is blocking the current
 677 * expedited grace period.
 678 */
 679static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 680{
 681        struct task_struct *t;
 682        int ndetected = 0;
 683
 684        if (!rnp->exp_tasks)
 685                return 0;
 686        t = list_entry(rnp->exp_tasks->prev,
 687                       struct task_struct, rcu_node_entry);
 688        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 689                pr_cont(" P%d", t->pid);
 690                ndetected++;
 691        }
 692        return ndetected;
 693}
 694
 695#else /* #ifdef CONFIG_PREEMPT_RCU */
 696
 697/* Invoked on each online non-idle CPU for expedited quiescent state. */
 698static void rcu_exp_handler(void *unused)
 699{
 700        struct rcu_data *rdp;
 701        struct rcu_node *rnp;
 702
 703        rdp = this_cpu_ptr(&rcu_data);
 704        rnp = rdp->mynode;
 705        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 706            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 707                return;
 708        if (rcu_is_cpu_rrupt_from_idle()) {
 709                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 710                return;
 711        }
 712        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 713        /* Store .exp before .rcu_urgent_qs. */
 714        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
 715        set_tsk_need_resched(current);
 716        set_preempt_need_resched();
 717}
 718
 719/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
 720static void sync_sched_exp_online_cleanup(int cpu)
 721{
 722        struct rcu_data *rdp;
 723        int ret;
 724        struct rcu_node *rnp;
 725
 726        rdp = per_cpu_ptr(&rcu_data, cpu);
 727        rnp = rdp->mynode;
 728        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
 729                return;
 730        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
 731        WARN_ON_ONCE(ret);
 732}
 733
 734/*
 735 * Because preemptible RCU does not exist, we never have to check for
 736 * tasks blocked within RCU read-side critical sections that are
 737 * blocking the current expedited grace period.
 738 */
 739static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 740{
 741        return 0;
 742}
 743
 744#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 745
 746/**
 747 * synchronize_rcu_expedited - Brute-force RCU grace period
 748 *
 749 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 750 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 751 * the CPU is in an RCU critical section, and if so, it sets a flag that
 752 * causes the outermost rcu_read_unlock() to report the quiescent state
 753 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 754 * other hand, if the CPU is not in an RCU read-side critical section,
 755 * the IPI handler reports the quiescent state immediately.
 756 *
  757 * Although this is a great improvement over previous expedited
 758 * implementations, it is still unfriendly to real-time workloads, so is
 759 * thus not recommended for any sort of common-case code.  In fact, if
 760 * you are using synchronize_rcu_expedited() in a loop, please restructure
  761 * your code to batch your updates, and then use a single synchronize_rcu()
 762 * instead.
 763 *
 764 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 765 */
 766void synchronize_rcu_expedited(void)
 767{
 768        struct rcu_data *rdp;
 769        struct rcu_exp_work rew;
 770        struct rcu_node *rnp;
 771        unsigned long s;
 772
 773        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 774                         lock_is_held(&rcu_lock_map) ||
 775                         lock_is_held(&rcu_sched_lock_map),
 776                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
 777
  778        /* Is the state such that the call is a grace period? */
 779        if (rcu_blocking_is_gp())
 780                return;
 781
 782        /* If expedited grace periods are prohibited, fall back to normal. */
 783        if (rcu_gp_is_normal()) {
 784                wait_rcu_gp(call_rcu);
 785                return;
 786        }
 787
 788        /* Take a snapshot of the sequence number.  */
 789        s = rcu_exp_gp_seq_snap();
 790        if (exp_funnel_lock(s))
 791                return;  /* Someone else did our work for us. */
 792
 793        /* Ensure that load happens before action based on it. */
 794        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
 795                /* Direct call during scheduler init and early_initcalls(). */
 796                rcu_exp_sel_wait_wake(s);
 797        } else {
 798                /* Marshall arguments & schedule the expedited grace period. */
 799                rew.rew_s = s;
 800                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
 801                queue_work(rcu_gp_wq, &rew.rew_work);
 802        }
 803
 804        /* Wait for expedited grace period to complete. */
 805        rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 806        rnp = rcu_get_root();
 807        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 808                   sync_exp_work_done(s));
 809        smp_mb(); /* Workqueue actions happen before return. */
 810
 811        /* Let the next expedited grace period start. */
 812        mutex_unlock(&rcu_state.exp_mutex);
 813}
 814EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
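
/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * updater swaps an RCU-protected pointer and uses the expedited grace
 * period before freeing the old version; readers use the usual
 * rcu_read_lock()/rcu_dereference() pairing.  All names below are
 * illustrative.  Per the comment above, batch updates and use a single
 * synchronize_rcu() rather than calling this in a loop.
 */
struct example_cfg {
        int threshold;
};

static DEFINE_SPINLOCK(example_cfg_lock);          /* Serializes updaters. */
static struct example_cfg __rcu *example_cfg_ptr;  /* RCU-protected data. */

static void example_update_cfg(struct example_cfg *newp)
{
        struct example_cfg *oldp;

        spin_lock(&example_cfg_lock);
        oldp = rcu_dereference_protected(example_cfg_ptr,
                                         lockdep_is_held(&example_cfg_lock));
        rcu_assign_pointer(example_cfg_ptr, newp);
        spin_unlock(&example_cfg_lock);

        synchronize_rcu_expedited();    /* All pre-existing readers are done. */
        kfree(oldp);                    /* No reader can still reference oldp. */
}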
 815