linux/kernel/rcu/tree_plugin.h
   1/*
   2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
   3 * Internal non-public definitions that provide either classic
   4 * or preemptible semantics.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, you can access it online at
  18 * http://www.gnu.org/licenses/gpl-2.0.html.
  19 *
  20 * Copyright Red Hat, 2009
  21 * Copyright IBM Corporation, 2009
  22 *
  23 * Author: Ingo Molnar <mingo@elte.hu>
  24 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  25 */
  26
  27#include <linux/delay.h>
  28#include <linux/gfp.h>
  29#include <linux/oom.h>
  30#include <linux/smpboot.h>
  31#include "../time/tick-internal.h"
  32
  33#ifdef CONFIG_RCU_BOOST
  34
  35#include "../locking/rtmutex_common.h"
  36
  37/* rcuc/rcub kthread realtime priority */
  38static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
  39module_param(kthread_prio, int, 0644);
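/*
 * Illustrative note (not part of the original file): because this header is
 * #included from kernel/rcu/tree.c, which is assumed to set
 * MODULE_PARAM_PREFIX to "rcutree.", this priority can presumably be
 * overridden at boot time with e.g. "rcutree.kthread_prio=2" on the kernel
 * command line.
 */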
  40
  41/*
  42 * Control variables for per-CPU and per-rcu_node kthreads.  These
  43 * handle all flavors of RCU.
  44 */
  45static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
  46DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
  47DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
  48DEFINE_PER_CPU(char, rcu_cpu_has_work);
  49
  50#endif /* #ifdef CONFIG_RCU_BOOST */
  51
  52#ifdef CONFIG_RCU_NOCB_CPU
  53static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
  54static bool have_rcu_nocb_mask;     /* Was rcu_nocb_mask allocated? */
   55static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
  56static char __initdata nocb_buf[NR_CPUS * 5];
  57#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
  58
  59/*
  60 * Check the RCU kernel configuration parameters and print informative
  61 * messages about anything out of the ordinary.  If you like #ifdef, you
  62 * will love this function.
  63 */
  64static void __init rcu_bootup_announce_oddness(void)
  65{
  66#ifdef CONFIG_RCU_TRACE
  67        pr_info("\tRCU debugfs-based tracing is enabled.\n");
  68#endif
  69#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
  70        pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
  71               CONFIG_RCU_FANOUT);
  72#endif
  73#ifdef CONFIG_RCU_FANOUT_EXACT
  74        pr_info("\tHierarchical RCU autobalancing is disabled.\n");
  75#endif
  76#ifdef CONFIG_RCU_FAST_NO_HZ
  77        pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
  78#endif
  79#ifdef CONFIG_PROVE_RCU
  80        pr_info("\tRCU lockdep checking is enabled.\n");
  81#endif
  82#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
  83        pr_info("\tRCU torture testing starts during boot.\n");
  84#endif
  85#if defined(CONFIG_RCU_CPU_STALL_INFO)
  86        pr_info("\tAdditional per-CPU info printed with stalls.\n");
  87#endif
  88#if NUM_RCU_LVL_4 != 0
  89        pr_info("\tFour-level hierarchy is enabled.\n");
  90#endif
  91        if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
  92                pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
  93        if (nr_cpu_ids != NR_CPUS)
  94                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
  95#ifdef CONFIG_RCU_BOOST
  96        pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
  97#endif
  98}
  99
 100#ifdef CONFIG_PREEMPT_RCU
 101
 102RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 103static struct rcu_state *rcu_state_p = &rcu_preempt_state;
 104
 105static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 106
 107/*
 108 * Tell them what RCU they are running.
 109 */
 110static void __init rcu_bootup_announce(void)
 111{
 112        pr_info("Preemptible hierarchical RCU implementation.\n");
 113        rcu_bootup_announce_oddness();
 114}
 115
 116/*
 117 * Return the number of RCU-preempt batches processed thus far
 118 * for debug and statistics.
 119 */
 120static long rcu_batches_completed_preempt(void)
 121{
 122        return rcu_preempt_state.completed;
 123}
 124EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
 125
 126/*
 127 * Return the number of RCU batches processed thus far for debug & stats.
 128 */
 129long rcu_batches_completed(void)
 130{
 131        return rcu_batches_completed_preempt();
 132}
 133EXPORT_SYMBOL_GPL(rcu_batches_completed);
 134
 135/*
 136 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  137 * that this does not necessarily mean that the task currently running
  138 * on the CPU is in a quiescent state.  There might be any number of
  139 * tasks blocked while in an RCU read-side critical section.
 140 *
 141 * As with the other rcu_*_qs() functions, callers to this function
 142 * must disable preemption.
 143 */
 144static void rcu_preempt_qs(void)
 145{
 146        if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
 147                trace_rcu_grace_period(TPS("rcu_preempt"),
 148                                       __this_cpu_read(rcu_preempt_data.gpnum),
 149                                       TPS("cpuqs"));
 150                __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
 151                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
 152                current->rcu_read_unlock_special.b.need_qs = false;
 153        }
 154}
 155
 156/*
 157 * We have entered the scheduler, and the current task might soon be
 158 * context-switched away from.  If this task is in an RCU read-side
 159 * critical section, we will no longer be able to rely on the CPU to
 160 * record that fact, so we enqueue the task on the blkd_tasks list.
 161 * The task will dequeue itself when it exits the outermost enclosing
 162 * RCU read-side critical section.  Therefore, the current grace period
 163 * cannot be permitted to complete until the blkd_tasks list entries
 164 * predating the current grace period drain, in other words, until
 165 * rnp->gp_tasks becomes NULL.
 166 *
 167 * Caller must disable preemption.
 168 */
 169static void rcu_preempt_note_context_switch(void)
 170{
 171        struct task_struct *t = current;
 172        unsigned long flags;
 173        struct rcu_data *rdp;
 174        struct rcu_node *rnp;
 175
 176        if (t->rcu_read_lock_nesting > 0 &&
 177            !t->rcu_read_unlock_special.b.blocked) {
 178
 179                /* Possibly blocking in an RCU read-side critical section. */
 180                rdp = this_cpu_ptr(rcu_preempt_state.rda);
 181                rnp = rdp->mynode;
 182                raw_spin_lock_irqsave(&rnp->lock, flags);
 183                smp_mb__after_unlock_lock();
 184                t->rcu_read_unlock_special.b.blocked = true;
 185                t->rcu_blocked_node = rnp;
 186
 187                /*
 188                 * If this CPU has already checked in, then this task
 189                 * will hold up the next grace period rather than the
 190                 * current grace period.  Queue the task accordingly.
 191                 * If the task is queued for the current grace period
 192                 * (i.e., this CPU has not yet passed through a quiescent
 193                 * state for the current grace period), then as long
 194                 * as that task remains queued, the current grace period
 195                 * cannot end.  Note that there is some uncertainty as
 196                 * to exactly when the current grace period started.
 197                 * We take a conservative approach, which can result
 198                 * in unnecessarily waiting on tasks that started very
 199                 * slightly after the current grace period began.  C'est
 200                 * la vie!!!
 201                 *
 202                 * But first, note that the current CPU must still be
 203                 * on line!
 204                 */
 205                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
 206                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
 207                if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
 208                        list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
 209                        rnp->gp_tasks = &t->rcu_node_entry;
 210#ifdef CONFIG_RCU_BOOST
 211                        if (rnp->boost_tasks != NULL)
 212                                rnp->boost_tasks = rnp->gp_tasks;
 213#endif /* #ifdef CONFIG_RCU_BOOST */
 214                } else {
 215                        list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
 216                        if (rnp->qsmask & rdp->grpmask)
 217                                rnp->gp_tasks = &t->rcu_node_entry;
 218                }
 219                trace_rcu_preempt_task(rdp->rsp->name,
 220                                       t->pid,
 221                                       (rnp->qsmask & rdp->grpmask)
 222                                       ? rnp->gpnum
 223                                       : rnp->gpnum + 1);
 224                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 225        } else if (t->rcu_read_lock_nesting < 0 &&
 226                   t->rcu_read_unlock_special.s) {
 227
 228                /*
 229                 * Complete exit from RCU read-side critical section on
 230                 * behalf of preempted instance of __rcu_read_unlock().
 231                 */
 232                rcu_read_unlock_special(t);
 233        }
 234
 235        /*
 236         * Either we were not in an RCU read-side critical section to
 237         * begin with, or we have now recorded that critical section
 238         * globally.  Either way, we can now note a quiescent state
 239         * for this CPU.  Again, if we were in an RCU read-side critical
 240         * section, and if that critical section was blocking the current
 241         * grace period, then the fact that the task has been enqueued
 242         * means that we continue to block the current grace period.
 243         */
 244        rcu_preempt_qs();
 245}
 246
 247/*
 248 * Check for preempted RCU readers blocking the current grace period
 249 * for the specified rcu_node structure.  If the caller needs a reliable
 250 * answer, it must hold the rcu_node's ->lock.
 251 */
 252static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 253{
 254        return rnp->gp_tasks != NULL;
 255}
 256
 257/*
 258 * Record a quiescent state for all tasks that were previously queued
 259 * on the specified rcu_node structure and that were blocking the current
 260 * RCU grace period.  The caller must hold the specified rnp->lock with
 261 * irqs disabled, and this lock is released upon return, but irqs remain
 262 * disabled.
 263 */
 264static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 265        __releases(rnp->lock)
 266{
 267        unsigned long mask;
 268        struct rcu_node *rnp_p;
 269
 270        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 271                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 272                return;  /* Still need more quiescent states! */
 273        }
 274
 275        rnp_p = rnp->parent;
 276        if (rnp_p == NULL) {
 277                /*
 278                 * Either there is only one rcu_node in the tree,
 279                 * or tasks were kicked up to root rcu_node due to
 280                 * CPUs going offline.
 281                 */
 282                rcu_report_qs_rsp(&rcu_preempt_state, flags);
 283                return;
 284        }
 285
 286        /* Report up the rest of the hierarchy. */
 287        mask = rnp->grpmask;
 288        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
 289        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
 290        smp_mb__after_unlock_lock();
 291        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
 292}
 293
 294/*
  295 * Advance a ->blkd_tasks-list pointer to the next entry, returning
  296 * NULL instead if the pointer is at the end of the list.
 297 */
 298static struct list_head *rcu_next_node_entry(struct task_struct *t,
 299                                             struct rcu_node *rnp)
 300{
 301        struct list_head *np;
 302
 303        np = t->rcu_node_entry.next;
 304        if (np == &rnp->blkd_tasks)
 305                np = NULL;
 306        return np;
 307}
 308
 309/*
 310 * Handle special cases during rcu_read_unlock(), such as needing to
  311 * notify RCU core processing or the task having blocked during the RCU
 312 * read-side critical section.
 313 */
 314void rcu_read_unlock_special(struct task_struct *t)
 315{
 316        int empty;
 317        int empty_exp;
 318        int empty_exp_now;
 319        unsigned long flags;
 320        struct list_head *np;
 321#ifdef CONFIG_RCU_BOOST
 322        bool drop_boost_mutex = false;
 323#endif /* #ifdef CONFIG_RCU_BOOST */
 324        struct rcu_node *rnp;
 325        union rcu_special special;
 326
 327        /* NMI handlers cannot block and cannot safely manipulate state. */
 328        if (in_nmi())
 329                return;
 330
 331        local_irq_save(flags);
 332
 333        /*
 334         * If RCU core is waiting for this CPU to exit critical section,
 335         * let it know that we have done so.  Because irqs are disabled,
 336         * t->rcu_read_unlock_special cannot change.
 337         */
 338        special = t->rcu_read_unlock_special;
 339        if (special.b.need_qs) {
 340                rcu_preempt_qs();
 341                if (!t->rcu_read_unlock_special.s) {
 342                        local_irq_restore(flags);
 343                        return;
 344                }
 345        }
 346
 347        /* Hardware IRQ handlers cannot block, complain if they get here. */
 348        if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
 349                local_irq_restore(flags);
 350                return;
 351        }
 352
 353        /* Clean up if blocked during RCU read-side critical section. */
 354        if (special.b.blocked) {
 355                t->rcu_read_unlock_special.b.blocked = false;
 356
 357                /*
 358                 * Remove this task from the list it blocked on.  The
 359                 * task can migrate while we acquire the lock, but at
 360                 * most one time.  So at most two passes through loop.
 361                 */
 362                for (;;) {
 363                        rnp = t->rcu_blocked_node;
 364                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
 365                        smp_mb__after_unlock_lock();
 366                        if (rnp == t->rcu_blocked_node)
 367                                break;
 368                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 369                }
 370                empty = !rcu_preempt_blocked_readers_cgp(rnp);
 371                empty_exp = !rcu_preempted_readers_exp(rnp);
 372                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 373                np = rcu_next_node_entry(t, rnp);
 374                list_del_init(&t->rcu_node_entry);
 375                t->rcu_blocked_node = NULL;
 376                trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
 377                                                rnp->gpnum, t->pid);
 378                if (&t->rcu_node_entry == rnp->gp_tasks)
 379                        rnp->gp_tasks = np;
 380                if (&t->rcu_node_entry == rnp->exp_tasks)
 381                        rnp->exp_tasks = np;
 382#ifdef CONFIG_RCU_BOOST
 383                if (&t->rcu_node_entry == rnp->boost_tasks)
 384                        rnp->boost_tasks = np;
 385                /* Snapshot ->boost_mtx ownership with rcu_node lock held. */
 386                drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 387#endif /* #ifdef CONFIG_RCU_BOOST */
 388
 389                /*
 390                 * If this was the last task on the current list, and if
 391                 * we aren't waiting on any CPUs, report the quiescent state.
 392                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
 393                 * so we must take a snapshot of the expedited state.
 394                 */
 395                empty_exp_now = !rcu_preempted_readers_exp(rnp);
 396                if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
 397                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 398                                                         rnp->gpnum,
 399                                                         0, rnp->qsmask,
 400                                                         rnp->level,
 401                                                         rnp->grplo,
 402                                                         rnp->grphi,
 403                                                         !!rnp->gp_tasks);
 404                        rcu_report_unblock_qs_rnp(rnp, flags);
 405                } else {
 406                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 407                }
 408
 409#ifdef CONFIG_RCU_BOOST
 410                /* Unboost if we were boosted. */
 411                if (drop_boost_mutex) {
 412                        rt_mutex_unlock(&rnp->boost_mtx);
 413                        complete(&rnp->boost_completion);
 414                }
 415#endif /* #ifdef CONFIG_RCU_BOOST */
 416
 417                /*
 418                 * If this was the last task on the expedited lists,
 419                 * then we need to report up the rcu_node hierarchy.
 420                 */
 421                if (!empty_exp && empty_exp_now)
 422                        rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
 423        } else {
 424                local_irq_restore(flags);
 425        }
 426}
 427
 428/*
 429 * Dump detailed information for all tasks blocking the current RCU
 430 * grace period on the specified rcu_node structure.
 431 */
 432static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 433{
 434        unsigned long flags;
 435        struct task_struct *t;
 436
 437        raw_spin_lock_irqsave(&rnp->lock, flags);
 438        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 439                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 440                return;
 441        }
 442        t = list_entry(rnp->gp_tasks,
 443                       struct task_struct, rcu_node_entry);
 444        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 445                sched_show_task(t);
 446        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 447}
 448
 449/*
 450 * Dump detailed information for all tasks blocking the current RCU
 451 * grace period.
 452 */
 453static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 454{
 455        struct rcu_node *rnp = rcu_get_root(rsp);
 456
 457        rcu_print_detail_task_stall_rnp(rnp);
 458        rcu_for_each_leaf_node(rsp, rnp)
 459                rcu_print_detail_task_stall_rnp(rnp);
 460}
 461
 462#ifdef CONFIG_RCU_CPU_STALL_INFO
 463
 464static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 465{
 466        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
 467               rnp->level, rnp->grplo, rnp->grphi);
 468}
 469
 470static void rcu_print_task_stall_end(void)
 471{
 472        pr_cont("\n");
 473}
 474
 475#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
 476
 477static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 478{
 479}
 480
 481static void rcu_print_task_stall_end(void)
 482{
 483}
 484
 485#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
 486
 487/*
 488 * Scan the current list of tasks blocked within RCU read-side critical
 489 * sections, printing out the tid of each.
 490 */
 491static int rcu_print_task_stall(struct rcu_node *rnp)
 492{
 493        struct task_struct *t;
 494        int ndetected = 0;
 495
 496        if (!rcu_preempt_blocked_readers_cgp(rnp))
 497                return 0;
 498        rcu_print_task_stall_begin(rnp);
 499        t = list_entry(rnp->gp_tasks,
 500                       struct task_struct, rcu_node_entry);
 501        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 502                pr_cont(" P%d", t->pid);
 503                ndetected++;
 504        }
 505        rcu_print_task_stall_end();
 506        return ndetected;
 507}
 508
 509/*
 510 * Check that the list of blocked tasks for the newly completed grace
 511 * period is in fact empty.  It is a serious bug to complete a grace
 512 * period that still has RCU readers blocked!  This function must be
 513 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 514 * must be held by the caller.
 515 *
 516 * Also, if there are blocked tasks on the list, they automatically
 517 * block the newly created grace period, so set up ->gp_tasks accordingly.
 518 */
 519static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 520{
 521        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 522        if (!list_empty(&rnp->blkd_tasks))
 523                rnp->gp_tasks = rnp->blkd_tasks.next;
 524        WARN_ON_ONCE(rnp->qsmask);
 525}
 526
 527#ifdef CONFIG_HOTPLUG_CPU
 528
 529/*
 530 * Handle tasklist migration for case in which all CPUs covered by the
 531 * specified rcu_node have gone offline.  Move them up to the root
 532 * rcu_node.  The reason for not just moving them to the immediate
 533 * parent is to remove the need for rcu_read_unlock_special() to
 534 * make more than two attempts to acquire the target rcu_node's lock.
  535 * Returns a bitmask indicating whether tasks on this rcu_node structure
  536 * were blocking the current normal (RCU_OFL_TASKS_NORM_GP) and/or
  537 * expedited (RCU_OFL_TASKS_EXP_GP) grace period, or zero if none were.
 540 *
 541 * The caller must hold rnp->lock with irqs disabled.
 542 */
 543static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 544                                     struct rcu_node *rnp,
 545                                     struct rcu_data *rdp)
 546{
 547        struct list_head *lp;
 548        struct list_head *lp_root;
 549        int retval = 0;
 550        struct rcu_node *rnp_root = rcu_get_root(rsp);
 551        struct task_struct *t;
 552
 553        if (rnp == rnp_root) {
 554                WARN_ONCE(1, "Last CPU thought to be offlined?");
 555                return 0;  /* Shouldn't happen: at least one CPU online. */
 556        }
 557
 558        /* If we are on an internal node, complain bitterly. */
 559        WARN_ON_ONCE(rnp != rdp->mynode);
 560
 561        /*
 562         * Move tasks up to root rcu_node.  Don't try to get fancy for
 563         * this corner-case operation -- just put this node's tasks
 564         * at the head of the root node's list, and update the root node's
 565         * ->gp_tasks and ->exp_tasks pointers to those of this node's,
 566         * if non-NULL.  This might result in waiting for more tasks than
 567         * absolutely necessary, but this is a good performance/complexity
 568         * tradeoff.
 569         */
 570        if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
 571                retval |= RCU_OFL_TASKS_NORM_GP;
 572        if (rcu_preempted_readers_exp(rnp))
 573                retval |= RCU_OFL_TASKS_EXP_GP;
 574        lp = &rnp->blkd_tasks;
 575        lp_root = &rnp_root->blkd_tasks;
 576        while (!list_empty(lp)) {
 577                t = list_entry(lp->next, typeof(*t), rcu_node_entry);
 578                raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
 579                smp_mb__after_unlock_lock();
 580                list_del(&t->rcu_node_entry);
 581                t->rcu_blocked_node = rnp_root;
 582                list_add(&t->rcu_node_entry, lp_root);
 583                if (&t->rcu_node_entry == rnp->gp_tasks)
 584                        rnp_root->gp_tasks = rnp->gp_tasks;
 585                if (&t->rcu_node_entry == rnp->exp_tasks)
 586                        rnp_root->exp_tasks = rnp->exp_tasks;
 587#ifdef CONFIG_RCU_BOOST
 588                if (&t->rcu_node_entry == rnp->boost_tasks)
 589                        rnp_root->boost_tasks = rnp->boost_tasks;
 590#endif /* #ifdef CONFIG_RCU_BOOST */
 591                raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
 592        }
 593
 594        rnp->gp_tasks = NULL;
 595        rnp->exp_tasks = NULL;
 596#ifdef CONFIG_RCU_BOOST
 597        rnp->boost_tasks = NULL;
 598        /*
 599         * In case root is being boosted and leaf was not.  Make sure
 600         * that we boost the tasks blocking the current grace period
 601         * in this case.
 602         */
 603        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
 604        smp_mb__after_unlock_lock();
 605        if (rnp_root->boost_tasks != NULL &&
 606            rnp_root->boost_tasks != rnp_root->gp_tasks &&
 607            rnp_root->boost_tasks != rnp_root->exp_tasks)
 608                rnp_root->boost_tasks = rnp_root->gp_tasks;
 609        raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
 610#endif /* #ifdef CONFIG_RCU_BOOST */
 611
 612        return retval;
 613}
 614
 615#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 616
 617/*
 618 * Check for a quiescent state from the current CPU.  When a task blocks,
 619 * the task is recorded in the corresponding CPU's rcu_node structure,
 620 * which is checked elsewhere.
 621 *
 622 * Caller must disable hard irqs.
 623 */
 624static void rcu_preempt_check_callbacks(void)
 625{
 626        struct task_struct *t = current;
 627
 628        if (t->rcu_read_lock_nesting == 0) {
 629                rcu_preempt_qs();
 630                return;
 631        }
 632        if (t->rcu_read_lock_nesting > 0 &&
 633            __this_cpu_read(rcu_preempt_data.qs_pending) &&
 634            !__this_cpu_read(rcu_preempt_data.passed_quiesce))
 635                t->rcu_read_unlock_special.b.need_qs = true;
 636}
 637
 638#ifdef CONFIG_RCU_BOOST
 639
 640static void rcu_preempt_do_callbacks(void)
 641{
 642        rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 643}
 644
 645#endif /* #ifdef CONFIG_RCU_BOOST */
 646
 647/*
 648 * Queue a preemptible-RCU callback for invocation after a grace period.
 649 */
 650void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 651{
 652        __call_rcu(head, func, &rcu_preempt_state, -1, 0);
 653}
 654EXPORT_SYMBOL_GPL(call_rcu);
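/*
 * Illustrative sketch (not part of the original file): the canonical
 * call_rcu() usage pattern embeds an rcu_head in the protected structure
 * and reclaims the structure from the callback, which runs only after a
 * full grace period has elapsed.  All identifiers below (struct foo,
 * foo_reclaim(), foo_remove()) are hypothetical; kfree() assumes
 * <linux/slab.h>.
 */
#if 0	/* usage sketch only */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);	/* Safe: all pre-existing readers have finished. */
}

static void foo_remove(struct foo *fp)
{
	/* Unlink fp from every reader-visible structure first (not shown). */
	call_rcu(&fp->rcu, foo_reclaim);
}
#endif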
 655
 656/**
 657 * synchronize_rcu - wait until a grace period has elapsed.
 658 *
 659 * Control will return to the caller some time after a full grace
 660 * period has elapsed, in other words after all currently executing RCU
 661 * read-side critical sections have completed.  Note, however, that
 662 * upon return from synchronize_rcu(), the caller might well be executing
 663 * concurrently with new RCU read-side critical sections that began while
 664 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 665 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 666 *
 667 * See the description of synchronize_sched() for more detailed information
 668 * on memory ordering guarantees.
 669 */
 670void synchronize_rcu(void)
 671{
 672        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
 673                           !lock_is_held(&rcu_lock_map) &&
 674                           !lock_is_held(&rcu_sched_lock_map),
 675                           "Illegal synchronize_rcu() in RCU read-side critical section");
 676        if (!rcu_scheduler_active)
 677                return;
 678        if (rcu_expedited)
 679                synchronize_rcu_expedited();
 680        else
 681                wait_rcu_gp(call_rcu);
 682}
 683EXPORT_SYMBOL_GPL(synchronize_rcu);
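/*
 * Illustrative sketch (not part of the original file): a minimal
 * reader/updater pairing for the API documented above.  Readers bracket
 * their accesses with rcu_read_lock()/rcu_read_unlock(); the updater
 * publishes a new version with rcu_assign_pointer() and waits for
 * pre-existing readers with synchronize_rcu() before freeing the old one.
 * The identifiers (struct foo, gbl_foo, reader(), updater()) are
 * hypothetical.
 */
#if 0	/* usage sketch only */
struct foo {
	int data;
};
static struct foo __rcu *gbl_foo;

static int reader(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();			/* Begin read-side critical section. */
	p = rcu_dereference(gbl_foo);
	if (p)
		val = p->data;
	rcu_read_unlock();			/* End read-side critical section. */
	return val;
}

static void updater(struct foo *newp)
{
	struct foo *oldp;

	/* The update-side lock is assumed to be held here (not shown). */
	oldp = rcu_dereference_protected(gbl_foo, 1);
	rcu_assign_pointer(gbl_foo, newp);	/* Publish the new version. */
	synchronize_rcu();			/* Wait out pre-existing readers. */
	kfree(oldp);				/* Now safe to reclaim. */
}
#endif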
 684
 685static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
 686static unsigned long sync_rcu_preempt_exp_count;
 687static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 688
 689/*
 690 * Return non-zero if there are any tasks in RCU read-side critical
 691 * sections blocking the current preemptible-RCU expedited grace period.
 692 * If there is no preemptible-RCU expedited grace period currently in
 693 * progress, returns zero unconditionally.
 694 */
 695static int rcu_preempted_readers_exp(struct rcu_node *rnp)
 696{
 697        return rnp->exp_tasks != NULL;
 698}
 699
 700/*
  701 * Return non-zero if there is no RCU expedited grace period in progress
 702 * for the specified rcu_node structure, in other words, if all CPUs and
 703 * tasks covered by the specified rcu_node structure have done their bit
 704 * for the current expedited grace period.  Works only for preemptible
  705 * RCU -- other RCU implementations use other means.
 706 *
 707 * Caller must hold sync_rcu_preempt_exp_mutex.
 708 */
 709static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 710{
 711        return !rcu_preempted_readers_exp(rnp) &&
 712               ACCESS_ONCE(rnp->expmask) == 0;
 713}
 714
 715/*
 716 * Report the exit from RCU read-side critical section for the last task
 717 * that queued itself during or before the current expedited preemptible-RCU
 718 * grace period.  This event is reported either to the rcu_node structure on
 719 * which the task was queued or to one of that rcu_node structure's ancestors,
 720 * recursively up the tree.  (Calm down, calm down, we do the recursion
 721 * iteratively!)
 722 *
 723 * Most callers will set the "wake" flag, but the task initiating the
 724 * expedited grace period need not wake itself.
 725 *
 726 * Caller must hold sync_rcu_preempt_exp_mutex.
 727 */
 728static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 729                               bool wake)
 730{
 731        unsigned long flags;
 732        unsigned long mask;
 733
 734        raw_spin_lock_irqsave(&rnp->lock, flags);
 735        smp_mb__after_unlock_lock();
 736        for (;;) {
 737                if (!sync_rcu_preempt_exp_done(rnp)) {
 738                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 739                        break;
 740                }
 741                if (rnp->parent == NULL) {
 742                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 743                        if (wake) {
 744                                smp_mb(); /* EGP done before wake_up(). */
 745                                wake_up(&sync_rcu_preempt_exp_wq);
 746                        }
 747                        break;
 748                }
 749                mask = rnp->grpmask;
 750                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 751                rnp = rnp->parent;
 752                raw_spin_lock(&rnp->lock); /* irqs already disabled */
 753                smp_mb__after_unlock_lock();
 754                rnp->expmask &= ~mask;
 755        }
 756}
 757
 758/*
 759 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 760 * grace period for the specified rcu_node structure.  If there are no such
 761 * tasks, report it up the rcu_node hierarchy.
 762 *
 763 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 764 * CPU hotplug operations.
 765 */
 766static void
 767sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 768{
 769        unsigned long flags;
 770        int must_wait = 0;
 771
 772        raw_spin_lock_irqsave(&rnp->lock, flags);
 773        smp_mb__after_unlock_lock();
 774        if (list_empty(&rnp->blkd_tasks)) {
 775                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 776        } else {
 777                rnp->exp_tasks = rnp->blkd_tasks.next;
 778                rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 779                must_wait = 1;
 780        }
 781        if (!must_wait)
 782                rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 783}
 784
 785/**
 786 * synchronize_rcu_expedited - Brute-force RCU grace period
 787 *
 788 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 789 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 790 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 791 * significant time on all CPUs and is unfriendly to real-time workloads,
 792 * so is thus not recommended for any sort of common-case code.
 793 * In fact, if you are using synchronize_rcu_expedited() in a loop,
  794 * please restructure your code to batch your updates, and then use a
 795 * single synchronize_rcu() instead.
 796 */
 797void synchronize_rcu_expedited(void)
 798{
 799        unsigned long flags;
 800        struct rcu_node *rnp;
 801        struct rcu_state *rsp = &rcu_preempt_state;
 802        unsigned long snap;
 803        int trycount = 0;
 804
 805        smp_mb(); /* Caller's modifications seen first by other CPUs. */
 806        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
 807        smp_mb(); /* Above access cannot bleed into critical section. */
 808
 809        /*
 810         * Block CPU-hotplug operations.  This means that any CPU-hotplug
 811         * operation that finds an rcu_node structure with tasks in the
 812         * process of being boosted will know that all tasks blocking
 813         * this expedited grace period will already be in the process of
 814         * being boosted.  This simplifies the process of moving tasks
 815         * from leaf to root rcu_node structures.
 816         */
 817        if (!try_get_online_cpus()) {
 818                /* CPU-hotplug operation in flight, fall back to normal GP. */
 819                wait_rcu_gp(call_rcu);
 820                return;
 821        }
 822
 823        /*
 824         * Acquire lock, falling back to synchronize_rcu() if too many
 825         * lock-acquisition failures.  Of course, if someone does the
 826         * expedited grace period for us, just leave.
 827         */
 828        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
 829                if (ULONG_CMP_LT(snap,
 830                    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
 831                        put_online_cpus();
 832                        goto mb_ret; /* Others did our work for us. */
 833                }
 834                if (trycount++ < 10) {
 835                        udelay(trycount * num_online_cpus());
 836                } else {
 837                        put_online_cpus();
 838                        wait_rcu_gp(call_rcu);
 839                        return;
 840                }
 841        }
 842        if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
 843                put_online_cpus();
 844                goto unlock_mb_ret; /* Others did our work for us. */
 845        }
 846
 847        /* force all RCU readers onto ->blkd_tasks lists. */
 848        synchronize_sched_expedited();
 849
 850        /* Initialize ->expmask for all non-leaf rcu_node structures. */
 851        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
 852                raw_spin_lock_irqsave(&rnp->lock, flags);
 853                smp_mb__after_unlock_lock();
 854                rnp->expmask = rnp->qsmaskinit;
 855                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 856        }
 857
 858        /* Snapshot current state of ->blkd_tasks lists. */
 859        rcu_for_each_leaf_node(rsp, rnp)
 860                sync_rcu_preempt_exp_init(rsp, rnp);
 861        if (NUM_RCU_NODES > 1)
 862                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
 863
 864        put_online_cpus();
 865
 866        /* Wait for snapshotted ->blkd_tasks lists to drain. */
 867        rnp = rcu_get_root(rsp);
 868        wait_event(sync_rcu_preempt_exp_wq,
 869                   sync_rcu_preempt_exp_done(rnp));
 870
 871        /* Clean up and exit. */
 872        smp_mb(); /* ensure expedited GP seen before counter increment. */
 873        ACCESS_ONCE(sync_rcu_preempt_exp_count) =
 874                                        sync_rcu_preempt_exp_count + 1;
 875unlock_mb_ret:
 876        mutex_unlock(&sync_rcu_preempt_exp_mutex);
 877mb_ret:
 878        smp_mb(); /* ensure subsequent action seen after grace period. */
 879}
 880EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
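/*
 * Illustrative sketch (not part of the original file) of the batching
 * advice in the header comment above: rather than paying for one grace
 * period per element, unpublish everything first and then wait once.
 * The identifiers (struct foo, slots[], clear_all_slots()) are
 * hypothetical.
 */
#if 0	/* usage sketch only */
#define NSLOTS 16
static struct foo __rcu *slots[NSLOTS];

static void clear_all_slots(void)
{
	struct foo *old[NSLOTS];
	int i;

	for (i = 0; i < NSLOTS; i++) {
		old[i] = rcu_dereference_protected(slots[i], 1);
		rcu_assign_pointer(slots[i], NULL);	/* Unpublish each entry... */
	}
	synchronize_rcu();				/* ...wait for readers once... */
	for (i = 0; i < NSLOTS; i++)
		kfree(old[i]);				/* ...then reclaim the lot. */
}
#endif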
 881
 882/**
 883 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 884 *
 885 * Note that this primitive does not necessarily wait for an RCU grace period
 886 * to complete.  For example, if there are no RCU callbacks queued anywhere
 887 * in the system, then rcu_barrier() is within its rights to return
 888 * immediately, without waiting for anything, much less an RCU grace period.
 889 */
 890void rcu_barrier(void)
 891{
 892        _rcu_barrier(&rcu_preempt_state);
 893}
 894EXPORT_SYMBOL_GPL(rcu_barrier);
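/*
 * Illustrative sketch (not part of the original file): the classic reason
 * to call rcu_barrier() is module unload -- any callbacks the module has
 * posted via call_rcu() must have finished executing before the module
 * text and data they reference go away.  The function names are
 * hypothetical.
 */
#if 0	/* usage sketch only */
static void __exit foo_module_exit(void)
{
	foo_stop_posting_callbacks();	/* Ensure no new call_rcu() invocations... */
	rcu_barrier();			/* ...then wait for the queued ones to run. */
	foo_free_remaining_state();
}
module_exit(foo_module_exit);
#endif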
 895
 896/*
 897 * Initialize preemptible RCU's state structures.
 898 */
 899static void __init __rcu_init_preempt(void)
 900{
 901        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 902}
 903
 904/*
 905 * Check for a task exiting while in a preemptible-RCU read-side
 906 * critical section, clean up if so.  No need to issue warnings,
 907 * as debug_check_no_locks_held() already does this if lockdep
 908 * is enabled.
 909 */
 910void exit_rcu(void)
 911{
 912        struct task_struct *t = current;
 913
 914        if (likely(list_empty(&current->rcu_node_entry)))
 915                return;
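        /*
         * The task is still queued on some rcu_node structure's ->blkd_tasks
         * list.  Pretend to be in a single preempted RCU read-side critical
         * section and invoke the real __rcu_read_unlock() slow path below so
         * that the task dequeues itself and reports any quiescent states it
         * was holding up.
         */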
 916        t->rcu_read_lock_nesting = 1;
 917        barrier();
 918        t->rcu_read_unlock_special.b.blocked = true;
 919        __rcu_read_unlock();
 920}
 921
 922#else /* #ifdef CONFIG_PREEMPT_RCU */
 923
 924static struct rcu_state *rcu_state_p = &rcu_sched_state;
 925
 926/*
 927 * Tell them what RCU they are running.
 928 */
 929static void __init rcu_bootup_announce(void)
 930{
 931        pr_info("Hierarchical RCU implementation.\n");
 932        rcu_bootup_announce_oddness();
 933}
 934
 935/*
 936 * Return the number of RCU batches processed thus far for debug & stats.
 937 */
 938long rcu_batches_completed(void)
 939{
 940        return rcu_batches_completed_sched();
 941}
 942EXPORT_SYMBOL_GPL(rcu_batches_completed);
 943
 944/*
 945 * Because preemptible RCU does not exist, we never have to check for
 946 * CPUs being in quiescent states.
 947 */
 948static void rcu_preempt_note_context_switch(void)
 949{
 950}
 951
 952/*
 953 * Because preemptible RCU does not exist, there are never any preempted
 954 * RCU readers.
 955 */
 956static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 957{
 958        return 0;
 959}
 960
 961#ifdef CONFIG_HOTPLUG_CPU
 962
 963/* Because preemptible RCU does not exist, no quieting of tasks. */
 964static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 965        __releases(rnp->lock)
 966{
 967        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 968}
 969
 970#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 971
 972/*
 973 * Because preemptible RCU does not exist, we never have to check for
 974 * tasks blocked within RCU read-side critical sections.
 975 */
 976static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 977{
 978}
 979
 980/*
 981 * Because preemptible RCU does not exist, we never have to check for
 982 * tasks blocked within RCU read-side critical sections.
 983 */
 984static int rcu_print_task_stall(struct rcu_node *rnp)
 985{
 986        return 0;
 987}
 988
 989/*
 990 * Because there is no preemptible RCU, there can be no readers blocked,
 991 * so there is no need to check for blocked tasks.  So check only for
 992 * bogus qsmask values.
 993 */
 994static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 995{
 996        WARN_ON_ONCE(rnp->qsmask);
 997}
 998
 999#ifdef CONFIG_HOTPLUG_CPU
1000
1001/*
1002 * Because preemptible RCU does not exist, it never needs to migrate
1003 * tasks that were blocked within RCU read-side critical sections, and
1004 * such non-existent tasks cannot possibly have been blocking the current
1005 * grace period.
1006 */
1007static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1008                                     struct rcu_node *rnp,
1009                                     struct rcu_data *rdp)
1010{
1011        return 0;
1012}
1013
1014#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1015
1016/*
1017 * Because preemptible RCU does not exist, it never has any callbacks
1018 * to check.
1019 */
1020static void rcu_preempt_check_callbacks(void)
1021{
1022}
1023
1024/*
1025 * Wait for an rcu-preempt grace period, but make it happen quickly.
1026 * But because preemptible RCU does not exist, map to rcu-sched.
1027 */
1028void synchronize_rcu_expedited(void)
1029{
1030        synchronize_sched_expedited();
1031}
1032EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1033
1034#ifdef CONFIG_HOTPLUG_CPU
1035
1036/*
1037 * Because preemptible RCU does not exist, there is never any need to
1038 * report on tasks preempted in RCU read-side critical sections during
1039 * expedited RCU grace periods.
1040 */
1041static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1042                               bool wake)
1043{
1044}
1045
1046#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1047
1048/*
1049 * Because preemptible RCU does not exist, rcu_barrier() is just
1050 * another name for rcu_barrier_sched().
1051 */
1052void rcu_barrier(void)
1053{
1054        rcu_barrier_sched();
1055}
1056EXPORT_SYMBOL_GPL(rcu_barrier);
1057
1058/*
1059 * Because preemptible RCU does not exist, it need not be initialized.
1060 */
1061static void __init __rcu_init_preempt(void)
1062{
1063}
1064
1065/*
1066 * Because preemptible RCU does not exist, tasks cannot possibly exit
1067 * while in preemptible RCU read-side critical sections.
1068 */
1069void exit_rcu(void)
1070{
1071}
1072
1073#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
1074
1075#ifdef CONFIG_RCU_BOOST
1076
1077#include "../locking/rtmutex_common.h"
1078
1079#ifdef CONFIG_RCU_TRACE
1080
1081static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1082{
1083        if (list_empty(&rnp->blkd_tasks))
1084                rnp->n_balk_blkd_tasks++;
1085        else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1086                rnp->n_balk_exp_gp_tasks++;
1087        else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1088                rnp->n_balk_boost_tasks++;
1089        else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1090                rnp->n_balk_notblocked++;
1091        else if (rnp->gp_tasks != NULL &&
1092                 ULONG_CMP_LT(jiffies, rnp->boost_time))
1093                rnp->n_balk_notyet++;
1094        else
1095                rnp->n_balk_nos++;
1096}
1097
1098#else /* #ifdef CONFIG_RCU_TRACE */
1099
1100static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1101{
1102}
1103
1104#endif /* #else #ifdef CONFIG_RCU_TRACE */
1105
1106static void rcu_wake_cond(struct task_struct *t, int status)
1107{
1108        /*
1109         * If the thread is yielding, only wake it when this
 1110         * is invoked from idle.
1111         */
1112        if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
1113                wake_up_process(t);
1114}
1115
1116/*
1117 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1118 * or ->boost_tasks, advancing the pointer to the next task in the
1119 * ->blkd_tasks list.
1120 *
1121 * Note that irqs must be enabled: boosting the task can block.
1122 * Returns 1 if there are more tasks needing to be boosted.
1123 */
1124static int rcu_boost(struct rcu_node *rnp)
1125{
1126        unsigned long flags;
1127        struct task_struct *t;
1128        struct list_head *tb;
1129
1130        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1131                return 0;  /* Nothing left to boost. */
1132
1133        raw_spin_lock_irqsave(&rnp->lock, flags);
1134        smp_mb__after_unlock_lock();
1135
1136        /*
1137         * Recheck under the lock: all tasks in need of boosting
1138         * might exit their RCU read-side critical sections on their own.
1139         */
1140        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1141                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1142                return 0;
1143        }
1144
1145        /*
1146         * Preferentially boost tasks blocking expedited grace periods.
1147         * This cannot starve the normal grace periods because a second
1148         * expedited grace period must boost all blocked tasks, including
1149         * those blocking the pre-existing normal grace period.
1150         */
1151        if (rnp->exp_tasks != NULL) {
1152                tb = rnp->exp_tasks;
1153                rnp->n_exp_boosts++;
1154        } else {
1155                tb = rnp->boost_tasks;
1156                rnp->n_normal_boosts++;
1157        }
1158        rnp->n_tasks_boosted++;
1159
1160        /*
1161         * We boost task t by manufacturing an rt_mutex that appears to
1162         * be held by task t.  We leave a pointer to that rt_mutex where
1163         * task t can find it, and task t will release the mutex when it
1164         * exits its outermost RCU read-side critical section.  Then
1165         * simply acquiring this artificial rt_mutex will boost task
1166         * t's priority.  (Thanks to tglx for suggesting this approach!)
1167         *
1168         * Note that task t must acquire rnp->lock to remove itself from
1169         * the ->blkd_tasks list, which it will do from exit() if from
1170         * nowhere else.  We therefore are guaranteed that task t will
1171         * stay around at least until we drop rnp->lock.  Note that
1172         * rnp->lock also resolves races between our priority boosting
1173         * and task t's exiting its outermost RCU read-side critical
1174         * section.
1175         */
1176        t = container_of(tb, struct task_struct, rcu_node_entry);
1177        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1178        init_completion(&rnp->boost_completion);
1179        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1180        /* Lock only for side effect: boosts task t's priority. */
1181        rt_mutex_lock(&rnp->boost_mtx);
1182        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
1183
1184        /* Wait for boostee to be done w/boost_mtx before reinitializing. */
1185        wait_for_completion(&rnp->boost_completion);
1186
1187        return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1188               ACCESS_ONCE(rnp->boost_tasks) != NULL;
1189}
1190
1191/*
1192 * Priority-boosting kthread.  One per leaf rcu_node and one for the
1193 * root rcu_node.
1194 */
1195static int rcu_boost_kthread(void *arg)
1196{
1197        struct rcu_node *rnp = (struct rcu_node *)arg;
1198        int spincnt = 0;
1199        int more2boost;
1200
1201        trace_rcu_utilization(TPS("Start boost kthread@init"));
1202        for (;;) {
1203                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1204                trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1205                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1206                trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1207                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1208                more2boost = rcu_boost(rnp);
1209                if (more2boost)
1210                        spincnt++;
1211                else
1212                        spincnt = 0;
1213                if (spincnt > 10) {
1214                        rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1215                        trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1216                        schedule_timeout_interruptible(2);
1217                        trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1218                        spincnt = 0;
1219                }
1220        }
1221        /* NOTREACHED */
1222        trace_rcu_utilization(TPS("End boost kthread@notreached"));
1223        return 0;
1224}
1225
1226/*
1227 * Check to see if it is time to start boosting RCU readers that are
1228 * blocking the current grace period, and, if so, tell the per-rcu_node
1229 * kthread to start boosting them.  If there is an expedited grace
1230 * period in progress, it is always time to boost.
1231 *
1232 * The caller must hold rnp->lock, which this function releases.
1233 * The ->boost_kthread_task is immortal, so we don't need to worry
1234 * about it going away.
1235 */
1236static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1237        __releases(rnp->lock)
1238{
1239        struct task_struct *t;
1240
1241        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1242                rnp->n_balk_exp_gp_tasks++;
1243                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1244                return;
1245        }
1246        if (rnp->exp_tasks != NULL ||
1247            (rnp->gp_tasks != NULL &&
1248             rnp->boost_tasks == NULL &&
1249             rnp->qsmask == 0 &&
1250             ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1251                if (rnp->exp_tasks == NULL)
1252                        rnp->boost_tasks = rnp->gp_tasks;
1253                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1254                t = rnp->boost_kthread_task;
1255                if (t)
1256                        rcu_wake_cond(t, rnp->boost_kthread_status);
1257        } else {
1258                rcu_initiate_boost_trace(rnp);
1259                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1260        }
1261}
1262
1263/*
1264 * Wake up the per-CPU kthread to invoke RCU callbacks.
1265 */
1266static void invoke_rcu_callbacks_kthread(void)
1267{
1268        unsigned long flags;
1269
1270        local_irq_save(flags);
1271        __this_cpu_write(rcu_cpu_has_work, 1);
1272        if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1273            current != __this_cpu_read(rcu_cpu_kthread_task)) {
1274                rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1275                              __this_cpu_read(rcu_cpu_kthread_status));
1276        }
1277        local_irq_restore(flags);
1278}
1279
1280/*
1281 * Is the current CPU running the RCU-callbacks kthread?
1282 * Caller must have preemption disabled.
1283 */
1284static bool rcu_is_callbacks_kthread(void)
1285{
1286        return __this_cpu_read(rcu_cpu_kthread_task) == current;
1287}
1288
1289#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
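/*
 * Worked example (assuming the Kconfig default of CONFIG_RCU_BOOST_DELAY=500
 * milliseconds): with HZ=250 this evaluates to DIV_ROUND_UP(500 * 250, 1000)
 * = 125 jiffies, so boosting for a normal grace period is not considered
 * until half a second after that grace period begins.
 */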
1290
1291/*
1292 * Do priority-boost accounting for the start of a new grace period.
1293 */
1294static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1295{
1296        rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1297}
1298
1299/*
1300 * Create an RCU-boost kthread for the specified node if one does not
1301 * already exist.  We only create this kthread for preemptible RCU.
1302 * Returns zero if all is well, a negated errno otherwise.
1303 */
1304static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1305                                                 struct rcu_node *rnp)
1306{
1307        int rnp_index = rnp - &rsp->node[0];
1308        unsigned long flags;
1309        struct sched_param sp;
1310        struct task_struct *t;
1311
1312        if (&rcu_preempt_state != rsp)
1313                return 0;
1314
1315        if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1316                return 0;
1317
1318        rsp->boost = 1;
1319        if (rnp->boost_kthread_task != NULL)
1320                return 0;
1321        t = kthread_create(rcu_boost_kthread, (void *)rnp,
1322                           "rcub/%d", rnp_index);
1323        if (IS_ERR(t))
1324                return PTR_ERR(t);
1325        raw_spin_lock_irqsave(&rnp->lock, flags);
1326        smp_mb__after_unlock_lock();
1327        rnp->boost_kthread_task = t;
1328        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1329        sp.sched_priority = kthread_prio;
1330        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1331        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1332        return 0;
1333}
1334
1335static void rcu_kthread_do_work(void)
1336{
1337        rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1338        rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1339        rcu_preempt_do_callbacks();
1340}
1341
1342static void rcu_cpu_kthread_setup(unsigned int cpu)
1343{
1344        struct sched_param sp;
1345
1346        sp.sched_priority = kthread_prio;
1347        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1348}
1349
1350static void rcu_cpu_kthread_park(unsigned int cpu)
1351{
1352        per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1353}
1354
1355static int rcu_cpu_kthread_should_run(unsigned int cpu)
1356{
1357        return __this_cpu_read(rcu_cpu_has_work);
1358}
1359
1360/*
1361 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1362 * RCU softirq used in flavors and configurations of RCU that do not
1363 * support RCU priority boosting.
1364 */
1365static void rcu_cpu_kthread(unsigned int cpu)
1366{
1367        unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1368        char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1369        int spincnt;
1370
1371        for (spincnt = 0; spincnt < 10; spincnt++) {
1372                trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1373                local_bh_disable();
1374                *statusp = RCU_KTHREAD_RUNNING;
1375                this_cpu_inc(rcu_cpu_kthread_loops);
1376                local_irq_disable();
1377                work = *workp;
1378                *workp = 0;
1379                local_irq_enable();
1380                if (work)
1381                        rcu_kthread_do_work();
1382                local_bh_enable();
1383                if (*workp == 0) {
1384                        trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1385                        *statusp = RCU_KTHREAD_WAITING;
1386                        return;
1387                }
1388        }
1389        *statusp = RCU_KTHREAD_YIELDING;
1390        trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1391        schedule_timeout_interruptible(2);
1392        trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1393        *statusp = RCU_KTHREAD_WAITING;
1394}
1395
1396/*
1397 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1398 * served by the rcu_node in question.  The CPU hotplug lock is still
1399 * held, so the value of rnp->qsmaskinit will be stable.
1400 *
 1401 * We don't include outgoingcpu in the affinity set; pass -1 if there is
1402 * no outgoing CPU.  If there are no CPUs left in the affinity set,
1403 * this function allows the kthread to execute on any CPU.
1404 */
1405static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1406{
1407        struct task_struct *t = rnp->boost_kthread_task;
1408        unsigned long mask = rnp->qsmaskinit;
1409        cpumask_var_t cm;
1410        int cpu;
1411
1412        if (!t)
1413                return;
1414        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1415                return;
1416        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1417                if ((mask & 0x1) && cpu != outgoingcpu)
1418                        cpumask_set_cpu(cpu, cm);
1419        if (cpumask_weight(cm) == 0) {
1420                cpumask_setall(cm);
1421                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1422                        cpumask_clear_cpu(cpu, cm);
1423                WARN_ON_ONCE(cpumask_weight(cm) == 0);
1424        }
1425        set_cpus_allowed_ptr(t, cm);
1426        free_cpumask_var(cm);
1427}
1428
1429static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1430        .store                  = &rcu_cpu_kthread_task,
1431        .thread_should_run      = rcu_cpu_kthread_should_run,
1432        .thread_fn              = rcu_cpu_kthread,
1433        .thread_comm            = "rcuc/%u",
1434        .setup                  = rcu_cpu_kthread_setup,
1435        .park                   = rcu_cpu_kthread_park,
1436};
1437
1438/*
1439 * Spawn boost kthreads -- called as soon as the scheduler is running.
1440 */
1441static void __init rcu_spawn_boost_kthreads(void)
1442{
1443        struct rcu_node *rnp;
1444        int cpu;
1445
1446        for_each_possible_cpu(cpu)
1447                per_cpu(rcu_cpu_has_work, cpu) = 0;
1448        BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1449        rnp = rcu_get_root(rcu_state_p);
1450        (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1451        if (NUM_RCU_NODES > 1) {
1452                rcu_for_each_leaf_node(rcu_state_p, rnp)
1453                        (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1454        }
1455}
1456
1457static void rcu_prepare_kthreads(int cpu)
1458{
1459        struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
1460        struct rcu_node *rnp = rdp->mynode;
1461
1462        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1463        if (rcu_scheduler_fully_active)
1464                (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1465}
1466
1467#else /* #ifdef CONFIG_RCU_BOOST */
1468
1469static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1470        __releases(rnp->lock)
1471{
1472        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1473}
1474
1475static void invoke_rcu_callbacks_kthread(void)
1476{
1477        WARN_ON_ONCE(1);
1478}
1479
1480static bool rcu_is_callbacks_kthread(void)
1481{
1482        return false;
1483}
1484
1485static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1486{
1487}
1488
1489static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1490{
1491}
1492
1493static void __init rcu_spawn_boost_kthreads(void)
1494{
1495}
1496
1497static void rcu_prepare_kthreads(int cpu)
1498{
1499}
1500
1501#endif /* #else #ifdef CONFIG_RCU_BOOST */
1502
1503#if !defined(CONFIG_RCU_FAST_NO_HZ)
1504
1505/*
1506 * Check to see if any future RCU-related work will need to be done
1507 * by the current CPU, even if none need be done immediately, returning
1508 * 1 if so.  This function is part of the RCU implementation; it is -not-
1509 * an exported member of the RCU API.
1510 *
1511 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1512 * any flavor of RCU.
1513 */
1514#ifndef CONFIG_RCU_NOCB_CPU_ALL
1515int rcu_needs_cpu(unsigned long *delta_jiffies)
1516{
1517        *delta_jiffies = ULONG_MAX;
1518        return rcu_cpu_has_callbacks(NULL);
1519}
1520#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1521
1522/*
1523 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1524 * after it.
1525 */
1526static void rcu_cleanup_after_idle(void)
1527{
1528}
1529
1530/*
1531 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1532 * is nothing.
1533 */
1534static void rcu_prepare_for_idle(void)
1535{
1536}
1537
1538/*
1539 * Don't bother keeping a running count of the number of RCU callbacks
1540 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1541 */
1542static void rcu_idle_count_callbacks_posted(void)
1543{
1544}
1545
1546#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1547
1548/*
1549 * This code is invoked when a CPU goes idle, at which point we want
1550 * to have the CPU do everything required for RCU so that it can enter
1551 * the energy-efficient dyntick-idle mode.  This is handled by a
1552 * state machine implemented by rcu_prepare_for_idle() below.
1553 *
1554 * The following two preprocessor symbols control this state machine:
1555 *
1556 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1557 *      to sleep in dyntick-idle mode with RCU callbacks pending.  This
1558 *      is sized to be roughly one RCU grace period.  Those energy-efficiency
1559 *      benchmarkers who might otherwise be tempted to set this to a large
1560 *      number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1561 *      system.  And if you are -that- concerned about energy efficiency,
1562 *      just power the system down and be done with it!
1563 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1564 *      permitted to sleep in dyntick-idle mode with only lazy RCU
1565 *      callbacks pending.  Setting this too high can OOM your system.
1566 *
1567 * The values below work well in practice.  If future workloads require
1568 * adjustment, they can be converted into kernel config parameters, though
1569 * making the state machine smarter might be a better option.
1570 */
1571#define RCU_IDLE_GP_DELAY 4             /* Roughly one grace period. */
1572#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
1573
1574static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1575module_param(rcu_idle_gp_delay, int, 0644);
1576static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1577module_param(rcu_idle_lazy_gp_delay, int, 0644);
1578
1579extern int tick_nohz_active;
1580
1581/*
1582 * Try to advance callbacks for all flavors of RCU on the current CPU, but
1583 * only if it has been a while since the last time we did so.  Afterwards,
1584 * if there are any callbacks ready for immediate invocation, return true.
1585 */
1586static bool __maybe_unused rcu_try_advance_all_cbs(void)
1587{
1588        bool cbs_ready = false;
1589        struct rcu_data *rdp;
1590        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1591        struct rcu_node *rnp;
1592        struct rcu_state *rsp;
1593
1594        /* Exit early if we advanced recently. */
1595        if (jiffies == rdtp->last_advance_all)
1596                return false;
1597        rdtp->last_advance_all = jiffies;
1598
1599        for_each_rcu_flavor(rsp) {
1600                rdp = this_cpu_ptr(rsp->rda);
1601                rnp = rdp->mynode;
1602
1603                /*
1604                 * Don't bother checking unless a grace period has
1605                 * completed since we last checked and there are
1606                 * callbacks not yet ready to invoke.
1607                 */
1608                if (rdp->completed != rnp->completed &&
1609                    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1610                        note_gp_changes(rsp, rdp);
1611
1612                if (cpu_has_callbacks_ready_to_invoke(rdp))
1613                        cbs_ready = true;
1614        }
1615        return cbs_ready;
1616}
1617
1618/*
1619 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1620 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1621 * caller to set the timeout based on whether or not there are non-lazy
1622 * callbacks.
1623 *
1624 * The caller must have disabled interrupts.
1625 */
1626#ifndef CONFIG_RCU_NOCB_CPU_ALL
1627int rcu_needs_cpu(unsigned long *dj)
1628{
1629        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1630
1631        /* Snapshot to detect later posting of non-lazy callback. */
1632        rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1633
1634        /* If no callbacks, RCU doesn't need the CPU. */
1635        if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
1636                *dj = ULONG_MAX;
1637                return 0;
1638        }
1639
1640        /* Attempt to advance callbacks. */
1641        if (rcu_try_advance_all_cbs()) {
1642                /* Some ready to invoke, so initiate later invocation. */
1643                invoke_rcu_core();
1644                return 1;
1645        }
1646        rdtp->last_accelerate = jiffies;
1647
1648        /* Request timer delay depending on laziness, and round. */
1649        if (!rdtp->all_lazy) {
1650                *dj = round_up(rcu_idle_gp_delay + jiffies,
1651                               rcu_idle_gp_delay) - jiffies;
1652        } else {
1653                *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1654        }
1655        return 0;
1656}
1657#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
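
    /*
     * To make the rounding above concrete (a sketch assuming the default
     * rcu_idle_gp_delay of 4 and a hypothetical jiffies of 1001): the
     * non-lazy case computes round_up(1001 + 4, 4) - 1001 = 1008 - 1001 = 7,
     * so the requested wakeup lands on a 4-jiffy boundary between 4 and 7
     * jiffies out.  The lazy case instead uses round_jiffies(), which
     * aligns the roughly-six-second deadline to a whole-second boundary so
     * that the resulting per-CPU timers tend to fire together.
     */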
1658
1659/*
1660 * Prepare a CPU for idle from an RCU perspective.  The first major task
1661 * is to sense whether nohz mode has been enabled or disabled via sysfs.
1662 * The second major task is to check to see if a non-lazy callback has
1663 * arrived at a CPU that previously had only lazy callbacks.  The third
1664 * major task is to accelerate (that is, assign grace-period numbers to)
1665 * any recently arrived callbacks.
1666 *
1667 * The caller must have disabled interrupts.
1668 */
1669static void rcu_prepare_for_idle(void)
1670{
1671#ifndef CONFIG_RCU_NOCB_CPU_ALL
1672        bool needwake;
1673        struct rcu_data *rdp;
1674        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1675        struct rcu_node *rnp;
1676        struct rcu_state *rsp;
1677        int tne;
1678
1679        /* Handle nohz enablement switches conservatively. */
1680        tne = ACCESS_ONCE(tick_nohz_active);
1681        if (tne != rdtp->tick_nohz_enabled_snap) {
1682                if (rcu_cpu_has_callbacks(NULL))
1683                        invoke_rcu_core(); /* force nohz to see update. */
1684                rdtp->tick_nohz_enabled_snap = tne;
1685                return;
1686        }
1687        if (!tne)
1688                return;
1689
1690        /* If this is a no-CBs CPU, no callbacks, just return. */
1691        if (rcu_is_nocb_cpu(smp_processor_id()))
1692                return;
1693
1694        /*
1695         * If a non-lazy callback arrived at a CPU having only lazy
1696         * callbacks, invoke RCU core for the side-effect of recalculating
1697         * idle duration on re-entry to idle.
1698         */
1699        if (rdtp->all_lazy &&
1700            rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1701                rdtp->all_lazy = false;
1702                rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1703                invoke_rcu_core();
1704                return;
1705        }
1706
1707        /*
1708         * If we have not yet accelerated this jiffy, accelerate all
1709         * callbacks on this CPU.
1710         */
1711        if (rdtp->last_accelerate == jiffies)
1712                return;
1713        rdtp->last_accelerate = jiffies;
1714        for_each_rcu_flavor(rsp) {
1715                rdp = this_cpu_ptr(rsp->rda);
1716                if (!*rdp->nxttail[RCU_DONE_TAIL])
1717                        continue;
1718                rnp = rdp->mynode;
1719                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1720                smp_mb__after_unlock_lock();
1721                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
1722                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1723                if (needwake)
1724                        rcu_gp_kthread_wake(rsp);
1725        }
1726#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1727}
1728
1729/*
1730 * Clean up for exit from idle.  Attempt to advance callbacks based on
1731 * any grace periods that elapsed while the CPU was idle, and if any
1732 * callbacks are now ready to invoke, initiate invocation.
1733 */
1734static void rcu_cleanup_after_idle(void)
1735{
1736#ifndef CONFIG_RCU_NOCB_CPU_ALL
1737        if (rcu_is_nocb_cpu(smp_processor_id()))
1738                return;
1739        if (rcu_try_advance_all_cbs())
1740                invoke_rcu_core();
1741#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1742}
1743
1744/*
1745 * Keep a running count of the number of non-lazy callbacks posted
1746 * on this CPU.  This running counter (which is never decremented) allows
1747 * rcu_prepare_for_idle() to detect when something out of the idle loop
1748 * posts a callback, even if an equal number of callbacks are invoked.
1749 * Of course, callbacks should only be posted from within a trace event
1750 * designed to be called from idle or from within RCU_NONIDLE().
1751 */
1752static void rcu_idle_count_callbacks_posted(void)
1753{
1754        __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1755}
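
    /*
     * A concrete illustration of the snapshot scheme (hypothetical counts):
     * rcu_needs_cpu() records nonlazy_posted_snap = 5 on entry to idle with
     * only lazy callbacks queued.  If a non-lazy callback is then posted
     * from the idle loop, nonlazy_posted becomes 6; because the counter
     * never decreases, rcu_prepare_for_idle() sees 6 != 5 on the next idle
     * entry and invokes the RCU core so that the shorter non-lazy timeout
     * is used, even if an equal number of callbacks was invoked meanwhile.
     */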
1756
1757/*
1758 * Data for flushing lazy RCU callbacks at OOM time.
1759 */
1760static atomic_t oom_callback_count;
1761static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1762
1763/*
1764 * RCU OOM callback -- decrement the outstanding count and deliver the
1765 * wake-up if we are the last one.
1766 */
1767static void rcu_oom_callback(struct rcu_head *rhp)
1768{
1769        if (atomic_dec_and_test(&oom_callback_count))
1770                wake_up(&oom_callback_wq);
1771}
1772
1773/*
1774 * Post an rcu_oom_notify callback on the current CPU if it has at
1775 * least one lazy callback.  This will unnecessarily post callbacks
1776 * to CPUs that already have a non-lazy callback at the end of their
1777 * callback list, but this is an infrequent operation, so accept some
1778 * extra overhead to keep things simple.
1779 */
1780static void rcu_oom_notify_cpu(void *unused)
1781{
1782        struct rcu_state *rsp;
1783        struct rcu_data *rdp;
1784
1785        for_each_rcu_flavor(rsp) {
1786                rdp = raw_cpu_ptr(rsp->rda);
1787                if (rdp->qlen_lazy != 0) {
1788                        atomic_inc(&oom_callback_count);
1789                        rsp->call(&rdp->oom_head, rcu_oom_callback);
1790                }
1791        }
1792}
1793
1794/*
1795 * If low on memory, ensure that each CPU has a non-lazy callback.
1796 * This will wake up CPUs that have only lazy callbacks, in turn
1797 * ensuring that they free up the corresponding memory in a timely manner.
1798 * Because an uncertain amount of memory will be freed in some uncertain
1799 * timeframe, we do not claim to have freed anything.
1800 */
1801static int rcu_oom_notify(struct notifier_block *self,
1802                          unsigned long notused, void *nfreed)
1803{
1804        int cpu;
1805
1806        /* Wait for callbacks from earlier instance to complete. */
1807        wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1808        smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1809
1810        /*
1811         * Prevent premature wakeup: ensure that all increments happen
1812         * before there is a chance of the counter reaching zero.
1813         */
1814        atomic_set(&oom_callback_count, 1);
1815
1816        get_online_cpus();
1817        for_each_online_cpu(cpu) {
1818                smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1819                cond_resched_rcu_qs();
1820        }
1821        put_online_cpus();
1822
1823        /* Unconditionally decrement: no need to wake ourselves up. */
1824        atomic_dec(&oom_callback_count);
1825
1826        return NOTIFY_OK;
1827}
1828
1829static struct notifier_block rcu_oom_nb = {
1830        .notifier_call = rcu_oom_notify
1831};
1832
1833static int __init rcu_register_oom_notifier(void)
1834{
1835        register_oom_notifier(&rcu_oom_nb);
1836        return 0;
1837}
1838early_initcall(rcu_register_oom_notifier);
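
    /*
     * Rough walk-through of the oom_callback_count protocol above
     * (hypothetical counts): suppose three online CPUs have lazy
     * callbacks.  rcu_oom_notify() first waits for any previous instance
     * to drain, then biases the counter to 1 so that it cannot reach zero
     * while rcu_oom_notify_cpu() is still posting callbacks.  The three
     * postings take the counter 1 -> 2 -> 3 -> 4, the final atomic_dec()
     * drops the bias back to 3, and each rcu_oom_callback() invocation
     * then decrements it; the last one (3 -> 2 -> 1 -> 0) wakes
     * oom_callback_wq for the benefit of the next OOM notification.
     */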
1839
1840#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1841
1842#ifdef CONFIG_RCU_CPU_STALL_INFO
1843
1844#ifdef CONFIG_RCU_FAST_NO_HZ
1845
1846static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1847{
1848        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1849        unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1850
1851        sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1852                rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1853                ulong2long(nlpd),
1854                rdtp->all_lazy ? 'L' : '.',
1855                rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1856}
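
    /*
     * The resulting string might look like (hypothetical values):
     *
     *     last_accelerate: 3c1a/3c8f, nonlazy_posted: 12, .D
     *
     * i.e. the low 16 bits of ->last_accelerate and of the current jiffies
     * counter, the number of non-lazy callbacks posted since the snapshot,
     * 'L' vs '.' for all-lazy vs not, and 'D' vs '.' for the nohz tick
     * having been seen as disabled vs enabled.
     */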
1857
1858#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1859
1860static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1861{
1862        *cp = '\0';
1863}
1864
1865#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1866
1867/* Initiate the stall-info list. */
1868static void print_cpu_stall_info_begin(void)
1869{
1870        pr_cont("\n");
1871}
1872
1873/*
1874 * Print out diagnostic information for the specified stalled CPU.
1875 *
1876 * If the specified CPU is aware of the current RCU grace period
1877 * (flavor specified by rsp), then print the number of scheduling
1878 * clock interrupts the CPU has taken during the time that it has
1879 * been aware.  Otherwise, print the number of RCU grace periods
1880 * that this CPU is ignorant of, for example, "1" if the CPU was
1881 * aware of the previous grace period.
1882 *
1883 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1884 */
1885static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1886{
1887        char fast_no_hz[72];
1888        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1889        struct rcu_dynticks *rdtp = rdp->dynticks;
1890        char *ticks_title;
1891        unsigned long ticks_value;
1892
1893        if (rsp->gpnum == rdp->gpnum) {
1894                ticks_title = "ticks this GP";
1895                ticks_value = rdp->ticks_this_gp;
1896        } else {
1897                ticks_title = "GPs behind";
1898                ticks_value = rsp->gpnum - rdp->gpnum;
1899        }
1900        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1901        pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
1902               cpu, ticks_value, ticks_title,
1903               atomic_read(&rdtp->dynticks) & 0xfff,
1904               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1905               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1906               fast_no_hz);
1907}
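
    /*
     * A purely hypothetical stall line produced by the pr_err() above:
     *
     *     3: (26 ticks this GP) idle=547/140000000000000/0 softirq=3894/3899
     *        last_accelerate: 3c1a/3c8f, nonlazy_posted: 12, .D
     *
     * giving the CPU number, how long it has been aware of (or behind) the
     * current grace period, the low bits of its dynticks counter plus the
     * two nesting counters, the RCU_SOFTIRQ count at grace-period start
     * versus now, and the CONFIG_RCU_FAST_NO_HZ string, if any.
     */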
1908
1909/* Terminate the stall-info list. */
1910static void print_cpu_stall_info_end(void)
1911{
1912        pr_err("\t");
1913}
1914
1915/* Zero ->ticks_this_gp for all flavors of RCU. */
1916static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1917{
1918        rdp->ticks_this_gp = 0;
1919        rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1920}
1921
1922/* Increment ->ticks_this_gp for all flavors of RCU. */
1923static void increment_cpu_stall_ticks(void)
1924{
1925        struct rcu_state *rsp;
1926
1927        for_each_rcu_flavor(rsp)
1928                raw_cpu_inc(rsp->rda->ticks_this_gp);
1929}
1930
1931#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1932
1933static void print_cpu_stall_info_begin(void)
1934{
1935        pr_cont(" {");
1936}
1937
1938static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1939{
1940        pr_cont(" %d", cpu);
1941}
1942
1943static void print_cpu_stall_info_end(void)
1944{
1945        pr_cont("} ");
1946}
1947
1948static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1949{
1950}
1951
1952static void increment_cpu_stall_ticks(void)
1953{
1954}
1955
1956#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
1957
1958#ifdef CONFIG_RCU_NOCB_CPU
1959
1960/*
1961 * Offload callback processing from the boot-time-specified set of CPUs
1962 * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1963 * kthread created that pulls the callbacks from the corresponding CPU,
1964 * waits for a grace period to elapse, and invokes the callbacks.
1965 * The no-CBs CPUs do a wake_up() on their kthread when they insert
1966 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1967 * has been specified, in which case each kthread actively polls its
1968 * CPU.  (Which isn't so great for energy efficiency, but which does
1969 * reduce RCU's overhead on that CPU.)
1970 *
1971 * This is intended to be used in conjunction with Frederic Weisbecker's
1972 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1973 * running CPU-bound user-mode computations.
1974 *
1975 * Offloading of callback processing could also in theory be used as
1976 * an energy-efficiency measure because CPUs with no RCU callbacks
1977 * queued are more aggressive about entering dyntick-idle mode.
1978 */
1979
1980
1981/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1982static int __init rcu_nocb_setup(char *str)
1983{
1984        alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1985        have_rcu_nocb_mask = true;
1986        cpulist_parse(str, rcu_nocb_mask);
1987        return 1;
1988}
1989__setup("rcu_nocbs=", rcu_nocb_setup);
1990
1991static int __init parse_rcu_nocb_poll(char *arg)
1992{
1993        rcu_nocb_poll = 1;
1994        return 0;
1995}
1996early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
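
    /*
     * Example (illustrative only): booting with "rcu_nocbs=1-7" offloads
     * callback invocation for CPUs 1-7 to rcuo kthreads, and adding
     * "rcu_nocb_poll" makes those kthreads poll for work rather than
     * waiting to be awakened by the CPUs posting callbacks.
     */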
1997
1998/*
1999 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2000 * grace period.
2001 */
2002static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2003{
2004        wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
2005}
2006
2007/*
2008 * Set the root rcu_node structure's ->need_future_gp field
2009 * based on the sum of those of all rcu_node structures.  This does
2010 * double-count the root rcu_node structure's requests, but this
2011 * is necessary to handle the possibility of a rcu_nocb_kthread()
2012 * having awakened during the time that the rcu_node structures
2013 * were being updated for the end of the previous grace period.
2014 */
2015static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2016{
2017        rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
2018}
2019
2020static void rcu_init_one_nocb(struct rcu_node *rnp)
2021{
2022        init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2023        init_waitqueue_head(&rnp->nocb_gp_wq[1]);
2024}
2025
2026#ifndef CONFIG_RCU_NOCB_CPU_ALL
2027/* Is the specified CPU a no-CBs CPU? */
2028bool rcu_is_nocb_cpu(int cpu)
2029{
2030        if (have_rcu_nocb_mask)
2031                return cpumask_test_cpu(cpu, rcu_nocb_mask);
2032        return false;
2033}
2034#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
2035
2036/*
2037 * Kick the leader kthread for this NOCB group.
2038 */
2039static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2040{
2041        struct rcu_data *rdp_leader = rdp->nocb_leader;
2042
2043        if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
2044                return;
2045        if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
2046                /* Prior smp_mb__after_atomic() orders against prior enqueue. */
2047                ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
2048                wake_up(&rdp_leader->nocb_wq);
2049        }
2050}
2051
2052/*
2053 * Does the specified CPU need an RCU callback for the specified flavor
2054 * of rcu_barrier()?
2055 */
2056static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2057{
2058        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2059        struct rcu_head *rhp;
2060
2061        /* No-CBs CPUs might have callbacks on any of three lists. */
2062        rhp = ACCESS_ONCE(rdp->nocb_head);
2063        if (!rhp)
2064                rhp = ACCESS_ONCE(rdp->nocb_gp_head);
2065        if (!rhp)
2066                rhp = ACCESS_ONCE(rdp->nocb_follower_head);
2067
2068        /* Having no rcuo kthread but CBs after scheduler starts is bad! */
2069        if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
2070                /* RCU callback enqueued before CPU first came online??? */
2071                pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
2072                       cpu, rhp->func);
2073                WARN_ON_ONCE(1);
2074        }
2075
2076        return !!rhp;
2077}
2078
2079/*
2080 * Enqueue the specified string of rcu_head structures onto the specified
2081 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
2082 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
2083 * counts are supplied by rhcount and rhcount_lazy.
2084 *
2085 * If warranted, also wake up the kthread servicing this CPU's queues.
2086 */
2087static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2088                                    struct rcu_head *rhp,
2089                                    struct rcu_head **rhtp,
2090                                    int rhcount, int rhcount_lazy,
2091                                    unsigned long flags)
2092{
2093        int len;
2094        struct rcu_head **old_rhpp;
2095        struct task_struct *t;
2096
2097        /* Enqueue the callback on the nocb list and update counts. */
2098        old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2099        ACCESS_ONCE(*old_rhpp) = rhp;
2100        atomic_long_add(rhcount, &rdp->nocb_q_count);
2101        atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2102        smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
2103
2104        /* If we are not being polled and there is a kthread, awaken it ... */
2105        t = ACCESS_ONCE(rdp->nocb_kthread);
2106        if (rcu_nocb_poll || !t) {
2107                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2108                                    TPS("WakeNotPoll"));
2109                return;
2110        }
2111        len = atomic_long_read(&rdp->nocb_q_count);
2112        if (old_rhpp == &rdp->nocb_head) {
2113                if (!irqs_disabled_flags(flags)) {
2114                        /* ... if queue was empty ... */
2115                        wake_nocb_leader(rdp, false);
2116                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2117                                            TPS("WakeEmpty"));
2118                } else {
2119                        rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
2120                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2121                                            TPS("WakeEmptyIsDeferred"));
2122                }
2123                rdp->qlen_last_fqs_check = 0;
2124        } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2125                /* ... or if many callbacks queued. */
2126                if (!irqs_disabled_flags(flags)) {
2127                        wake_nocb_leader(rdp, true);
2128                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2129                                            TPS("WakeOvf"));
2130                } else {
2131                        rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
2132                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2133                                            TPS("WakeOvfIsDeferred"));
2134                }
2135                rdp->qlen_last_fqs_check = LONG_MAX / 2;
2136        } else {
2137                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
2138        }
2139        return;
2140}
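
    /*
     * Summary of the wakeup policy implemented above:
     *
     *   - polling mode, or no kthread yet:    no wakeup, just trace.
     *   - queue was previously empty:         wake the leader now, or defer
     *                                         with RCU_NOGP_WAKE if
     *                                         interrupts are disabled.
     *   - queue length exceeds
     *     ->qlen_last_fqs_check + qhimark:    forced wake of the leader, or
     *                                         defer with RCU_NOGP_WAKE_FORCE.
     *   - otherwise:                          no wakeup needed.
     */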
2141
2142/*
2143 * This is a helper for __call_rcu(), which invokes this when the normal
2144 * callback queue is inoperable.  If this is not a no-CBs CPU, this
2145 * function returns failure back to __call_rcu(), which can complain
2146 * appropriately.
2147 *
2148 * Otherwise, this function queues the callback where the corresponding
2149 * "rcuo" kthread can find it.
2150 */
2151static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2152                            bool lazy, unsigned long flags)
2153{
2154
2155        if (!rcu_is_nocb_cpu(rdp->cpu))
2156                return false;
2157        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
2158        if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2159                trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2160                                         (unsigned long)rhp->func,
2161                                         -atomic_long_read(&rdp->nocb_q_count_lazy),
2162                                         -atomic_long_read(&rdp->nocb_q_count));
2163        else
2164                trace_rcu_callback(rdp->rsp->name, rhp,
2165                                   -atomic_long_read(&rdp->nocb_q_count_lazy),
2166                                   -atomic_long_read(&rdp->nocb_q_count));
2167
2168        /*
2169         * If called from an extended quiescent state with interrupts
2170         * disabled, invoke the RCU core in order to allow the idle-entry
2171         * deferred-wakeup check to function.
2172         */
2173        if (irqs_disabled_flags(flags) &&
2174            !rcu_is_watching() &&
2175            cpu_online(smp_processor_id()))
2176                invoke_rcu_core();
2177
2178        return true;
2179}
2180
2181/*
2182 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2183 * not a no-CBs CPU.
2184 */
2185static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2186                                                     struct rcu_data *rdp,
2187                                                     unsigned long flags)
2188{
2189        long ql = rsp->qlen;
2190        long qll = rsp->qlen_lazy;
2191
2192        /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2193        if (!rcu_is_nocb_cpu(smp_processor_id()))
2194                return false;
2195        rsp->qlen = 0;
2196        rsp->qlen_lazy = 0;
2197
2198        /* First, enqueue the donelist, if any.  This preserves CB ordering. */
2199        if (rsp->orphan_donelist != NULL) {
2200                __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2201                                        rsp->orphan_donetail, ql, qll, flags);
2202                ql = qll = 0;
2203                rsp->orphan_donelist = NULL;
2204                rsp->orphan_donetail = &rsp->orphan_donelist;
2205        }
2206        if (rsp->orphan_nxtlist != NULL) {
2207                __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2208                                        rsp->orphan_nxttail, ql, qll, flags);
2209                ql = qll = 0;
2210                rsp->orphan_nxtlist = NULL;
2211                rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2212        }
2213        return true;
2214}
2215
2216/*
2217 * If necessary, kick off a new grace period, and either way wait
2218 * for a subsequent grace period to complete.
2219 */
2220static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2221{
2222        unsigned long c;
2223        bool d;
2224        unsigned long flags;
2225        bool needwake;
2226        struct rcu_node *rnp = rdp->mynode;
2227
2228        raw_spin_lock_irqsave(&rnp->lock, flags);
2229        smp_mb__after_unlock_lock();
2230        needwake = rcu_start_future_gp(rnp, rdp, &c);
2231        raw_spin_unlock_irqrestore(&rnp->lock, flags);
2232        if (needwake)
2233                rcu_gp_kthread_wake(rdp->rsp);
2234
2235        /*
2236         * Wait for the grace period.  Do so interruptibly to avoid messing
2237         * up the load average.
2238         */
2239        trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2240        for (;;) {
2241                wait_event_interruptible(
2242                        rnp->nocb_gp_wq[c & 0x1],
2243                        (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2244                if (likely(d))
2245                        break;
2246                WARN_ON(signal_pending(current));
2247                trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
2248        }
2249        trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
2250        smp_mb(); /* Ensure that CB invocation happens after GP end. */
2251}
2252
2253/*
2254 * Leaders come here to wait for additional callbacks to show up.
2255 * This function does not return until callbacks appear.
2256 */
2257static void nocb_leader_wait(struct rcu_data *my_rdp)
2258{
2259        bool firsttime = true;
2260        bool gotcbs;
2261        struct rcu_data *rdp;
2262        struct rcu_head **tail;
2263
2264wait_again:
2265
2266        /* Wait for callbacks to appear. */
2267        if (!rcu_nocb_poll) {
2268                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
2269                wait_event_interruptible(my_rdp->nocb_wq,
2270                                !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
2271                /* Memory barrier handled by smp_mb() calls below and repoll. */
2272        } else if (firsttime) {
2273                firsttime = false; /* Don't drown trace log with "Poll"! */
2274                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
2275        }
2276
2277        /*
2278         * Each pass through the following loop checks a follower for CBs.
2279         * We are our own first follower.  Any CBs found are moved to
2280         * nocb_gp_head, where they await a grace period.
2281         */
2282        gotcbs = false;
2283        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2284                rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
2285                if (!rdp->nocb_gp_head)
2286                        continue;  /* No CBs here, try next follower. */
2287
2288                /* Move callbacks to wait-for-GP list, which is empty. */
2289                ACCESS_ONCE(rdp->nocb_head) = NULL;
2290                rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2291                rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
2292                rdp->nocb_gp_count_lazy =
2293                        atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2294                gotcbs = true;
2295        }
2296
2297        /*
2298         * If there were no callbacks, sleep a bit, rescan after a
2299         * memory barrier, and go retry.
2300         */
2301        if (unlikely(!gotcbs)) {
2302                if (!rcu_nocb_poll)
2303                        trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2304                                            "WokeEmpty");
2305                WARN_ON(signal_pending(current));
2306                schedule_timeout_interruptible(1);
2307
2308                /* Rescan in case we were a victim of memory ordering. */
2309                my_rdp->nocb_leader_sleep = true;
2310                smp_mb();  /* Ensure _sleep true before scan. */
2311                for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
2312                        if (ACCESS_ONCE(rdp->nocb_head)) {
2313                                /* Found CB, so short-circuit next wait. */
2314                                my_rdp->nocb_leader_sleep = false;
2315                                break;
2316                        }
2317                goto wait_again;
2318        }
2319
2320        /* Wait for one grace period. */
2321        rcu_nocb_wait_gp(my_rdp);
2322
2323        /*
2324         * We left ->nocb_leader_sleep unset to reduce cache thrashing.
2325         * We set it now, but recheck for new callbacks while
2326         * traversing our follower list.
2327         */
2328        my_rdp->nocb_leader_sleep = true;
2329        smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
2330
2331        /* Each pass through the following loop wakes a follower, if needed. */
2332        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2333                if (ACCESS_ONCE(rdp->nocb_head))
2334                        my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
2335                if (!rdp->nocb_gp_head)
2336                        continue; /* No CBs, so no need to wake follower. */
2337
2338                /* Append callbacks to follower's "done" list. */
2339                tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2340                *tail = rdp->nocb_gp_head;
2341                atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
2342                atomic_long_add(rdp->nocb_gp_count_lazy,
2343                                &rdp->nocb_follower_count_lazy);
2344                smp_mb__after_atomic(); /* Store *tail before wakeup. */
2345                if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2346                        /*
2347                         * List was empty, wake up the follower.
2348                         * Memory barriers supplied by atomic_long_add().
2349                         */
2350                        wake_up(&rdp->nocb_wq);
2351                }
2352        }
2353
2354        /* If we (the leader) don't have CBs, go wait some more. */
2355        if (!my_rdp->nocb_follower_head)
2356                goto wait_again;
2357}
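
    /*
     * In short, callbacks for an offloaded CPU move through three lists
     * (all visible above and in rcu_nocb_kthread() below):
     *
     *   ->nocb_head/->nocb_tail           enqueued by __call_rcu_nocb_enqueue()
     *   ->nocb_gp_head/->nocb_gp_tail     claimed by the leader, which then
     *                                     waits for one grace period
     *   ->nocb_follower_head/_tail        handed to the follower after the
     *                                     grace period; the follower's
     *                                     kthread dequeues and invokes them.
     */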
2358
2359/*
2360 * Followers come here to wait for additional callbacks to show up.
2361 * This function does not return until callbacks appear.
2362 */
2363static void nocb_follower_wait(struct rcu_data *rdp)
2364{
2365        bool firsttime = true;
2366
2367        for (;;) {
2368                if (!rcu_nocb_poll) {
2369                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2370                                            "FollowerSleep");
2371                        wait_event_interruptible(rdp->nocb_wq,
2372                                                 ACCESS_ONCE(rdp->nocb_follower_head));
2373                } else if (firsttime) {
2374                        /* Don't drown trace log with "Poll"! */
2375                        firsttime = false;
2376                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
2377                }
2378                if (smp_load_acquire(&rdp->nocb_follower_head)) {
2379                        /* ^^^ Ensure CB invocation follows _head test. */
2380                        return;
2381                }
2382                if (!rcu_nocb_poll)
2383                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2384                                            "WokeEmpty");
2385                WARN_ON(signal_pending(current));
2386                schedule_timeout_interruptible(1);
2387        }
2388}
2389
2390/*
2391 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2392 * callbacks queued by the corresponding no-CBs CPU; however, there is
2393 * an optional leader-follower relationship so that the grace-period
2394 * kthreads don't have to do quite so many wakeups.
2395 */
2396static int rcu_nocb_kthread(void *arg)
2397{
2398        int c, cl;
2399        struct rcu_head *list;
2400        struct rcu_head *next;
2401        struct rcu_head **tail;
2402        struct rcu_data *rdp = arg;
2403
2404        /* Each pass through this loop invokes one batch of callbacks */
2405        for (;;) {
2406                /* Wait for callbacks. */
2407                if (rdp->nocb_leader == rdp)
2408                        nocb_leader_wait(rdp);
2409                else
2410                        nocb_follower_wait(rdp);
2411
2412                /* Pull the ready-to-invoke callbacks onto local list. */
2413                list = ACCESS_ONCE(rdp->nocb_follower_head);
2414                BUG_ON(!list);
2415                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2416                ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2417                tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2418                c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
2419                cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
2420                rdp->nocb_p_count += c;
2421                rdp->nocb_p_count_lazy += cl;
2422
2423                /* Each pass through the following loop invokes a callback. */
2424                trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2425                c = cl = 0;
2426                while (list) {
2427                        next = list->next;
2428                        /* Wait for enqueuing to complete, if needed. */
2429                        while (next == NULL && &list->next != tail) {
2430                                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2431                                                    TPS("WaitQueue"));
2432                                schedule_timeout_interruptible(1);
2433                                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2434                                                    TPS("WokeQueue"));
2435                                next = list->next;
2436                        }
2437                        debug_rcu_head_unqueue(list);
2438                        local_bh_disable();
2439                        if (__rcu_reclaim(rdp->rsp->name, list))
2440                                cl++;
2441                        c++;
2442                        local_bh_enable();
2443                        list = next;
2444                }
2445                trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2446                ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
2447                ACCESS_ONCE(rdp->nocb_p_count_lazy) =
2448                                                rdp->nocb_p_count_lazy - cl;
2449                rdp->n_nocbs_invoked += c;
2450        }
2451        return 0;
2452}
2453
2454/* Is a deferred wakeup of rcu_nocb_kthread() required? */
2455static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2456{
2457        return ACCESS_ONCE(rdp->nocb_defer_wakeup);
2458}
2459
2460/* Do a deferred wakeup of rcu_nocb_kthread(). */
2461static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2462{
2463        int ndw;
2464
2465        if (!rcu_nocb_need_deferred_wakeup(rdp))
2466                return;
2467        ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
2468        ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
2469        wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
2470        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
2471}
2472
2473void __init rcu_init_nohz(void)
2474{
2475        int cpu;
2476        bool need_rcu_nocb_mask = true;
2477        struct rcu_state *rsp;
2478
2479#ifdef CONFIG_RCU_NOCB_CPU_NONE
2480        need_rcu_nocb_mask = false;
2481#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */
2482
2483#if defined(CONFIG_NO_HZ_FULL)
2484        if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2485                need_rcu_nocb_mask = true;
2486#endif /* #if defined(CONFIG_NO_HZ_FULL) */
2487
2488        if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
2489                if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2490                        pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2491                        return;
2492                }
2493                have_rcu_nocb_mask = true;
2494        }
2495        if (!have_rcu_nocb_mask)
2496                return;
2497
2498#ifdef CONFIG_RCU_NOCB_CPU_ZERO
2499        pr_info("\tOffload RCU callbacks from CPU 0\n");
2500        cpumask_set_cpu(0, rcu_nocb_mask);
2501#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
2502#ifdef CONFIG_RCU_NOCB_CPU_ALL
2503        pr_info("\tOffload RCU callbacks from all CPUs\n");
2504        cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
2505#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
2506#if defined(CONFIG_NO_HZ_FULL)
2507        if (tick_nohz_full_running)
2508                cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2509#endif /* #if defined(CONFIG_NO_HZ_FULL) */
2510
2511        if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2512                pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
2513                cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2514                            rcu_nocb_mask);
2515        }
2516        cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
2517        pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
2518        if (rcu_nocb_poll)
2519                pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2520
2521        for_each_rcu_flavor(rsp) {
2522                for_each_cpu(cpu, rcu_nocb_mask) {
2523                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2524
2525                        /*
2526                         * If there are early callbacks, they will need
2527                         * to be moved to the nocb lists.
2528                         */
2529                        WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
2530                                     &rdp->nxtlist &&
2531                                     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
2532                        init_nocb_callback_list(rdp);
2533                }
2534                rcu_organize_nocb_kthreads(rsp);
2535        }
2536}
2537
2538/* Initialize per-rcu_data variables for no-CBs CPUs. */
2539static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2540{
2541        rdp->nocb_tail = &rdp->nocb_head;
2542        init_waitqueue_head(&rdp->nocb_wq);
2543        rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2544}
2545
2546/*
2547 * If the specified CPU is a no-CBs CPU that does not already have its
2548 * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
2549 * brought online out of order, this can require re-organizing the
2550 * leader-follower relationships.
2551 */
2552static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
2553{
2554        struct rcu_data *rdp;
2555        struct rcu_data *rdp_last;
2556        struct rcu_data *rdp_old_leader;
2557        struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
2558        struct task_struct *t;
2559
2560        /*
2561         * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2562         * then nothing to do.
2563         */
2564        if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
2565                return;
2566
2567        /* If we didn't spawn the leader first, reorganize! */
2568        rdp_old_leader = rdp_spawn->nocb_leader;
2569        if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
2570                rdp_last = NULL;
2571                rdp = rdp_old_leader;
2572                do {
2573                        rdp->nocb_leader = rdp_spawn;
2574                        if (rdp_last && rdp != rdp_spawn)
2575                                rdp_last->nocb_next_follower = rdp;
2576                        if (rdp == rdp_spawn) {
2577                                rdp = rdp->nocb_next_follower;
2578                        } else {
2579                                rdp_last = rdp;
2580                                rdp = rdp->nocb_next_follower;
2581                                rdp_last->nocb_next_follower = NULL;
2582                        }
2583                } while (rdp);
2584                rdp_spawn->nocb_next_follower = rdp_old_leader;
2585        }
2586
2587        /* Spawn the kthread for this CPU and RCU flavor. */
2588        t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2589                        "rcuo%c/%d", rsp->abbr, cpu);
2590        BUG_ON(IS_ERR(t));
2591        ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
2592}
2593
2594/*
2595 * If the specified CPU is a no-CBs CPU that does not already have its
2596 * rcuo kthreads, spawn them.
2597 */
2598static void rcu_spawn_all_nocb_kthreads(int cpu)
2599{
2600        struct rcu_state *rsp;
2601
2602        if (rcu_scheduler_fully_active)
2603                for_each_rcu_flavor(rsp)
2604                        rcu_spawn_one_nocb_kthread(rsp, cpu);
2605}
2606
2607/*
2608 * Once the scheduler is running, spawn rcuo kthreads for all online
2609 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2610 * non-boot CPUs come online -- if this changes, we will need to add
2611 * some mutual exclusion.
2612 */
2613static void __init rcu_spawn_nocb_kthreads(void)
2614{
2615        int cpu;
2616
2617        for_each_online_cpu(cpu)
2618                rcu_spawn_all_nocb_kthreads(cpu);
2619}
2620
2621/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
2622static int rcu_nocb_leader_stride = -1;
2623module_param(rcu_nocb_leader_stride, int, 0444);
2624
2625/*
2626 * Initialize leader-follower relationships for all no-CBs CPUs.
2627 */
2628static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
2629{
2630        int cpu;
2631        int ls = rcu_nocb_leader_stride;
2632        int nl = 0;  /* Next leader. */
2633        struct rcu_data *rdp;
2634        struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
2635        struct rcu_data *rdp_prev = NULL;
2636
2637        if (!have_rcu_nocb_mask)
2638                return;
2639        if (ls == -1) {
2640                ls = int_sqrt(nr_cpu_ids);
2641                rcu_nocb_leader_stride = ls;
2642        }
2643
2644        /*
2645         * Each pass through this loop sets up the leader-follower linkage
2646         * for one rcu_data structure; the rcuo kthreads are spawned elsewhere.
2647         */
2648        for_each_cpu(cpu, rcu_nocb_mask) {
2649                rdp = per_cpu_ptr(rsp->rda, cpu);
2650                if (rdp->cpu >= nl) {
2651                        /* New leader, set up for followers & next leader. */
2652                        nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2653                        rdp->nocb_leader = rdp;
2654                        rdp_leader = rdp;
2655                } else {
2656                        /* Another follower, link to previous leader. */
2657                        rdp->nocb_leader = rdp_leader;
2658                        rdp_prev->nocb_next_follower = rdp;
2659                }
2660                rdp_prev = rdp;
2661        }
2662}
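
    /*
     * Worked example (assuming a hypothetical 64-CPU system with all CPUs
     * in rcu_nocb_mask and the default stride): ls = int_sqrt(64) = 8, so
     * CPU 0 becomes a leader with CPUs 1-7 as its followers, CPU 8 leads
     * CPUs 9-15, and so on, giving eight groups of eight.  A sparse
     * rcu_nocb_mask is grouped along the same stride-of-8 boundaries.
     */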
2663
2664/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2665static bool init_nocb_callback_list(struct rcu_data *rdp)
2666{
2667        if (!rcu_is_nocb_cpu(rdp->cpu))
2668                return false;
2669
2670        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2671        return true;
2672}
2673
2674#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2675
2676static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2677{
2678        WARN_ON_ONCE(1); /* Should be dead code. */
2679        return false;
2680}
2681
2682static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2683{
2684}
2685
2686static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2687{
2688}
2689
2690static void rcu_init_one_nocb(struct rcu_node *rnp)
2691{
2692}
2693
2694static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2695                            bool lazy, unsigned long flags)
2696{
2697        return false;
2698}
2699
2700static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2701                                                     struct rcu_data *rdp,
2702                                                     unsigned long flags)
2703{
2704        return false;
2705}
2706
2707static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2708{
2709}
2710
2711static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2712{
2713        return false;
2714}
2715
2716static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2717{
2718}
2719
2720static void rcu_spawn_all_nocb_kthreads(int cpu)
2721{
2722}
2723
2724static void __init rcu_spawn_nocb_kthreads(void)
2725{
2726}
2727
2728static bool init_nocb_callback_list(struct rcu_data *rdp)
2729{
2730        return false;
2731}
2732
2733#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2734
2735/*
2736 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2737 * arbitrarily long period of time with the scheduling-clock tick turned
2738 * off.  RCU will be paying attention to this CPU because it is in the
2739 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2740 * machine because the scheduling-clock tick has been disabled.  Therefore,
2741 * if an adaptive-ticks CPU is failing to respond to the current grace
2742 * period and has not been idle from an RCU perspective, kick it.
2743 */
2744static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2745{
2746#ifdef CONFIG_NO_HZ_FULL
2747        if (tick_nohz_full_cpu(cpu))
2748                smp_send_reschedule(cpu);
2749#endif /* #ifdef CONFIG_NO_HZ_FULL */
2750}
2751
2752
2753#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
2754
2755static int full_sysidle_state;          /* Current system-idle state. */
2756#define RCU_SYSIDLE_NOT         0       /* Some CPU is not idle. */
2757#define RCU_SYSIDLE_SHORT       1       /* All CPUs idle for brief period. */
2758#define RCU_SYSIDLE_LONG        2       /* All CPUs idle for long enough. */
2759#define RCU_SYSIDLE_FULL        3       /* All CPUs idle, ready for sysidle. */
2760#define RCU_SYSIDLE_FULL_NOTED  4       /* Actually entered sysidle state. */
2761
2762/*
2763 * Invoked to note exit from irq or task transition to idle.  Note that
2764 * usermode execution does -not- count as idle here!  After all, we want
2765 * to detect full-system idle states, not RCU quiescent states and grace
2766 * periods.  The caller must have disabled interrupts.
2767 */
2768static void rcu_sysidle_enter(int irq)
2769{
2770        unsigned long j;
2771        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
2772
2773        /* If there are no nohz_full= CPUs, no need to track this. */
2774        if (!tick_nohz_full_enabled())
2775                return;
2776
2777        /* Adjust nesting, check for fully idle. */
2778        if (irq) {
2779                rdtp->dynticks_idle_nesting--;
2780                WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2781                if (rdtp->dynticks_idle_nesting != 0)
2782                        return;  /* Still not fully idle. */
2783        } else {
2784                if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
2785                    DYNTICK_TASK_NEST_VALUE) {
2786                        rdtp->dynticks_idle_nesting = 0;
2787                } else {
2788                        rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
2789                        WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2790                        return;  /* Still not fully idle. */
2791                }
2792        }
2793
2794        /* Record start of fully idle period. */
2795        j = jiffies;
2796        ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
2797        smp_mb__before_atomic();
2798        atomic_inc(&rdtp->dynticks_idle);
2799        smp_mb__after_atomic();
2800        WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
2801}
2802
2803/*
2804 * Unconditionally force exit from full system-idle state.  This is
2805 * invoked when a normal CPU exits idle, but must be called separately
2806 * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
2807 * is that the timekeeping CPU is permitted to take scheduling-clock
2808 * interrupts while the system is in system-idle state, and of course
2809 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
2810 * interrupt from any other type of interrupt.
2811 */
2812void rcu_sysidle_force_exit(void)
2813{
2814        int oldstate = ACCESS_ONCE(full_sysidle_state);
2815        int newoldstate;
2816
2817        /*
2818         * Each pass through the following loop attempts to exit full
2819         * system-idle state.  If contention proves to be a problem,
2820         * a trylock-based contention tree could be used here.
2821         */
2822        while (oldstate > RCU_SYSIDLE_SHORT) {
2823                newoldstate = cmpxchg(&full_sysidle_state,
2824                                      oldstate, RCU_SYSIDLE_NOT);
2825                if (oldstate == newoldstate &&
2826                    oldstate == RCU_SYSIDLE_FULL_NOTED) {
2827                        rcu_kick_nohz_cpu(tick_do_timer_cpu);
2828                        return; /* We cleared it, done! */
2829                }
2830                oldstate = newoldstate;
2831        }
2832        smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
2833}
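
    /*
     * Note on the loop above: the cmpxchg() retries until it either
     * installs RCU_SYSIDLE_NOT or observes that some other CPU has already
     * knocked the state down to RCU_SYSIDLE_SHORT or below.  The reschedule
     * kick is sent to tick_do_timer_cpu only when this CPU's cmpxchg() is
     * the one that clears a state of RCU_SYSIDLE_FULL_NOTED, presumably
     * because only in that state may the timekeeping CPU actually have
     * entered the system-idle state and so need prodding to resume.
     */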
2834
2835/*
2836 * Invoked to note entry to irq or task transition from idle.  Note that
2837 * usermode execution does -not- count as idle here!  The caller must
2838 * have disabled interrupts.
2839 */
2840static void rcu_sysidle_exit(int irq)
2841{
2842        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
2843
2844        /* If there are no nohz_full= CPUs, no need to track this. */
2845        if (!tick_nohz_full_enabled())
2846                return;
2847
2848        /* Adjust nesting, check for already non-idle. */
2849        if (irq) {
2850                rdtp->dynticks_idle_nesting++;
2851                WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2852                if (rdtp->dynticks_idle_nesting != 1)
2853                        return; /* Already non-idle. */
2854        } else {
2855                /*
2856                 * Allow for irq misnesting.  Yes, it really is possible
2857                 * to enter an irq handler then never leave it, and maybe
2858                 * also vice versa.  Handle both possibilities.
2859                 */
2860                if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
2861                        rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
2862                        WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2863                        return; /* Already non-idle. */
2864                } else {
2865                        rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
2866                }
2867        }
2868
2869        /* Record end of idle period. */
2870        smp_mb__before_atomic();
2871        atomic_inc(&rdtp->dynticks_idle);
2872        smp_mb__after_atomic();
2873        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
2874
2875        /*
2876         * If we are the timekeeping CPU, we are permitted to be non-idle
2877         * during a system-idle state.  This must be the case, because
2878         * the timekeeping CPU has to take scheduling-clock interrupts
2879         * during the time that the system is transitioning to full
2880         * system-idle state.  This means that the timekeeping CPU must
2881         * invoke rcu_sysidle_force_exit() directly if it does anything
2882         * more than take a scheduling-clock interrupt.
2883         */
2884        if (smp_processor_id() == tick_do_timer_cpu)
2885                return;
2886
2887        /* Update system-idle state: We are clearly no longer fully idle! */
2888        rcu_sysidle_force_exit();
2889}
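
/*
 * Illustrative sketch (not part of this file): a minimal userspace model of
 * the even/odd convention used by ->dynticks_idle above.  The counter is
 * incremented once on idle entry and once on idle exit, so an even value
 * means "idle" and an odd value means "non-idle".  All names are invented
 * for the illustration.
 */
#if 0	/* Sketch only; never compiled. */
#include <assert.h>
#include <stdio.h>

static unsigned int idle_counter = 1;	/* Odd: the CPU starts out non-idle. */

static void model_idle_enter(void)
{
	idle_counter++;			/* Now even: idle. */
	assert(!(idle_counter & 0x1));
}

static void model_idle_exit(void)
{
	idle_counter++;			/* Now odd: non-idle. */
	assert(idle_counter & 0x1);
}

int main(void)
{
	model_idle_enter();
	printf("idle, counter=%u (even)\n", idle_counter);
	model_idle_exit();
	printf("non-idle, counter=%u (odd)\n", idle_counter);
	return 0;
}
#endif	/* Sketch only. */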
2890
2891/*
2892 * Check whether the CPU covered by the specified rcu_data structure is idle.
2893 * Usermode execution does not count as idle.  The caller must have disabled interrupts.
2894 */
2895static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2896                                  unsigned long *maxj)
2897{
2898        int cur;
2899        unsigned long j;
2900        struct rcu_dynticks *rdtp = rdp->dynticks;
2901
2902        /* If there are no nohz_full= CPUs, don't check system-wide idleness. */
2903        if (!tick_nohz_full_enabled())
2904                return;
2905
2906        /*
2907         * If some other CPU has already reported non-idle, if this is
2908         * not the flavor of RCU that tracks sysidle state, or if this
2909         * is an offline CPU or the timekeeping CPU, nothing to do.
2910         */
2911        if (!*isidle || rdp->rsp != rcu_state_p ||
2912            cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
2913                return;
2914        if (rcu_gp_in_progress(rdp->rsp))
2915                WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
2916
2917        /* Pick up current idle and NMI-nesting counter and check. */
2918        cur = atomic_read(&rdtp->dynticks_idle);
2919        if (cur & 0x1) {
2920                *isidle = false; /* We are not idle! */
2921                return;
2922        }
2923        smp_mb(); /* Read counters before timestamps. */
2924
2925        /* Pick up timestamps. */
2926        j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
2927        /* If this CPU entered idle more recently, update maxj timestamp. */
2928        if (ULONG_CMP_LT(*maxj, j))
2929                *maxj = j;
2930}
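
/*
 * Illustrative sketch (not part of this file): a minimal userspace restatement
 * of the wraparound-safe comparison idiom behind the ULONG_CMP_LT() use above.
 * The CMP_LT() macro body is a re-derivation for illustration, not a quotation
 * of the kernel header.
 */
#if 0	/* Sketch only; never compiled. */
#include <limits.h>
#include <stdio.h>

/* "a is before b" even when the counters have wrapped around ULONG_MAX. */
#define CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long maxj = ULONG_MAX - 5;	/* Timestamp just before wrap. */
	unsigned long j = 10;			/* Timestamp just after wrap. */

	/* A plain "<" gets this backwards; the modular comparison does not. */
	printf("plain:   maxj before j? %d\n", maxj < j);
	printf("modular: maxj before j? %d\n", CMP_LT(maxj, j));

	if (CMP_LT(maxj, j))
		maxj = j;		/* Keep the most recent idle-entry time. */
	printf("maxj = %lu\n", maxj);
	return 0;
}
#endif	/* Sketch only. */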
2931
2932/*
2933 * Is this the flavor of RCU that is handling full-system idle?
2934 */
2935static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2936{
2937        return rsp == rcu_state_p;
2938}
2939
2940/*
2941 * Return a delay in jiffies based on the number of CPUs, rcu_node
2942 * leaf fanout, and jiffies tick rate.  The idea is to allow larger
2943 * systems more time to transition to full-idle state in order to
2944 * avoid the cache thrashing that would otherwise occur on the state variable.
2945 * Really small systems (fewer than a couple of tens of CPUs) should
2946 * instead use a single global atomically incremented counter, and later
2947 * versions of this will automatically reconfigure themselves accordingly.
2948 */
2949static unsigned long rcu_sysidle_delay(void)
2950{
2951        if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2952                return 0;
2953        return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
2954}
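
/*
 * Illustrative sketch (not part of this file): the delay formula above worked
 * through one assumed configuration (4096 CPUs, HZ=1000, leaf fanout of 16).
 * The numbers are hypothetical and chosen only to make the arithmetic
 * concrete.
 */
#if 0	/* Sketch only; never compiled. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nr_cpus = 4096;	/* Assumed large system. */
	unsigned long hz = 1000;	/* Assumed scheduling-clock rate. */
	unsigned long leaf_fanout = 16;	/* Assumed rcu_node leaf fanout. */

	/* 4096 * 1000 / (16 * 1000) = 256 jiffies between state advances. */
	printf("delay = %lu jiffies\n",
	       DIV_ROUND_UP(nr_cpus * hz, leaf_fanout * 1000));
	return 0;
}
#endif	/* Sketch only. */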
2955
2956/*
2957 * Advance the full-system-idle state.  This is invoked when all of
2958 * the non-timekeeping CPUs are idle.
2959 */
2960static void rcu_sysidle(unsigned long j)
2961{
2962        /* Check the current state. */
2963        switch (ACCESS_ONCE(full_sysidle_state)) {
2964        case RCU_SYSIDLE_NOT:
2965
2966                /* First time all are idle, so note a short idle period. */
2967                ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
2968                break;
2969
2970        case RCU_SYSIDLE_SHORT:
2971
2972                /*
2973                 * Idle for a bit, time to advance to next state?
2974                 * cmpxchg failure means race with non-idle, let them win.
2975                 */
2976                if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2977                        (void)cmpxchg(&full_sysidle_state,
2978                                      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
2979                break;
2980
2981        case RCU_SYSIDLE_LONG:
2982
2983                /*
2984                 * Do an additional check pass before advancing to full.
2985                 * cmpxchg failure means race with non-idle, let them win.
2986                 */
2987                if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2988                        (void)cmpxchg(&full_sysidle_state,
2989                                      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
2990                break;
2991
2992        default:
2993                break;
2994        }
2995}
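
/*
 * Illustrative sketch (not part of this file): a minimal, single-threaded
 * userspace model of the NOT -> SHORT -> LONG -> FULL progression driven by
 * rcu_sysidle() above.  The fake clock, the fixed delay, and the helper names
 * are invented for the illustration; the real code advances via cmpxchg so
 * that a racing non-idle CPU wins.
 */
#if 0	/* Sketch only; never compiled. */
#include <stdio.h>

enum { SYSIDLE_NOT, SYSIDLE_SHORT, SYSIDLE_LONG, SYSIDLE_FULL };

static int sysidle_state = SYSIDLE_NOT;
static const unsigned long delay = 256;	/* Stand-in for rcu_sysidle_delay(). */

/* A scan found all CPUs idle; "entered" is when the last CPU went idle. */
static void model_sysidle(unsigned long now, unsigned long entered)
{
	switch (sysidle_state) {
	case SYSIDLE_NOT:
		sysidle_state = SYSIDLE_SHORT;	/* First fully idle scan. */
		break;
	case SYSIDLE_SHORT:
		if (now >= entered + delay)
			sysidle_state = SYSIDLE_LONG;	/* Idle long enough. */
		break;
	case SYSIDLE_LONG:
		if (now >= entered + delay)
			sysidle_state = SYSIDLE_FULL;	/* Confirmation pass. */
		break;
	default:
		break;
	}
}

int main(void)
{
	unsigned long now, entered = 1000;	/* Fake jiffies values. */

	for (now = 1000; now <= 2000 && sysidle_state != SYSIDLE_FULL; now += 100) {
		model_sysidle(now, entered);
		printf("now=%lu state=%d\n", now, sysidle_state);
	}
	return 0;
}
#endif	/* Sketch only. */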
2996
2997/*
2998 * Found a non-idle non-timekeeping CPU, so kick the system-idle state
2999 * back to the beginning.
3000 */
3001static void rcu_sysidle_cancel(void)
3002{
3003        smp_mb(); /* Order prior non-idle observations before the state reset. */
3004        if (full_sysidle_state > RCU_SYSIDLE_SHORT)
3005                ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
3006}
3007
3008/*
3009 * Update the sysidle state based on the results of a force-quiescent-state
3010 * scan of the CPUs' dyntick-idle state.
3011 */
3012static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
3013                               unsigned long maxj, bool gpkt)
3014{
3015        if (rsp != rcu_state_p)
3016                return;  /* Wrong flavor, ignore. */
3017        if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
3018                return;  /* Running state machine from timekeeping CPU. */
3019        if (isidle)
3020                rcu_sysidle(maxj);    /* More idle! */
3021        else
3022                rcu_sysidle_cancel(); /* Idle is over. */
3023}
3024
3025/*
3026 * Wrapper for rcu_sysidle_report() when called from the grace-period
3027 * kthread's context.
3028 */
3029static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
3030                                  unsigned long maxj)
3031{
3032        /* If there are no nohz_full= CPUs, no need to track this. */
3033        if (!tick_nohz_full_enabled())
3034                return;
3035
3036        rcu_sysidle_report(rsp, isidle, maxj, true);
3037}
3038
3039/* Callback and function for forcing an RCU grace period. */
3040struct rcu_sysidle_head {
3041        struct rcu_head rh;
3042        int inuse;
3043};
3044
3045static void rcu_sysidle_cb(struct rcu_head *rhp)
3046{
3047        struct rcu_sysidle_head *rshp;
3048
3049        /*
3050         * The following memory barrier is needed to replace the
3051         * memory barriers that would normally be in the memory
3052         * allocator.
3053         */
3054        smp_mb();  /* grace period precedes setting inuse. */
3055
3056        rshp = container_of(rhp, struct rcu_sysidle_head, rh);
3057        ACCESS_ONCE(rshp->inuse) = 0;
3058}
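
/*
 * Illustrative sketch (not part of this file): a minimal userspace model of
 * the pattern used above, in which a single statically allocated callback is
 * reused and guarded by an ->inuse flag.  The grace period is replaced by a
 * direct call and all names are invented for the illustration.
 */
#if 0	/* Sketch only; never compiled. */
#include <stdatomic.h>
#include <stdio.h>

struct one_shot {
	atomic_int inuse;	/* Nonzero while the callback is pending. */
};

static struct one_shot osh;

static void one_shot_cb(struct one_shot *p)
{
	/* In the kernel this runs after a grace period; here, immediately. */
	atomic_store(&p->inuse, 0);
}

/* Queue the callback at most once; extra attempts are silently dropped. */
static void one_shot_submit(struct one_shot *p)
{
	if (atomic_load(&p->inuse) || atomic_exchange(&p->inuse, 1))
		return;			/* Already pending. */
	printf("queued\n");
	one_shot_cb(p);			/* Stand-in for call_rcu(). */
}

int main(void)
{
	one_shot_submit(&osh);
	one_shot_submit(&osh);	/* Would be dropped if the first were still pending. */
	return 0;
}
#endif	/* Sketch only. */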
3059
3060/*
3061 * Check to see if the system is fully idle, other than the timekeeping CPU.
3062 * The caller must have disabled interrupts.  This is not intended to be
3063 * called unless tick_nohz_full_enabled().
3064 */
3065bool rcu_sys_is_idle(void)
3066{
3067        static struct rcu_sysidle_head rsh;
3068        int rss = ACCESS_ONCE(full_sysidle_state);
3069
3070        if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
3071                return false;
3072
3073        /* Handle small-system case by doing a full scan of CPUs. */
3074        if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
3075                int oldrss = rss - 1;
3076
3077                /*
3078                 * One pass to advance to each state up to _FULL.
3079                 * Give up if any pass fails to advance the state.
3080                 */
3081                while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
3082                        int cpu;
3083                        bool isidle = true;
3084                        unsigned long maxj = jiffies - ULONG_MAX / 4;
3085                        struct rcu_data *rdp;
3086
3087                        /* Scan all the CPUs looking for nonidle CPUs. */
3088                        for_each_possible_cpu(cpu) {
3089                                rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3090                                rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
3091                                if (!isidle)
3092                                        break;
3093                        }
3094                        rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
3095                        oldrss = rss;
3096                        rss = ACCESS_ONCE(full_sysidle_state);
3097                }
3098        }
3099
3100        /* If this is the first observation of an idle period, record it. */
3101        if (rss == RCU_SYSIDLE_FULL) {
3102                rss = cmpxchg(&full_sysidle_state,
3103                              RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
3104                return rss == RCU_SYSIDLE_FULL;
3105        }
3106
3107        smp_mb(); /* ensure rss load happens before later caller actions. */
3108
3109        /* If already fully idle, tell the caller (in case of races). */
3110        if (rss == RCU_SYSIDLE_FULL_NOTED)
3111                return true;
3112
3113        /*
3114         * If we aren't there yet, and a grace period is not in flight,
3115         * initiate a grace period.  Either way, tell the caller that
3116         * we are not there yet.  We use an xchg() rather than an assignment
3117         * to make up for the memory barriers that would otherwise be
3118         * provided by the memory allocator.
3119         */
3120        if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
3121            !rcu_gp_in_progress(rcu_state_p) &&
3122            !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
3123                call_rcu(&rsh.rh, rcu_sysidle_cb);
3124        return false;
3125}
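
/*
 * Illustrative sketch (not part of this file): a minimal userspace model of
 * the small-system loop in rcu_sys_is_idle() above, which rescans for as long
 * as each pass advances the state and gives up as soon as a pass makes no
 * progress.  The scan itself is faked with a fixed "everything idle" result.
 */
#if 0	/* Sketch only; never compiled. */
#include <stdio.h>

enum { SYSIDLE_NOT, SYSIDLE_SHORT, SYSIDLE_LONG, SYSIDLE_FULL };

static int sysidle_state = SYSIDLE_NOT;

/* Fake scan: pretend every pass finds all CPUs idle and advances one step. */
static void scan_and_advance(void)
{
	if (sysidle_state < SYSIDLE_FULL)
		sysidle_state++;
}

int main(void)
{
	int rss = sysidle_state;
	int oldrss = rss - 1;

	/* One pass per state; stop at FULL or when a pass fails to advance. */
	while (rss < SYSIDLE_FULL && oldrss < rss) {
		scan_and_advance();
		oldrss = rss;
		rss = sysidle_state;
		printf("pass done, state=%d\n", rss);
	}
	return 0;
}
#endif	/* Sketch only. */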
3126
3127/*
3128 * Initialize dynticks sysidle state for CPUs coming online.
3129 */
3130static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
3131{
3132        rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
3133}
3134
3135#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3136
3137static void rcu_sysidle_enter(int irq)
3138{
3139}
3140
3141static void rcu_sysidle_exit(int irq)
3142{
3143}
3144
3145static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
3146                                  unsigned long *maxj)
3147{
3148}
3149
3150static bool is_sysidle_rcu_state(struct rcu_state *rsp)
3151{
3152        return false;
3153}
3154
3155static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
3156                                  unsigned long maxj)
3157{
3158}
3159
3160static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
3161{
3162}
3163
3164#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3165
3166/*
3167 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
3168 * grace-period kthread will do force_quiescent_state() processing?
3169 * The idea is to avoid waking up RCU core processing on such a
3170 * CPU unless the grace period has extended for too long.
3171 *
3172 * This code relies on the fact that all NO_HZ_FULL CPUs are also
3173 * CONFIG_RCU_NOCB_CPU CPUs.
3174 */
3175static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
3176{
3177#ifdef CONFIG_NO_HZ_FULL
3178        if (tick_nohz_full_cpu(smp_processor_id()) &&
3179            (!rcu_gp_in_progress(rsp) ||
3180             ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
3181                return 1;
3182#endif /* #ifdef CONFIG_NO_HZ_FULL */
3183        return 0;
3184}
3185
3186/*
3187 * Bind the grace-period kthread to the timekeeping CPU when running with
3188 * NO_HZ_FULL_SYSIDLE, or to the housekeeping CPUs otherwise.
3189 */
3190static void rcu_bind_gp_kthread(void)
3191{
3192        int __maybe_unused cpu;
3193
3194        if (!tick_nohz_full_enabled())
3195                return;
3196#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
3197        cpu = tick_do_timer_cpu;
3198        if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
3199                set_cpus_allowed_ptr(current, cpumask_of(cpu));
3200#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3201        if (!is_housekeeping_cpu(raw_smp_processor_id()))
3202                housekeeping_affine(current);
3203#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3204}
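
/*
 * Illustrative sketch (not part of this file): the same "pin the current
 * thread to one chosen CPU" idea shown in userspace, using the GNU pthread
 * affinity API as a loose analogy for set_cpus_allowed_ptr().  The chosen
 * CPU number is arbitrary.
 */
#if 0	/* Sketch only; never compiled. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;
	int cpu = 0;			/* Stand-in for tick_do_timer_cpu. */

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set) != 0)
		fprintf(stderr, "could not set affinity\n");
	else
		printf("pinned to CPU %d, now on CPU %d\n", cpu, sched_getcpu());
	return 0;
}
#endif	/* Sketch only. */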
3205
3206/* Record the current task on dyntick-idle entry. */
3207static void rcu_dynticks_task_enter(void)
3208{
3209#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
3210        ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
3211#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
3212}
3213
3214/* Record no current task on dyntick-idle exit. */
3215static void rcu_dynticks_task_exit(void)
3216{
3217#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
3218        ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
3219#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
3220}
3221