linux/kernel/rcu/tasks.h
   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/*
   3 * Task-based RCU implementations.
   4 *
   5 * Copyright (C) 2020 Paul E. McKenney
   6 */
   7
   8#ifdef CONFIG_TASKS_RCU_GENERIC
   9#include "rcu_segcblist.h"
  10
  11////////////////////////////////////////////////////////////////////////
  12//
  13// Generic data structures.
  14
  15struct rcu_tasks;
  16typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
  17typedef void (*pregp_func_t)(void);
  18typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
  19typedef void (*postscan_func_t)(struct list_head *hop);
  20typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
  21typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  22
  23/**
  24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
  25 * @cblist: Callback list.
  26 * @lock: Lock protecting per-CPU callback list.
  27 * @rtp_jiffies: Jiffies counter value for statistics.
  28 * @rtp_n_lock_retries: Rough lock-contention statistic.
  29 * @rtp_work: Work queue for invoking callbacks.
  30 * @rtp_irq_work: IRQ work queue for deferred wakeups.
  31 * @barrier_q_head: RCU callback for barrier operation.
  32 * @cpu: CPU number corresponding to this entry.
  33 * @rtpp: Pointer to the rcu_tasks structure.
  34 */
  35struct rcu_tasks_percpu {
  36        struct rcu_segcblist cblist;
  37        raw_spinlock_t __private lock;
  38        unsigned long rtp_jiffies;
  39        unsigned long rtp_n_lock_retries;
  40        struct work_struct rtp_work;
  41        struct irq_work rtp_irq_work;
  42        struct rcu_head barrier_q_head;
  43        int cpu;
  44        struct rcu_tasks *rtpp;
  45};
  46
  47/**
  48 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
  49 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
  50 * @cbs_gbl_lock: Lock protecting callback list.
  51 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  52 * @gp_func: This flavor's grace-period-wait function.
  53 * @gp_state: Grace period's most recent state transition (debugging).
  54 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
  55 * @init_fract: Initial backoff sleep interval.
  56 * @gp_jiffies: Time of last @gp_state transition.
  57 * @gp_start: Most recent grace-period start in jiffies.
  58 * @tasks_gp_seq: Number of grace periods completed since boot.
  59 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
  60 * @n_ipis_fails: Number of IPI-send failures.
  61 * @pregp_func: This flavor's pre-grace-period function (optional).
  62 * @pertask_func: This flavor's per-task scan function (optional).
  63 * @postscan_func: This flavor's post-task scan function (optional).
  64 * @holdouts_func: This flavor's holdout-list scan function (optional).
  65 * @postgp_func: This flavor's post-grace-period function (optional).
  66 * @call_func: This flavor's call_rcu()-equivalent function.
  67 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
  68 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
  69 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
  70 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
  71 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
  72 * @barrier_q_mutex: Serialize barrier operations.
  73 * @barrier_q_count: Number of queues being waited on.
  74 * @barrier_q_completion: Barrier wait/wakeup mechanism.
  75 * @barrier_q_seq: Sequence number for barrier operations.
  76 * @name: This flavor's textual name.
  77 * @kname: This flavor's kthread name.
  78 */
  79struct rcu_tasks {
  80        struct rcuwait cbs_wait;
  81        raw_spinlock_t cbs_gbl_lock;
  82        int gp_state;
  83        int gp_sleep;
  84        int init_fract;
  85        unsigned long gp_jiffies;
  86        unsigned long gp_start;
  87        unsigned long tasks_gp_seq;
  88        unsigned long n_ipis;
  89        unsigned long n_ipis_fails;
  90        struct task_struct *kthread_ptr;
  91        rcu_tasks_gp_func_t gp_func;
  92        pregp_func_t pregp_func;
  93        pertask_func_t pertask_func;
  94        postscan_func_t postscan_func;
  95        holdouts_func_t holdouts_func;
  96        postgp_func_t postgp_func;
  97        call_rcu_func_t call_func;
  98        struct rcu_tasks_percpu __percpu *rtpcpu;
  99        int percpu_enqueue_shift;
 100        int percpu_enqueue_lim;
 101        int percpu_dequeue_lim;
 102        unsigned long percpu_dequeue_gpseq;
 103        struct mutex barrier_q_mutex;
 104        atomic_t barrier_q_count;
 105        struct completion barrier_q_completion;
 106        unsigned long barrier_q_seq;
 107        char *name;
 108        char *kname;
 109};
 110
 111static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
 112
 113#define DEFINE_RCU_TASKS(rt_name, gp, call, n)                                          \
 114static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {                 \
 115        .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),            \
 116        .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),                   \
 117};                                                                                      \
 118static struct rcu_tasks rt_name =                                                       \
 119{                                                                                       \
 120        .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),                                \
 121        .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),                 \
 122        .gp_func = gp,                                                                  \
 123        .call_func = call,                                                              \
 124        .rtpcpu = &rt_name ## __percpu,                                                 \
 125        .name = n,                                                                      \
 126        .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),                           \
 127        .percpu_enqueue_lim = 1,                                                        \
 128        .percpu_dequeue_lim = 1,                                                        \
 129        .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),                \
 130        .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,                             \
 131        .kname = #rt_name,                                                              \
 132}
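
#if 0	/* Illustrative sketch only, not part of this file. */
/*
 * A hypothetical flavor "rcu_tasks_demo" would be stamped out by pairing a
 * grace-period-wait function with a call_rcu()-style enqueue function and
 * handing both to DEFINE_RCU_TASKS(); the real instantiations for "RCU
 * Tasks", "RCU Tasks Rude", and "RCU Tasks Trace" appear later in this
 * file.  All "demo" names below are made up for illustration, and
 * call_rcu_tasks_generic() is defined further down in this file.
 */
static void rcu_tasks_demo_wait_gp(struct rcu_tasks *rtp);
void call_rcu_tasks_demo(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_demo, rcu_tasks_demo_wait_gp, call_rcu_tasks_demo,
                 "RCU Tasks Demo");

void call_rcu_tasks_demo(struct rcu_head *rhp, rcu_callback_t func)
{
        call_rcu_tasks_generic(rhp, func, &rcu_tasks_demo);
}
#endif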
 133
 134/* Track exiting tasks in order to allow them to be waited for. */
 135DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 136
 137/* Avoid IPIing CPUs early in the grace period. */
 138#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
 139static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
 140module_param(rcu_task_ipi_delay, int, 0644);
 141
 142/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
 143#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
 144static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 145module_param(rcu_task_stall_timeout, int, 0644);
 146#define RCU_TASK_STALL_INFO (HZ * 10)
 147static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
 148module_param(rcu_task_stall_info, int, 0644);
 149static int rcu_task_stall_info_mult __read_mostly = 3;
 150module_param(rcu_task_stall_info_mult, int, 0444);
 151
 152static int rcu_task_enqueue_lim __read_mostly = -1;
 153module_param(rcu_task_enqueue_lim, int, 0444);
 154
 155static bool rcu_task_cb_adjust;
 156static int rcu_task_contend_lim __read_mostly = 100;
 157module_param(rcu_task_contend_lim, int, 0444);
 158static int rcu_task_collapse_lim __read_mostly = 10;
 159module_param(rcu_task_collapse_lim, int, 0444);
 160
 161/* RCU tasks grace-period state for debugging. */
 162#define RTGS_INIT                0
 163#define RTGS_WAIT_WAIT_CBS       1
 164#define RTGS_WAIT_GP             2
 165#define RTGS_PRE_WAIT_GP         3
 166#define RTGS_SCAN_TASKLIST       4
 167#define RTGS_POST_SCAN_TASKLIST  5
 168#define RTGS_WAIT_SCAN_HOLDOUTS  6
 169#define RTGS_SCAN_HOLDOUTS       7
 170#define RTGS_POST_GP             8
 171#define RTGS_WAIT_READERS        9
 172#define RTGS_INVOKE_CBS         10
 173#define RTGS_WAIT_CBS           11
 174#ifndef CONFIG_TINY_RCU
 175static const char * const rcu_tasks_gp_state_names[] = {
 176        "RTGS_INIT",
 177        "RTGS_WAIT_WAIT_CBS",
 178        "RTGS_WAIT_GP",
 179        "RTGS_PRE_WAIT_GP",
 180        "RTGS_SCAN_TASKLIST",
 181        "RTGS_POST_SCAN_TASKLIST",
 182        "RTGS_WAIT_SCAN_HOLDOUTS",
 183        "RTGS_SCAN_HOLDOUTS",
 184        "RTGS_POST_GP",
 185        "RTGS_WAIT_READERS",
 186        "RTGS_INVOKE_CBS",
 187        "RTGS_WAIT_CBS",
 188};
 189#endif /* #ifndef CONFIG_TINY_RCU */
 190
 191////////////////////////////////////////////////////////////////////////
 192//
 193// Generic code.
 194
 195static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
 196
 197/* Record grace-period phase and time. */
 198static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
 199{
 200        rtp->gp_state = newstate;
 201        rtp->gp_jiffies = jiffies;
 202}
 203
 204#ifndef CONFIG_TINY_RCU
 205/* Return state name. */
 206static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
 207{
 208        int i = data_race(rtp->gp_state); // Let KCSAN detect update races
 209        int j = READ_ONCE(i); // Prevent the compiler from reading twice
 210
 211        if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
 212                return "???";
 213        return rcu_tasks_gp_state_names[j];
 214}
 215#endif /* #ifndef CONFIG_TINY_RCU */
 216
 217// Initialize per-CPU callback lists for the specified flavor of
 218// Tasks RCU.
 219static void cblist_init_generic(struct rcu_tasks *rtp)
 220{
 221        int cpu;
 222        unsigned long flags;
 223        int lim;
 224        int shift;
 225
 226        raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 227        if (rcu_task_enqueue_lim < 0) {
 228                rcu_task_enqueue_lim = 1;
 229                rcu_task_cb_adjust = true;
 230                pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
 231        } else if (rcu_task_enqueue_lim == 0) {
 232                rcu_task_enqueue_lim = 1;
 233        }
 234        lim = rcu_task_enqueue_lim;
 235
 236        if (lim > nr_cpu_ids)
 237                lim = nr_cpu_ids;
 238        shift = ilog2(nr_cpu_ids / lim);
 239        if (((nr_cpu_ids - 1) >> shift) >= lim)
 240                shift++;
 241        WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
 242        WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
 243        smp_store_release(&rtp->percpu_enqueue_lim, lim);
 244        for_each_possible_cpu(cpu) {
 245                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 246
 247                WARN_ON_ONCE(!rtpcp);
 248                if (cpu)
 249                        raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
 250                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 251                if (rcu_segcblist_empty(&rtpcp->cblist))
 252                        rcu_segcblist_init(&rtpcp->cblist);
 253                INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
 254                rtpcp->cpu = cpu;
 255                rtpcp->rtpp = rtp;
 256                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
 257        }
 258        raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 259        pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
 260}
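
#if 0	/* Illustrative sketch only, not part of this file. */
/*
 * The shift computed above maps CPU IDs onto the first "lim" callback
 * queues via cpu >> shift.  For example, with nr_cpu_ids = 16 and
 * rcu_task_enqueue_lim = 4: ilog2(16 / 4) = 2 and (16 - 1) >> 2 = 3 < 4,
 * so shift stays 2 and CPUs 0-3 share queue 0, CPUs 4-7 queue 1,
 * CPUs 8-11 queue 2, and CPUs 12-15 queue 3.  With the default single
 * queue (lim = 1), shift is ilog2(16) = 4 and every CPU maps to queue 0.
 */
static int demo_enqueue_shift(int nr, int lim)
{
        int shift = ilog2(nr / lim);

        if (((nr - 1) >> shift) >= lim)
                shift++;
        return shift;	/* the enqueuing CPU "cpu" then uses queue cpu >> shift */
}
#endif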
 261
 262// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
 263static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
 264{
 265        struct rcu_tasks *rtp;
 266        struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
 267
 268        rtp = rtpcp->rtpp;
 269        rcuwait_wake_up(&rtp->cbs_wait);
 270}
 271
 272// Enqueue a callback for the specified flavor of Tasks RCU.
 273static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 274                                   struct rcu_tasks *rtp)
 275{
 276        int chosen_cpu;
 277        unsigned long flags;
 278        int ideal_cpu;
 279        unsigned long j;
 280        bool needadjust = false;
 281        bool needwake;
 282        struct rcu_tasks_percpu *rtpcp;
 283
 284        rhp->next = NULL;
 285        rhp->func = func;
 286        local_irq_save(flags);
 287        rcu_read_lock();
 288        ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
 289        chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
 290        rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
 291        if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
 292                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 293                j = jiffies;
 294                if (rtpcp->rtp_jiffies != j) {
 295                        rtpcp->rtp_jiffies = j;
 296                        rtpcp->rtp_n_lock_retries = 0;
 297                }
 298                if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
 299                    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
 300                        needadjust = true;  // Defer adjustment to avoid deadlock.
 301        }
 302        if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
 303                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
 304                cblist_init_generic(rtp);
 305                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 306        }
 307        needwake = rcu_segcblist_empty(&rtpcp->cblist);
 308        rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
 309        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 310        if (unlikely(needadjust)) {
 311                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 312                if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
 313                        WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
 314                        WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
 315                        smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
 316                        pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
 317                }
 318                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 319        }
 320        rcu_read_unlock();
 321        /* We can't create the thread unless interrupts are enabled. */
 322        if (needwake && READ_ONCE(rtp->kthread_ptr))
 323                irq_work_queue(&rtpcp->rtp_irq_work);
 324}
 325
 326// Wait for a grace period for the specified flavor of Tasks RCU.
 327static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
 328{
 329        /* Complain if the scheduler has not started.  */
 330        RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
 331                         "synchronize_rcu_tasks called too soon");
 332
 333        /* Wait for the grace period. */
 334        wait_rcu_gp(rtp->call_func);
 335}
 336
 337// RCU callback function for rcu_barrier_tasks_generic().
 338static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
 339{
 340        struct rcu_tasks *rtp;
 341        struct rcu_tasks_percpu *rtpcp;
 342
 343        rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
 344        rtp = rtpcp->rtpp;
 345        if (atomic_dec_and_test(&rtp->barrier_q_count))
 346                complete(&rtp->barrier_q_completion);
 347}
 348
 349// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
 350// Operates in a manner similar to rcu_barrier().
 351static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
 352{
 353        int cpu;
 354        unsigned long flags;
 355        struct rcu_tasks_percpu *rtpcp;
 356        unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
 357
 358        mutex_lock(&rtp->barrier_q_mutex);
 359        if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
 360                smp_mb();
 361                mutex_unlock(&rtp->barrier_q_mutex);
 362                return;
 363        }
 364        rcu_seq_start(&rtp->barrier_q_seq);
 365        init_completion(&rtp->barrier_q_completion);
 366        atomic_set(&rtp->barrier_q_count, 2);
 367        for_each_possible_cpu(cpu) {
 368                if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
 369                        break;
 370                rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 371                rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
 372                raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 373                if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
 374                        atomic_inc(&rtp->barrier_q_count);
 375                raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 376        }
 377        if (atomic_sub_and_test(2, &rtp->barrier_q_count))
 378                complete(&rtp->barrier_q_completion);
 379        wait_for_completion(&rtp->barrier_q_completion);
 380        rcu_seq_end(&rtp->barrier_q_seq);
 381        mutex_unlock(&rtp->barrier_q_mutex);
 382}
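
#if 0	/* Illustrative sketch only, not part of this file. */
/*
 * The "bias the count by two" pattern used above, in isolation: the bias
 * keeps the counter from reaching zero while callbacks are still being
 * entrained, and removing the bias at the end either completes
 * immediately (nothing was entrained) or leaves the completion to the
 * last callback.  demo_cb() plays the role of
 * rcu_barrier_tasks_generic_cb().
 */
static atomic_t demo_count;
static struct completion demo_done;

static void demo_cb(struct rcu_head *rhp)
{
        if (atomic_dec_and_test(&demo_count))
                complete(&demo_done);
}

static void demo_barrier(int nr_entrained)
{
        int i;

        init_completion(&demo_done);
        atomic_set(&demo_count, 2);		/* the bias */
        for (i = 0; i < nr_entrained; i++)
                atomic_inc(&demo_count);	/* one per entrained callback */
        /* ... each entrained callback eventually invokes demo_cb() ... */
        if (atomic_sub_and_test(2, &demo_count))	/* drop the bias */
                complete(&demo_done);
        wait_for_completion(&demo_done);
}
#endif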
 383
 384// Advance callbacks and indicate whether either a grace period or
 385// callback invocation is needed.
 386static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 387{
 388        int cpu;
 389        unsigned long flags;
 390        long n;
 391        long ncbs = 0;
 392        long ncbsnz = 0;
 393        int needgpcb = 0;
 394
 395        for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
 396                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 397
 398                /* Advance and accelerate any new callbacks. */
 399                if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
 400                        continue;
 401                raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 402                // Should we shrink down to a single callback queue?
 403                n = rcu_segcblist_n_cbs(&rtpcp->cblist);
 404                if (n) {
 405                        ncbs += n;
 406                        if (cpu > 0)
 407                                ncbsnz += n;
 408                }
 409                rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 410                (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
 411                if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
 412                        needgpcb |= 0x3;
 413                if (!rcu_segcblist_empty(&rtpcp->cblist))
 414                        needgpcb |= 0x1;
 415                raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 416        }
 417
 418        // Shrink down to a single callback queue if appropriate.
 419        // This is done in two stages: (1) If there are no more than
 420        // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
 421        // CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
 422        // if there has not been an increase in callbacks, limit dequeuing
 423        // to CPU 0.  Note the matching RCU read-side critical section in
 424        // call_rcu_tasks_generic().
 425        if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
 426                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 427                if (rtp->percpu_enqueue_lim > 1) {
 428                        WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
 429                        smp_store_release(&rtp->percpu_enqueue_lim, 1);
 430                        rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
 431                        pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
 432                }
 433                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 434        }
 435        if (rcu_task_cb_adjust && !ncbsnz &&
 436            poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
 437                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 438                if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
 439                        WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
 440                        pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
 441                }
 442                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 443        }
 444
 445        return needgpcb;
 446}
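
#if 0	/* Illustrative sketch only, not part of this file. */
/*
 * The polled grace-period pattern used by the two-stage collapse above,
 * in isolation: record a cookie when the enqueue limit is reduced, and
 * only reduce the dequeue limit once a full RCU grace period has elapsed
 * since that point, so that no reader in call_rcu_tasks_generic() can
 * still be relying on the old limit.
 */
static unsigned long demo_gp_cookie;

static void demo_begin_transition(void)
{
        demo_gp_cookie = get_state_synchronize_rcu();
}

static bool demo_transition_complete(void)
{
        return poll_state_synchronize_rcu(demo_gp_cookie);
}
#endif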
 447
 448// Advance callbacks and invoke any that are ready.
 449static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
 450{
 451        int cpu;
 452        int cpunext;
 453        unsigned long flags;
 454        int len;
 455        struct rcu_head *rhp;
 456        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 457        struct rcu_tasks_percpu *rtpcp_next;
 458
 459        cpu = rtpcp->cpu;
 460        cpunext = cpu * 2 + 1;
 461        if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
 462                rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 463                queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
 464                cpunext++;
 465                if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
 466                        rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 467                        queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
 468                }
 469        }
 470
 471        if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
 472                return;
 473        raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 474        rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 475        rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
 476        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 477        len = rcl.len;
 478        for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
 479                local_bh_disable();
 480                rhp->func(rhp);
 481                local_bh_enable();
 482                cond_resched();
 483        }
 484        raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 485        rcu_segcblist_add_len(&rtpcp->cblist, -len);
 486        (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
 487        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 488}
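
#if 0	/* Illustrative sketch only, not part of this file. */
/*
 * The binary-tree fan-out used above, shown for eight in-use queues:
 * queue 0 kicks queues 1 and 2, queue 1 kicks 3 and 4, queue 2 kicks
 * 5 and 6, and queue 3 kicks 7, so callback invocation spreads across
 * the queues in O(log N) sequential steps rather than N.
 */
static void demo_fanout(int cpu, int lim)
{
        int cpunext = cpu * 2 + 1;

        if (cpunext < lim) {
                pr_info("queue %d kicks queue %d\n", cpu, cpunext);
                cpunext++;
                if (cpunext < lim)
                        pr_info("queue %d kicks queue %d\n", cpu, cpunext);
        }
}
#endif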
 489
 490// Workqueue flood to advance callbacks and invoke any that are ready.
 491static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
 492{
 493        struct rcu_tasks *rtp;
 494        struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
 495
 496        rtp = rtpcp->rtpp;
 497        rcu_tasks_invoke_cbs(rtp, rtpcp);
 498}
 499
 500/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
 501static int __noreturn rcu_tasks_kthread(void *arg)
 502{
 503        int needgpcb;
 504        struct rcu_tasks *rtp = arg;
 505
 506        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
 507        housekeeping_affine(current, HK_TYPE_RCU);
 508        WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
 509
 510        /*
 511         * Each pass through the following loop makes one check for
 512         * newly arrived callbacks, and, if there are some, waits for
 513         * one RCU-tasks grace period and then invokes the callbacks.
 514         * This loop is terminated by the system going down.  ;-)
 515         */
 516        for (;;) {
 517                set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 518
 519                /* If there were none, wait a bit and start over. */
 520                rcuwait_wait_event(&rtp->cbs_wait,
 521                                   (needgpcb = rcu_tasks_need_gpcb(rtp)),
 522                                   TASK_IDLE);
 523
 524                if (needgpcb & 0x2) {
 525                        // Wait for one grace period.
 526                        set_tasks_gp_state(rtp, RTGS_WAIT_GP);
 527                        rtp->gp_start = jiffies;
 528                        rcu_seq_start(&rtp->tasks_gp_seq);
 529                        rtp->gp_func(rtp);
 530                        rcu_seq_end(&rtp->tasks_gp_seq);
 531                }
 532
 533                /* Invoke callbacks. */
 534                set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
 535                rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
 536
 537                /* Paranoid sleep to keep this from entering a tight loop */
 538                schedule_timeout_idle(rtp->gp_sleep);
 539        }
 540}
 541
 542/* Spawn RCU-tasks grace-period kthread. */
 543static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 544{
 545        struct task_struct *t;
 546
 547        t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
 548        if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
 549                return;
 550        smp_mb(); /* Ensure others see full kthread. */
 551}
 552
 553#ifndef CONFIG_TINY_RCU
 554
 555/*
 556 * Print any non-default Tasks RCU settings.
 557 */
 558static void __init rcu_tasks_bootup_oddness(void)
 559{
 560#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
 561        int rtsimc;
 562
 563        if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
 564                pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
 565        rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
 566        if (rtsimc != rcu_task_stall_info_mult) {
 567                pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
 568                rcu_task_stall_info_mult = rtsimc;
 569        }
 570#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
 571#ifdef CONFIG_TASKS_RCU
 572        pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
 573#endif /* #ifdef CONFIG_TASKS_RCU */
 574#ifdef CONFIG_TASKS_RUDE_RCU
 575        pr_info("\tRude variant of Tasks RCU enabled.\n");
 576#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
 577#ifdef CONFIG_TASKS_TRACE_RCU
 578        pr_info("\tTracing variant of Tasks RCU enabled.\n");
 579#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 580}
 581
 582#endif /* #ifndef CONFIG_TINY_RCU */
 583
 584#ifndef CONFIG_TINY_RCU
 585/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
 586static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
 587{
 588        int cpu;
 589        bool havecbs = false;
 590
 591        for_each_possible_cpu(cpu) {
 592                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 593
 594                if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
 595                        havecbs = true;
 596                        break;
 597                }
 598        }
 599        pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
 600                rtp->kname,
 601                tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
 602                jiffies - data_race(rtp->gp_jiffies),
 603                data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
 604                data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
 605                ".k"[!!data_race(rtp->kthread_ptr)],
 606                ".C"[havecbs],
 607                s);
 608}
 609#endif // #ifndef CONFIG_TINY_RCU
 610
 611static void exit_tasks_rcu_finish_trace(struct task_struct *t);
 612
 613#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
 614
 615////////////////////////////////////////////////////////////////////////
 616//
 617// Shared code between task-list-scanning variants of Tasks RCU.
 618
 619/* Wait for one RCU-tasks grace period. */
 620static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 621{
 622        struct task_struct *g;
 623        int fract;
 624        LIST_HEAD(holdouts);
 625        unsigned long j;
 626        unsigned long lastinfo;
 627        unsigned long lastreport;
 628        bool reported = false;
 629        int rtsi;
 630        struct task_struct *t;
 631
 632        set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
 633        rtp->pregp_func();
 634
 635        /*
 636         * There were callbacks, so we need to wait for an RCU-tasks
 637         * grace period.  Start off by scanning the task list for tasks
 638         * that are not already voluntarily blocked.  Mark these tasks
 639         * and make a list of them in holdouts.
 640         */
 641        set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
 642        rcu_read_lock();
 643        for_each_process_thread(g, t)
 644                rtp->pertask_func(t, &holdouts);
 645        rcu_read_unlock();
 646
 647        set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
 648        rtp->postscan_func(&holdouts);
 649
 650        /*
 651         * Each pass through the following loop scans the list of holdout
 652         * tasks, removing any that are no longer holdouts.  When the list
 653         * is empty, we are done.
 654         */
 655        lastreport = jiffies;
 656        lastinfo = lastreport;
 657        rtsi = READ_ONCE(rcu_task_stall_info);
 658
 659        // Start off with initial wait and slowly back off to 1 HZ wait.
 660        fract = rtp->init_fract;
 661
 662        while (!list_empty(&holdouts)) {
 663                ktime_t exp;
 664                bool firstreport;
 665                bool needreport;
 666                int rtst;
 667
 668                // Slowly back off waiting for holdouts
 669                set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
 670                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 671                        schedule_timeout_idle(fract);
 672                } else {
 673                        exp = jiffies_to_nsecs(fract);
 674                        __set_current_state(TASK_IDLE);
 675                        schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
 676                }
 677
 678                if (fract < HZ)
 679                        fract++;
 680
 681                rtst = READ_ONCE(rcu_task_stall_timeout);
 682                needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
 683                if (needreport) {
 684                        lastreport = jiffies;
 685                        reported = true;
 686                }
 687                firstreport = true;
 688                WARN_ON(signal_pending(current));
 689                set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
 690                rtp->holdouts_func(&holdouts, needreport, &firstreport);
 691
 692                // Print pre-stall informational messages if needed.
 693                j = jiffies;
 694                if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
 695                        lastinfo = j;
 696                        rtsi = rtsi * rcu_task_stall_info_mult;
 697                        pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
 698                                __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
 699                }
 700        }
 701
 702        set_tasks_gp_state(rtp, RTGS_POST_GP);
 703        rtp->postgp_func(rtp);
 704}
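
#if 0	/* Illustrative sketch only, not part of this file. */
/*
 * Worked example of the pre-stall message schedule above, assuming the
 * defaults rcu_task_stall_info = 10*HZ and rcu_task_stall_info_mult = 3:
 * informational messages appear roughly 10s, 40s, 130s, and 400s into an
 * overly long grace period, after which the full stall warning (default
 * rcu_task_stall_timeout of 10 minutes) takes over.
 */
static void demo_info_schedule(void)
{
        unsigned long when = 0, interval = 10 * HZ;
        int i;

        for (i = 0; i < 4; i++) {
                when += interval;
                pr_info("pre-stall message ~%lu seconds in\n", when / HZ);
                interval *= 3;	/* rcu_task_stall_info_mult */
        }
}
#endif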
 705
 706#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
 707
 708#ifdef CONFIG_TASKS_RCU
 709
 710////////////////////////////////////////////////////////////////////////
 711//
 712// Simple variant of RCU whose quiescent states are voluntary context
 713// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
 714// As such, grace periods can take one good long time.  There are no
 715// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 716// because this implementation is intended to get the system into a safe
 717// state for some of the manipulations involved in tracing and the like.
 718// Finally, this implementation does not support high call_rcu_tasks()
 719// rates from multiple CPUs.  If this is required, per-CPU callback lists
 720// will be needed.
 721//
 722// The implementation uses rcu_tasks_wait_gp(), which relies on function
 723// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
 724// function sets these function pointers up so that rcu_tasks_wait_gp()
 725// invokes these functions in this order:
 726//
 727// rcu_tasks_pregp_step():
 728//      Invokes synchronize_rcu() in order to wait for all in-flight
 729//      t->on_rq and t->nvcsw transitions to complete.  This works because
 730//      all such transitions are carried out with interrupts disabled.
 731// rcu_tasks_pertask(), invoked on every non-idle task:
 732//      For every runnable non-idle task other than the current one, use
 733//      get_task_struct() to pin down that task, snapshot that task's
 734//      number of voluntary context switches, and add that task to the
 735//      holdout list.
 736// rcu_tasks_postscan():
 737//      Invoke synchronize_srcu() to ensure that all tasks that were
 738//      in the process of exiting (and which thus might not know to
 739//      synchronize with this RCU Tasks grace period) have completed
 740//      exiting.
 741// check_all_holdout_tasks(), repeatedly until holdout list is empty:
 742//      Scans the holdout list, attempting to identify a quiescent state
 743//      for each task on the list.  If there is a quiescent state, the
 744//      corresponding task is removed from the holdout list.
 745// rcu_tasks_postgp():
 746//      Invokes synchronize_rcu() in order to ensure that all prior
 747//      t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
 748//      to have happened before the end of this RCU Tasks grace period.
 749//      Again, this works because all such transitions are carried out
 750//      with interrupts disabled.
 751//
 752// For each exiting task, the exit_tasks_rcu_start() and
 753// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
 754// read-side critical sections waited for by rcu_tasks_postscan().
 755//
 756// Pre-grace-period update-side code is ordered before the grace period
 757// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
 758// is ordered before the grace period via the synchronize_rcu() call in
 759// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
 760// disabling.
 761
 762/* Pre-grace-period preparation. */
 763static void rcu_tasks_pregp_step(void)
 764{
 765        /*
 766         * Wait for all pre-existing t->on_rq and t->nvcsw transitions
 767         * to complete.  Invoking synchronize_rcu() suffices because all
 768         * these transitions occur with interrupts disabled.  Without this
 769         * synchronize_rcu(), a read-side critical section that started
 770         * before the grace period might be incorrectly seen as having
 771         * started after the grace period.
 772         *
 773         * This synchronize_rcu() also dispenses with the need for a
 774         * memory barrier on the first store to t->rcu_tasks_holdout,
 775         * as it forces the store to happen after the beginning of the
 776         * grace period.
 777         */
 778        synchronize_rcu();
 779}
 780
 781/* Per-task initial processing. */
 782static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 783{
 784        if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
 785                get_task_struct(t);
 786                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
 787                WRITE_ONCE(t->rcu_tasks_holdout, true);
 788                list_add(&t->rcu_tasks_holdout_list, hop);
 789        }
 790}
 791
 792/* Processing between scanning the task list and draining the holdout list. */
 793static void rcu_tasks_postscan(struct list_head *hop)
 794{
 795        /*
 796         * Wait for tasks that are in the process of exiting.  This
 797         * does only part of the job, ensuring that all tasks that were
 798         * previously exiting reach the point where they have disabled
 799         * preemption, allowing the later synchronize_rcu() to finish
 800         * the job.
 801         */
 802        synchronize_srcu(&tasks_rcu_exit_srcu);
 803}
 804
 805/* See if tasks are still holding out, complain if so. */
 806static void check_holdout_task(struct task_struct *t,
 807                               bool needreport, bool *firstreport)
 808{
 809        int cpu;
 810
 811        if (!READ_ONCE(t->rcu_tasks_holdout) ||
 812            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
 813            !READ_ONCE(t->on_rq) ||
 814            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 815             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
 816                WRITE_ONCE(t->rcu_tasks_holdout, false);
 817                list_del_init(&t->rcu_tasks_holdout_list);
 818                put_task_struct(t);
 819                return;
 820        }
 821        rcu_request_urgent_qs_task(t);
 822        if (!needreport)
 823                return;
 824        if (*firstreport) {
 825                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
 826                *firstreport = false;
 827        }
 828        cpu = task_cpu(t);
 829        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
 830                 t, ".I"[is_idle_task(t)],
 831                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
 832                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
 833                 t->rcu_tasks_idle_cpu, cpu);
 834        sched_show_task(t);
 835}
 836
 837/* Scan the holdout lists for tasks no longer holding out. */
 838static void check_all_holdout_tasks(struct list_head *hop,
 839                                    bool needreport, bool *firstreport)
 840{
 841        struct task_struct *t, *t1;
 842
 843        list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
 844                check_holdout_task(t, needreport, firstreport);
 845                cond_resched();
 846        }
 847}
 848
 849/* Finish off the Tasks-RCU grace period. */
 850static void rcu_tasks_postgp(struct rcu_tasks *rtp)
 851{
 852        /*
 853         * Because ->on_rq and ->nvcsw are not guaranteed to have full
 854         * memory barriers prior to them in the schedule() path, memory
 855         * reordering on other CPUs could cause their RCU-tasks read-side
 856         * critical sections to extend past the end of the grace period.
 857         * However, because these ->nvcsw updates are carried out with
 858         * interrupts disabled, we can use synchronize_rcu() to force the
 859         * needed ordering on all such CPUs.
 860         *
 861         * This synchronize_rcu() also confines all ->rcu_tasks_holdout
 862         * accesses to be within the grace period, avoiding the need for
 863         * memory barriers for ->rcu_tasks_holdout accesses.
 864         *
 865         * In addition, this synchronize_rcu() waits for exiting tasks
 866         * to complete their final preempt_disable() region of execution,
 867         * cleaning up after the synchronize_srcu() above.
 868         */
 869        synchronize_rcu();
 870}
 871
 872void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 873DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 874
 875/**
 876 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 877 * @rhp: structure to be used for queueing the RCU updates.
 878 * @func: actual callback function to be invoked after the grace period
 879 *
 880 * The callback function will be invoked some time after a full grace
 881 * period elapses, in other words after all currently executing RCU
 882 * read-side critical sections have completed. call_rcu_tasks() assumes
 883 * that the read-side critical sections end at a voluntary context
 884 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 885 * or transition to usermode execution.  As such, there are no read-side
 886 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 887 * this primitive is intended to determine that all tasks have passed
 888 * through a safe state, not so much for data-structure synchronization.
 889 *
 890 * See the description of call_rcu() for more detailed information on
 891 * memory ordering guarantees.
 892 */
 893void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 894{
 895        call_rcu_tasks_generic(rhp, func, &rcu_tasks);
 896}
 897EXPORT_SYMBOL_GPL(call_rcu_tasks);
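
#if 0	/* Illustrative usage sketch, not part of this file. */
/*
 * Hypothetical caller: detach a trampoline-like object from all execution
 * paths, then let call_rcu_tasks() free it only after every task has
 * passed through a voluntary-context-switch, idle, or usermode quiescent
 * state, so no task can still be executing inside it.  struct demo_tramp
 * and demo_unlink_tramp() are made-up names.
 */
struct demo_tramp {
        struct rcu_head rh;
        void *text;
};

static void demo_unlink_tramp(struct demo_tramp *dt);	/* made-up helper */

static void demo_free_tramp(struct rcu_head *rhp)
{
        struct demo_tramp *dt = container_of(rhp, struct demo_tramp, rh);

        kfree(dt);
}

static void demo_remove_tramp(struct demo_tramp *dt)
{
        demo_unlink_tramp(dt);			/* no new tasks can enter it */
        call_rcu_tasks(&dt->rh, demo_free_tramp);	/* old ones drain out */
}
#endif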
 898
 899/**
 900 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 901 *
 902 * Control will return to the caller some time after a full rcu-tasks
 903 * grace period has elapsed, in other words after all currently
 904 * executing rcu-tasks read-side critical sections have completed.  These
 905 * read-side critical sections are delimited by calls to schedule(),
 906 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 907 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 908 *
 909 * This is a very specialized primitive, intended only for a few uses in
 910 * tracing and other situations requiring manipulation of function
 911 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 912 * is not (yet) intended for heavy use from multiple CPUs.
 913 *
 914 * See the description of synchronize_rcu() for more detailed information
 915 * on memory ordering guarantees.
 916 */
 917void synchronize_rcu_tasks(void)
 918{
 919        synchronize_rcu_tasks_generic(&rcu_tasks);
 920}
 921EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
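
#if 0	/* Illustrative usage sketch, not part of this file. */
/*
 * Hypothetical synchronous caller: once no new tasks can enter the old
 * code, synchronize_rcu_tasks() waits for every task to reach a voluntary
 * quiescent state before the old code is torn down in place.
 * demo_redirect_callers() and demo_release_old_code() are made-up names.
 */
static void demo_redirect_callers(void);
static void demo_release_old_code(void);

static void demo_replace_code(void)
{
        demo_redirect_callers();	/* no new entries into the old code */
        synchronize_rcu_tasks();	/* wait out tasks already inside it */
        demo_release_old_code();	/* now safe to free or overwrite */
}
#endif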
 922
 923/**
 924 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 925 *
 926 * Although the current implementation is guaranteed to wait, it is not
 927 * obligated to, for example, if there are no pending callbacks.
 928 */
 929void rcu_barrier_tasks(void)
 930{
 931        rcu_barrier_tasks_generic(&rcu_tasks);
 932}
 933EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
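
#if 0	/* Illustrative usage sketch, not part of this file. */
/*
 * Hypothetical module-exit path: rcu_barrier_tasks() waits for all
 * callbacks already queued via call_rcu_tasks() to be invoked, so none of
 * the module's callback functions can run after the module text is freed.
 * demo_stop_queueing_callbacks() is a made-up name.
 */
static void demo_stop_queueing_callbacks(void);

static void __exit demo_module_exit(void)
{
        demo_stop_queueing_callbacks();	/* no further call_rcu_tasks() calls */
        rcu_barrier_tasks();		/* wait for already-queued callbacks */
}
#endif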
 934
 935static int __init rcu_spawn_tasks_kthread(void)
 936{
 937        cblist_init_generic(&rcu_tasks);
 938        rcu_tasks.gp_sleep = HZ / 10;
 939        rcu_tasks.init_fract = HZ / 10;
 940        rcu_tasks.pregp_func = rcu_tasks_pregp_step;
 941        rcu_tasks.pertask_func = rcu_tasks_pertask;
 942        rcu_tasks.postscan_func = rcu_tasks_postscan;
 943        rcu_tasks.holdouts_func = check_all_holdout_tasks;
 944        rcu_tasks.postgp_func = rcu_tasks_postgp;
 945        rcu_spawn_tasks_kthread_generic(&rcu_tasks);
 946        return 0;
 947}
 948
 949#if !defined(CONFIG_TINY_RCU)
 950void show_rcu_tasks_classic_gp_kthread(void)
 951{
 952        show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
 953}
 954EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
 955#endif // !defined(CONFIG_TINY_RCU)
 956
 957/* Do the srcu_read_lock() for the above synchronize_srcu().  */
 958void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
 959{
 960        preempt_disable();
 961        current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
 962        preempt_enable();
 963}
 964
 965/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
 966void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
 967{
 968        struct task_struct *t = current;
 969
 970        preempt_disable();
 971        __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
 972        preempt_enable();
 973        exit_tasks_rcu_finish_trace(t);
 974}
 975
 976#else /* #ifdef CONFIG_TASKS_RCU */
 977void exit_tasks_rcu_start(void) { }
 978void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
 979#endif /* #else #ifdef CONFIG_TASKS_RCU */
 980
 981#ifdef CONFIG_TASKS_RUDE_RCU
 982
 983////////////////////////////////////////////////////////////////////////
 984//
 985// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
 986// passing an empty function to schedule_on_each_cpu().  This approach
 987// provides an asynchronous call_rcu_tasks_rude() API and batching of
 988// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
 989// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
 990// and induces otherwise unnecessary context switches on all online CPUs,
 991// whether idle or not.
 992//
 993// Callback handling is provided by the rcu_tasks_kthread() function.
 994//
 995// Ordering is provided by the scheduler's context-switch code.
 996
 997// Empty function to allow workqueues to force a context switch.
 998static void rcu_tasks_be_rude(struct work_struct *work)
 999{
1000}
1001
1002// Wait for one rude RCU-tasks grace period.
1003static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1004{
1005        if (num_online_cpus() <= 1)
1006                return; // Fastpath for only one CPU.
1007
1008        rtp->n_ipis += cpumask_weight(cpu_online_mask);
1009        schedule_on_each_cpu(rcu_tasks_be_rude);
1010}
1011
1012void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1013DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1014                 "RCU Tasks Rude");
1015
1016/**
1017 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1018 * @rhp: structure to be used for queueing the RCU updates.
1019 * @func: actual callback function to be invoked after the grace period
1020 *
1021 * The callback function will be invoked some time after a full grace
1022 * period elapses, in other words after all currently executing RCU
1023 * read-side critical sections have completed. call_rcu_tasks_rude()
1024 * assumes that the read-side critical sections end at context switch,
1025 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1026 * usermode execution is schedulable). As such, there are no read-side
1027 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1028 * this primitive is intended to determine that all tasks have passed
1029 * through a safe state, not so much for data-structure synchronization.
1030 *
1031 * See the description of call_rcu() for more detailed information on
1032 * memory ordering guarantees.
1033 */
1034void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1035{
1036        call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1037}
1038EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1039
1040/**
1041 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1042 *
1043 * Control will return to the caller some time after a rude rcu-tasks
1044 * grace period has elapsed, in other words after all currently
1045 * executing rcu-tasks read-side critical sections have completed.  These
1046 * read-side critical sections are delimited by calls to schedule(),
1047 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1048 * context), and (in theory, anyway) cond_resched().
1049 *
1050 * This is a very specialized primitive, intended only for a few uses in
1051 * tracing and other situations requiring manipulation of function preambles
1052 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
1053 * (yet) intended for heavy use from multiple CPUs.
1054 *
1055 * See the description of synchronize_rcu() for more detailed information
1056 * on memory ordering guarantees.
1057 */
1058void synchronize_rcu_tasks_rude(void)
1059{
1060        synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1061}
1062EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1063
1064/**
1065 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1066 *
1067 * Although the current implementation is guaranteed to wait, it is not
1068 * obligated to, for example, if there are no pending callbacks.
1069 */
1070void rcu_barrier_tasks_rude(void)
1071{
1072        rcu_barrier_tasks_generic(&rcu_tasks_rude);
1073}
1074EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1075
1076static int __init rcu_spawn_tasks_rude_kthread(void)
1077{
1078        cblist_init_generic(&rcu_tasks_rude);
1079        rcu_tasks_rude.gp_sleep = HZ / 10;
1080        rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1081        return 0;
1082}
1083
1084#if !defined(CONFIG_TINY_RCU)
1085void show_rcu_tasks_rude_gp_kthread(void)
1086{
1087        show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1088}
1089EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1090#endif // !defined(CONFIG_TINY_RCU)
1091#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1092
1093////////////////////////////////////////////////////////////////////////
1094//
1095// Tracing variant of Tasks RCU.  This variant is designed to be used
1096// to protect tracing hooks, including those of BPF.  This variant
1097// therefore:
1098//
1099// 1.   Has explicit read-side markers to allow finite grace periods
1100//      in the face of in-kernel loops for PREEMPT=n builds.
1101//
1102// 2.   Protects code in the idle loop, exception entry/exit, and
1103//      CPU-hotplug code paths, similar to the capabilities of SRCU.
1104//
1105// 3.   Avoids expensive read-side instructions, having overhead similar
1106//      to that of Preemptible RCU.
1107//
1108// There are of course downsides.  The grace-period code can send IPIs to
1109// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
1110// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
1111// is a single callback queue guarded by a single lock, again, much as for
1112// Tasks RCU.  If needed, these downsides can be at least partially remedied.
1113//
1114// Perhaps most important, this variant of RCU does not affect the vanilla
1115// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
1116// readers can operate from idle, offline, and exception entry/exit in no
1117// way allows rcu_preempt and rcu_sched readers to also do so.
1118//
1119// The implementation uses rcu_tasks_wait_gp(), which relies on function
1120// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
1121// function sets these function pointers up so that rcu_tasks_wait_gp()
1122// invokes these functions in this order:
1123//
1124// rcu_tasks_trace_pregp_step():
1125//      Initialize the count of readers and block CPU-hotplug operations.
1126// rcu_tasks_trace_pertask(), invoked on every non-idle task:
1127//      Initialize per-task state and attempt to identify an immediate
1128//      quiescent state for that task, or, failing that, attempt to
1129//      set that task's .need_qs flag so that task's next outermost
1130//      rcu_read_unlock_trace() will report the quiescent state (in which
1131//      case the count of readers is incremented).  If both attempts fail,
1132//      the task is added to a "holdout" list.  Note that IPIs are used
1133//      to invoke trc_read_check_handler() in the context of running tasks
1134//      in order to avoid ordering overhead on common-case shared-variable
1135//      accesses.
1136// rcu_tasks_trace_postscan():
1137//      Initialize state and attempt to identify an immediate quiescent
1138//      state as above (but only for idle tasks), unblock CPU-hotplug
1139//      operations, and wait for an RCU grace period to avoid races with
1140//      tasks that are in the process of exiting.
1141// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1142//      Scans the holdout list, attempting to identify a quiescent state
1143//      for each task on the list.  If there is a quiescent state, the
1144//      corresponding task is removed from the holdout list.
1145// rcu_tasks_trace_postgp():
1146//      Wait for the count of readers to drop to zero, reporting any stalls.
1147//      Also execute full memory barriers to maintain ordering with code
1148//      executing after the grace period.
1149//
1150// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1151//
1152// Pre-grace-period update-side code is ordered before the grace
1153// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
1154// Pre-grace-period read-side code is ordered before the grace period by
1155// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
1156// scheduler context-switch ordering (for locked-down non-running readers).
1157
1158// The lockdep state must be outside of #ifdef to be useful.
1159#ifdef CONFIG_DEBUG_LOCK_ALLOC
1160static struct lock_class_key rcu_lock_trace_key;
1161struct lockdep_map rcu_trace_lock_map =
1162        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1163EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1164#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1165
1166#ifdef CONFIG_TASKS_TRACE_RCU
1167
1168static atomic_t trc_n_readers_need_end;         // Number of waited-for readers.
1169static DECLARE_WAIT_QUEUE_HEAD(trc_wait);       // Wait queue for grace-period kthread.
1170
1171// Record outstanding IPIs to each CPU.  No point in sending two...
1172static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1173
1174// The number of detections of task quiescent state relying on
1175// heavyweight readers executing explicit memory barriers.
1176static unsigned long n_heavy_reader_attempts;
1177static unsigned long n_heavy_reader_updates;
1178static unsigned long n_heavy_reader_ofl_updates;
1179
1180void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1181DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1182                 "RCU Tasks Trace");
1183
1184/*
1185 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
1186 * while the scheduler locks are held.
1187 */
1188static void rcu_read_unlock_iw(struct irq_work *iwp)
1189{
1190        wake_up(&trc_wait);
1191}
1192static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
1193
1194/* If we are the last reader, wake up the grace-period kthread. */
1195void rcu_read_unlock_trace_special(struct task_struct *t)
1196{
1197        int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
1198
1199        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
1200            t->trc_reader_special.b.need_mb)
1201                smp_mb(); // Pairs with update-side barriers.
1202        // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1203        if (nq)
1204                WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1205        WRITE_ONCE(t->trc_reader_nesting, 0);
1206        if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
1207                irq_work_queue(&rcu_tasks_trace_iw);
1208}
1209EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
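
#if 0	/* Illustrative usage sketch, not part of this file. */
/*
 * Hypothetical RCU Tasks Trace reader: the explicit rcu_read_lock_trace()
 * and rcu_read_unlock_trace() markers keep grace periods finite even in
 * PREEMPT=n kernels and may be used from the idle loop and exception
 * entry/exit.  demo_hook is a made-up pointer assumed to be published by
 * an updater that uses call_rcu_tasks_trace() or
 * synchronize_rcu_tasks_trace() before freeing the old target.
 */
static void (*demo_hook)(void);

static void demo_call_hook(void)
{
        void (*hook)(void);

        rcu_read_lock_trace();
        hook = READ_ONCE(demo_hook);
        if (hook)
                hook();
        rcu_read_unlock_trace();
}
#endif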
1210
1211/* Add a task to the holdout list, if it is not already on the list. */
1212static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1213{
1214        if (list_empty(&t->trc_holdout_list)) {
1215                get_task_struct(t);
1216                list_add(&t->trc_holdout_list, bhp);
1217        }
1218}
1219
1220/* Remove a task from the holdout list, if it is in fact present. */
1221static void trc_del_holdout(struct task_struct *t)
1222{
1223        if (!list_empty(&t->trc_holdout_list)) {
1224                list_del_init(&t->trc_holdout_list);
1225                put_task_struct(t);
1226        }
1227}
1228
1229/* IPI handler to check task state. */
1230static void trc_read_check_handler(void *t_in)
1231{
1232        struct task_struct *t = current;
1233        struct task_struct *texp = t_in;
1234
1235        // If the task is no longer running on this CPU, leave.
1236        if (unlikely(texp != t)) {
1237                goto reset_ipi; // Already on holdout list, so will check later.
1238        }
1239
1240        // If the task is not in a read-side critical section, and
1241        // if this is the last reader, awaken the grace-period kthread.
1242        if (likely(!READ_ONCE(t->trc_reader_nesting))) {
1243                WRITE_ONCE(t->trc_reader_checked, true);
1244                goto reset_ipi;
1245        }
1246        // If we are racing with an rcu_read_unlock_trace(), try again later.
1247        if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
1248                goto reset_ipi;
1249        WRITE_ONCE(t->trc_reader_checked, true);
1250
1251        // Get here if the task is in a read-side critical section.  Set
1252        // its state so that it will awaken the grace-period kthread upon
1253        // exit from that critical section.
1254        atomic_inc(&trc_n_readers_need_end); // One more to wait on.
1255        WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
1256        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
1257
1258reset_ipi:
1259        // Allow future IPIs to be sent on CPU and for task.
1260        // Also order this IPI handler against any later manipulations of
1261        // the intended task.
1262        smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1263        smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1264}
1265
1266/* Callback function for scheduler to check locked-down task.  */
1267static int trc_inspect_reader(struct task_struct *t, void *arg)
1268{
1269        int cpu = task_cpu(t);
1270        int nesting;
1271        bool ofl = cpu_is_offline(cpu);
1272
1273        if (task_curr(t)) {
1274                WARN_ON_ONCE(ofl && !is_idle_task(t));
1275
1276                // If no chance of heavyweight readers, do it the hard way.
1277                if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1278                        return -EINVAL;
1279
1280                // If heavyweight readers are enabled on the remote task,
1281                // we can inspect its state even though it is currently running.
1282                // However, we cannot safely change its state.
1283                n_heavy_reader_attempts++;
1284                if (!ofl && // Check for "running" idle tasks on offline CPUs.
1285                    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1286                        return -EINVAL; // No quiescent state, do it the hard way.
1287                n_heavy_reader_updates++;
1288                if (ofl)
1289                        n_heavy_reader_ofl_updates++;
1290                nesting = 0;
1291        } else {
1292                // The task is not running, so C-language access is safe.
1293                nesting = t->trc_reader_nesting;
1294        }
1295
1296        // If not exiting a read-side critical section, mark as checked
1297        // so that the grace-period kthread will remove it from the
1298        // holdout list.
1299        t->trc_reader_checked = nesting >= 0;
1300        if (nesting <= 0)
1301                return nesting ? -EINVAL : 0;  // If in QS, done; otherwise try again later.
1302
1303        // The task is in a read-side critical section, so set up its
1304        // state so that it will awaken the grace-period kthread upon exit
1305        // from that critical section.
1306        atomic_inc(&trc_n_readers_need_end); // One more to wait on.
1307        WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
1308        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
1309        return 0;
1310}
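    /*
     * Note that a nonzero (-EINVAL) return above propagates through
     * task_call_func() to trc_wait_for_one_reader() below, which then falls
     * back to the holdout list and, when permitted, an IPI.
     */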
1311
1312/* Attempt to extract the state for the specified task. */
1313static void trc_wait_for_one_reader(struct task_struct *t,
1314                                    struct list_head *bhp)
1315{
1316        int cpu;
1317
1318        // If a previous IPI is still in flight, let it complete.
1319        if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1320                return;
1321
1322        // The current task had better be in a quiescent state.
1323        if (t == current) {
1324                t->trc_reader_checked = true;
1325                WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1326                return;
1327        }
1328
1329        // Attempt to nail down the task for inspection.
1330        get_task_struct(t);
1331        if (!task_call_func(t, trc_inspect_reader, NULL)) {
1332                put_task_struct(t);
1333                return;
1334        }
1335        put_task_struct(t);
1336
1337        // If this task is not yet on the holdout list, then we are in
1338        // an RCU read-side critical section.  Otherwise, the invocation of
1339        // trc_add_holdout() that added it to the list did the necessary
1340        // get_task_struct().  Either way, the task cannot be freed out
1341        // from under this code.
1342
1343        // If currently running, send an IPI; either way, add to list.
1344        trc_add_holdout(t, bhp);
1345        if (task_curr(t) &&
1346            time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1347                // The task is currently running, so try IPIing it.
1348                cpu = task_cpu(t);
1349
1350                // If there is already an IPI outstanding, let it happen.
1351                if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1352                        return;
1353
1354                per_cpu(trc_ipi_to_cpu, cpu) = true;
1355                t->trc_ipi_to_cpu = cpu;
1356                rcu_tasks_trace.n_ipis++;
1357                if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1358                        // Just in case the failure has some cause other
1359                        // than the target CPU being offline.
1360                        WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
1361                                  __func__, cpu);
1362                        rcu_tasks_trace.n_ipis_fails++;
1363                        per_cpu(trc_ipi_to_cpu, cpu) = false;
1364                        t->trc_ipi_to_cpu = -1;
1365                }
1366        }
1367}
1368
1369/* Initialize for a new RCU-tasks-trace grace period. */
1370static void rcu_tasks_trace_pregp_step(void)
1371{
1372        int cpu;
1373
1374        // Allow for fast-acting IPIs.
1375        atomic_set(&trc_n_readers_need_end, 1);
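            // This initial count of 1 is a safety count:  It keeps the sum
            // from reaching zero before the task and idle-task scans have
            // finished, and is dropped by rcu_tasks_trace_postgp().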
1376
1377        // There shouldn't be any old IPIs, but...
1378        for_each_possible_cpu(cpu)
1379                WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1380
1381        // Disable CPU hotplug across the tasklist scan.
1382        // This also waits for all readers in CPU-hotplug code paths.
1383        cpus_read_lock();
1384}
1385
1386/* Do first-round processing for the specified task. */
1387static void rcu_tasks_trace_pertask(struct task_struct *t,
1388                                    struct list_head *hop)
1389{
1390        // During early boot when there is only the one boot CPU, there
1391        // is no idle task for the other CPUs. Just return.
1392        if (unlikely(t == NULL))
1393                return;
1394
1395        WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1396        WRITE_ONCE(t->trc_reader_checked, false);
1397        t->trc_ipi_to_cpu = -1;
1398        trc_wait_for_one_reader(t, hop);
1399}
1400
1401/*
1402 * Do intermediate processing between task and holdout scans and
1403 * pick up the idle tasks.
1404 */
1405static void rcu_tasks_trace_postscan(struct list_head *hop)
1406{
1407        int cpu;
1408
1409        for_each_possible_cpu(cpu)
1410                rcu_tasks_trace_pertask(idle_task(cpu), hop);
1411
1412        // Re-enable CPU hotplug now that the tasklist scan has completed.
1413        cpus_read_unlock();
1414
1415        // Wait for late-stage exiting tasks to finish exiting.
1416        // These might have passed the call to exit_tasks_rcu_finish().
1417        synchronize_rcu();
1418        // Any tasks that exit after this point will set ->trc_reader_checked.
1419}
1420
1421/* Communicate task state back to the RCU tasks trace stall-warning code. */
1422struct trc_stall_chk_rdr {
1423        int nesting;
1424        int ipi_to_cpu;
1425        u8 needqs;
1426};
1427
1428static int trc_check_slow_task(struct task_struct *t, void *arg)
1429{
1430        struct trc_stall_chk_rdr *trc_rdrp = arg;
1431
1432        if (task_curr(t))
1433                return false; // It is running, so decline to inspect it.
1434        trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1435        trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1436        trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs);
1437        return true;
1438}
1439
1440/* Show the state of a task stalling the current RCU tasks trace GP. */
1441static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1442{
1443        int cpu;
1444        struct trc_stall_chk_rdr trc_rdr;
1445        bool is_idle_tsk = is_idle_task(t);
1446
1447        if (*firstreport) {
1448                pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1449                *firstreport = false;
1450        }
1451        cpu = task_cpu(t);
1452        if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1453                pr_alert("P%d: %c\n",
1454                         t->pid,
1455                         ".i"[is_idle_tsk]);
1456        else
1457                pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1458                         t->pid,
1459                         ".I"[trc_rdr.ipi_to_cpu >= 0],
1460                         ".i"[is_idle_tsk],
1461                         ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1462                         trc_rdr.nesting,
1463                         " N"[!!trc_rdr.needqs],
1464                         cpu);
1465        sched_show_task(t);
1466}
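    /*
     * With hypothetical values, the second pr_alert() above would emit a
     * line such as "P42: ..N nesting: 1N cpu: 3", meaning:  No IPI
     * outstanding ('.'), not an idle task ('.'), a nohz_full CPU ('N'),
     * read-side nesting depth of 1, a quiescent state still needed ('N'),
     * last seen on CPU 3.
     */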
1467
1468/* List stalled IPIs for RCU tasks trace. */
1469static void show_stalled_ipi_trace(void)
1470{
1471        int cpu;
1472
1473        for_each_possible_cpu(cpu)
1474                if (per_cpu(trc_ipi_to_cpu, cpu))
1475                        pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1476}
1477
1478/* Do one scan of the holdout list. */
1479static void check_all_holdout_tasks_trace(struct list_head *hop,
1480                                          bool needreport, bool *firstreport)
1481{
1482        struct task_struct *g, *t;
1483
1484        // Disable CPU hotplug across the holdout list scan.
1485        cpus_read_lock();
1486
1487        list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1488                // If safe and needed, try to check the current task.
1489                if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1490                    !READ_ONCE(t->trc_reader_checked))
1491                        trc_wait_for_one_reader(t, hop);
1492
1493                // If check succeeded, remove this task from the list.
1494                if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1495                    READ_ONCE(t->trc_reader_checked))
1496                        trc_del_holdout(t);
1497                else if (needreport)
1498                        show_stalled_task_trace(t, firstreport);
1499        }
1500
1501        // Re-enable CPU hotplug now that the holdout list scan has completed.
1502        cpus_read_unlock();
1503
1504        if (needreport) {
1505                if (*firstreport)
1506                        pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1507                show_stalled_ipi_trace();
1508        }
1509}
1510
1511static void rcu_tasks_trace_empty_fn(void *unused)
1512{
1513}
1514
1515/* Wait for grace period to complete and provide ordering. */
1516static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1517{
1518        int cpu;
1519        bool firstreport;
1520        struct task_struct *g, *t;
1521        LIST_HEAD(holdouts);
1522        long ret;
1523
1524        // Wait for any lingering IPI handlers to complete.  Note that
1525        // if a CPU has gone offline or transitioned to userspace in the
1526        // meantime, all IPI handlers should have been drained beforehand.
1527        // Yes, this assumes that CPUs process IPIs in order.  If that ever
1528        // changes, there will need to be a recheck and/or timed wait.
1529        for_each_online_cpu(cpu)
1530                if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1531                        smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1532
1533        // Remove the safety count.
1534        smp_mb__before_atomic();  // Order vs. earlier atomics
1535        atomic_dec(&trc_n_readers_need_end);
1536        smp_mb__after_atomic();  // Order vs. later atomics
1537
1538        // Wait for readers.
1539        set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1540        for (;;) {
1541                ret = wait_event_idle_exclusive_timeout(
1542                                trc_wait,
1543                                atomic_read(&trc_n_readers_need_end) == 0,
1544                                READ_ONCE(rcu_task_stall_timeout));
1545                if (ret)
1546                        break;  // Count reached zero.
1547                // Stall warning time, so make a list of the offenders.
1548                rcu_read_lock();
1549                for_each_process_thread(g, t)
1550                        if (READ_ONCE(t->trc_reader_special.b.need_qs))
1551                                trc_add_holdout(t, &holdouts);
1552                rcu_read_unlock();
1553                firstreport = true;
1554                list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1555                        if (READ_ONCE(t->trc_reader_special.b.need_qs))
1556                                show_stalled_task_trace(t, &firstreport);
1557                        trc_del_holdout(t); // Release task_struct reference.
1558                }
1559                if (firstreport)
1560                        pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
1561                show_stalled_ipi_trace();
1562                pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1563        }
1564        smp_mb(); // Caller's code must be ordered after wakeup.
1565                  // Pairs with pretty much every ordering primitive.
1566}
1567
1568/* Report any needed quiescent state for this exiting task. */
1569static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1570{
1571        WRITE_ONCE(t->trc_reader_checked, true);
1572        WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1573        WRITE_ONCE(t->trc_reader_nesting, 0);
1574        if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1575                rcu_read_unlock_trace_special(t);
1576}
1577
1578/**
1579 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1580 * @rhp: structure to be used for queueing the RCU updates.
1581 * @func: actual callback function to be invoked after the grace period
1582 *
1583 * The callback function will be invoked some time after a trace rcu-tasks
1584 * grace period elapses, in other words after all currently executing
1585 * trace rcu-tasks read-side critical sections have completed. These
1586 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1587 * and rcu_read_unlock_trace().
1588 *
1589 * See the description of call_rcu() for more detailed information on
1590 * memory ordering guarantees.
1591 */
1592void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1593{
1594        call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1595}
1596EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
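    /*
     * Hypothetical usage sketch, with illustrative names not defined in this
     * file (struct my_obj is assumed to embed a struct rcu_head named rh):
     * an updater that has unpublished a structure read under
     * rcu_read_lock_trace() might free it from a Tasks Trace callback.
     *
     *      static void my_free_cb(struct rcu_head *rhp)
     *      {
     *              struct my_obj *p = container_of(rhp, struct my_obj, rh);
     *
     *              kfree(p);
     *      }
     *
     *      // ... after removing every pointer readers could follow:
     *      call_rcu_tasks_trace(&old->rh, my_free_cb);
     */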
1597
1598/**
1599 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1600 *
1601 * Control will return to the caller some time after a trace rcu-tasks
1602 * grace period has elapsed, in other words after all currently executing
1603 * trace rcu-tasks read-side critical sections have completed. These read-side
1604 * critical sections are delimited by calls to rcu_read_lock_trace()
1605 * and rcu_read_unlock_trace().
1606 *
1607 * This is a very specialized primitive, intended only for a few uses in
1608 * tracing and other situations requiring manipulation of function preambles
1609 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1610 * (yet) intended for heavy use from multiple CPUs.
1611 *
1612 * See the description of synchronize_rcu() for more detailed information
1613 * on memory ordering guarantees.
1614 */
1615void synchronize_rcu_tasks_trace(void)
1616{
1617        RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1618        synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1619}
1620EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
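    /*
     * Hypothetical usage sketch (my_gp, newp, and my_lock are illustrative):
     * swap in a new version of a structure traversed under
     * rcu_read_lock_trace(), wait for all pre-existing readers, then free
     * the old version directly.
     *
     *      oldp = rcu_replace_pointer(my_gp, newp, lockdep_is_held(&my_lock));
     *      synchronize_rcu_tasks_trace();
     *      kfree(oldp);
     */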
1621
1622/**
1623 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1624 *
1625 * Although the current implementation is guaranteed to wait, it is not
1626 * obligated to do so if, for example, there are no pending callbacks.
1627 */
1628void rcu_barrier_tasks_trace(void)
1629{
1630        rcu_barrier_tasks_generic(&rcu_tasks_trace);
1631}
1632EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
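    /*
     * Hypothetical teardown sketch:  Before freeing state that pending
     * call_rcu_tasks_trace() callbacks might still touch, first prevent new
     * callbacks from being queued, then wait for the in-flight ones.  The
     * two my_*() helpers below are illustrative only, not real kernel APIs.
     *
     *      my_stop_queueing_callbacks();
     *      rcu_barrier_tasks_trace();
     *      my_free_callback_state();
     */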
1633
1634static int __init rcu_spawn_tasks_trace_kthread(void)
1635{
1636        cblist_init_generic(&rcu_tasks_trace);
1637        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1638                rcu_tasks_trace.gp_sleep = HZ / 10;
1639                rcu_tasks_trace.init_fract = HZ / 10;
1640        } else {
1641                rcu_tasks_trace.gp_sleep = HZ / 200;
1642                if (rcu_tasks_trace.gp_sleep <= 0)
1643                        rcu_tasks_trace.gp_sleep = 1;
1644                rcu_tasks_trace.init_fract = HZ / 200;
1645                if (rcu_tasks_trace.init_fract <= 0)
1646                        rcu_tasks_trace.init_fract = 1;
1647        }
1648        rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1649        rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1650        rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1651        rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1652        rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1653        rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1654        return 0;
1655}
1656
1657#if !defined(CONFIG_TINY_RCU)
1658void show_rcu_tasks_trace_gp_kthread(void)
1659{
1660        char buf[64];
1661
1662        sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1663                data_race(n_heavy_reader_ofl_updates),
1664                data_race(n_heavy_reader_updates),
1665                data_race(n_heavy_reader_attempts));
1666        show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1667}
1668EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
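    /*
     * In the status string built above, "N" is the number of readers still
     * being waited for (trc_n_readers_need_end) and "h:" lists the
     * heavyweight-reader counters as offline-updates/updates/attempts,
     * in that order.
     */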
1669#endif // !defined(CONFIG_TINY_RCU)
1670
1671#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1672static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1673#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1674
1675#ifndef CONFIG_TINY_RCU
1676void show_rcu_tasks_gp_kthreads(void)
1677{
1678        show_rcu_tasks_classic_gp_kthread();
1679        show_rcu_tasks_rude_gp_kthread();
1680        show_rcu_tasks_trace_gp_kthread();
1681}
1682#endif /* #ifndef CONFIG_TINY_RCU */
1683
1684#ifdef CONFIG_PROVE_RCU
1685struct rcu_tasks_test_desc {
1686        struct rcu_head rh;
1687        const char *name;
1688        bool notrun;
1689};
1690
1691static struct rcu_tasks_test_desc tests[] = {
1692        {
1693                .name = "call_rcu_tasks()",
1694                /* If not defined, the test is skipped. */
1695                .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1696        },
1697        {
1698                .name = "call_rcu_tasks_rude()",
1699                /* If not defined, the test is skipped. */
1700                .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1701        },
1702        {
1703                .name = "call_rcu_tasks_trace()",
1704                /* If not defined, the test is skipped. */
1705                .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1706        }
1707};
1708
1709static void test_rcu_tasks_callback(struct rcu_head *rhp)
1710{
1711        struct rcu_tasks_test_desc *rttd =
1712                container_of(rhp, struct rcu_tasks_test_desc, rh);
1713
1714        pr_info("Callback from %s invoked.\n", rttd->name);
1715
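            // Once a test has been initiated, ->notrun doubles as a
            // "callback has run" flag:  rcu_tasks_verify_self_tests()
            // treats a still-false value as a hung grace period.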
1716        rttd->notrun = true;
1717}
1718
1719static void rcu_tasks_initiate_self_tests(void)
1720{
1721        pr_info("Running RCU-tasks wait API self tests\n");
1722#ifdef CONFIG_TASKS_RCU
1723        synchronize_rcu_tasks();
1724        call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1725#endif
1726
1727#ifdef CONFIG_TASKS_RUDE_RCU
1728        synchronize_rcu_tasks_rude();
1729        call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1730#endif
1731
1732#ifdef CONFIG_TASKS_TRACE_RCU
1733        synchronize_rcu_tasks_trace();
1734        call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1735#endif
1736}
1737
1738static int rcu_tasks_verify_self_tests(void)
1739{
1740        int ret = 0;
1741        int i;
1742
1743        for (i = 0; i < ARRAY_SIZE(tests); i++) {
1744                if (!tests[i].notrun) {         // still hanging.
1745                        pr_err("%s has failed.\n", tests[i].name);
1746                        ret = -1;
1747                }
1748        }
1749
1750        if (ret)
1751                WARN_ON(1);
1752
1753        return ret;
1754}
1755late_initcall(rcu_tasks_verify_self_tests);
1756#else /* #ifdef CONFIG_PROVE_RCU */
1757static void rcu_tasks_initiate_self_tests(void) { }
1758#endif /* #else #ifdef CONFIG_PROVE_RCU */
1759
1760void __init rcu_init_tasks_generic(void)
1761{
1762#ifdef CONFIG_TASKS_RCU
1763        rcu_spawn_tasks_kthread();
1764#endif
1765
1766#ifdef CONFIG_TASKS_RUDE_RCU
1767        rcu_spawn_tasks_rude_kthread();
1768#endif
1769
1770#ifdef CONFIG_TASKS_TRACE_RCU
1771        rcu_spawn_tasks_trace_kthread();
1772#endif
1773
1774        // Run the self-tests.
1775        rcu_tasks_initiate_self_tests();
1776}
1777
1778#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1779static inline void rcu_tasks_bootup_oddness(void) {}
1780#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
1781