linux/include/linux/rcutree.h
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *         Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes. The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
        rcu_note_context_switch(false);
}
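
/*
 * Hedged usage sketch (not part of the original header): a hypervisor's
 * guest-entry path might report a quiescent state to RCU just before
 * handing the CPU to the guest.  The function name example_enter_guest()
 * and the surrounding entry code are hypothetical; the one hard requirement,
 * per the comment above, is that interrupts are already disabled here.
 */
static inline void example_enter_guest(int cpu)
{
        /* Interrupts must already be disabled by the entry code. */
        rcu_virt_note_context_switch(cpu);      /* report the context switch to RCU */
        /* ... switch to guest context here ... */
}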

void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);

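/*
 * Hedged usage sketch (not part of the original header):
 * synchronize_rcu_expedited() waits for a full grace period like
 * synchronize_rcu(), but trades extra CPU overhead for much lower latency.
 * The name example_remove_and_free() and the unlink/free steps (left as
 * comments) are hypothetical.
 */
static inline void example_remove_and_free(void *obj)
{
        /* ... unlink 'obj' so that no new reader can find it ... */
        synchronize_rcu_expedited();    /* expedited wait for pre-existing readers */
        /* ... no reader can still hold a reference; free 'obj' ... */
}
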
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu(unsigned long oldstate);
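
/*
 * Hedged usage sketch (not part of the original header): the polled
 * grace-period API above lets an updater snapshot grace-period state with
 * get_state_synchronize_rcu(), then later check completion without blocking
 * via poll_state_synchronize_rcu(), or block only if necessary via
 * cond_synchronize_rcu().  The name example_retire() and the unlink/free
 * steps (left as comments) are hypothetical.
 */
static inline void example_retire(void *obj)
{
        unsigned long cookie;

        /* ... unlink 'obj' so that no new reader can find it ... */

        cookie = get_state_synchronize_rcu();   /* snapshot grace-period state */

        /* ... do other useful work while readers drain ... */

        if (!poll_state_synchronize_rcu(cookie))        /* non-blocking completion check */
                cond_synchronize_rcu(cookie);           /* block only if still needed */

        /* ... a full grace period has elapsed since the snapshot; free 'obj' ... */
}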

void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
bool rcu_is_idle_cpu(int cpu);

#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);

#endif /* __LINUX_RCUTREE_H */