1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/completion.h>
26#include <linux/interrupt.h>
27#include <linux/notifier.h>
28#include <linux/rcupdate_wait.h>
29#include <linux/kernel.h>
30#include <linux/export.h>
31#include <linux/mutex.h>
32#include <linux/sched.h>
33#include <linux/types.h>
34#include <linux/init.h>
35#include <linux/time.h>
36#include <linux/cpu.h>
37#include <linux/prefetch.h>
38#include <linux/slab.h>
39#include <linux/mm.h>
40
41#include "rcu.h"
42
43
/*
 * Control block for the sole Tiny RCU flavor: one global singly-linked
 * list of callbacks, partitioned by two "tail" pointers.  Entries from
 * the head up to *donetail have waited a full grace period and may be
 * invoked; entries after that, up to *curtail, are still waiting.
 */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last queued CB. */
};
49
50
/* Definition of the control block: both tails start at the empty list head. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};
55
/*
 * Wait until all callbacks queued before this call have been invoked.
 * wait_rcu_gp() posts a fresh callback via call_rcu() and sleeps until
 * it runs; since callbacks are appended at ->curtail and invoked from
 * the list head (see rcu_process_callbacks()), the fresh callback runs
 * only after every earlier one has completed.
 */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);
61
62
/*
 * Record a quiescent state: every callback queued so far (everything up
 * to ->curtail) has now waited out a grace period, so advance ->donetail
 * over them and raise RCU_SOFTIRQ to get them invoked.
 *
 * Interrupts are disabled around the tail update because call_rcu() and
 * rcu_process_callbacks() manipulate the same tail pointers.
 */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		/* _irqoff variant: interrupts are already disabled here. */
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}
74
75
76
77
78
79
80
81void rcu_sched_clock_irq(int user)
82{
83 if (user) {
84 rcu_qs();
85 } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
86 set_tsk_need_resched(current);
87 set_preempt_need_resched();
88 }
89}
90
91
92
93
94
/*
 * Invoke one callback whose grace period has elapsed.
 *
 * kvfree_rcu() encodes the offset of the rcu_head within its enclosing
 * object directly in ->func instead of a real function pointer.  When
 * __is_kvfree_rcu_offset() recognizes such an encoding, recover the base
 * pointer of the enclosing object and kvfree() it; otherwise call ->func
 * as an ordinary callback.
 *
 * Returns true for a kvfree_rcu()-style callback, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		/* ->func held an offset: step back to the enclosing object. */
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	/*
	 * Clear ->func before invoking it — NOTE(review): presumably a
	 * debugging aid to catch reuse of an already-invoked head; confirm.
	 */
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
115
116
/* RCU_SOFTIRQ handler: invoke all callbacks whose grace period is done. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Detach the "done" sublist (head through *donetail) with irqs off. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks are ready to invoke. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	/* If no not-yet-done callbacks remain, reset ->curtail as well. */
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the detached local list, in queueing order. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		/* Callbacks run with bottom halves disabled, one at a time. */
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}
148
149
150
151
152
153
154
155
156
157
158
159void synchronize_rcu(void)
160{
161 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
162 lock_is_held(&rcu_lock_map) ||
163 lock_is_held(&rcu_sched_lock_map),
164 "Illegal synchronize_rcu() in RCU read-side critical section");
165}
166EXPORT_SYMBOL_GPL(synchronize_rcu);
167
168
169
170
171
172
/*
 * Queue an RCU callback to be invoked after a grace period.
 *
 * @head: structure embedded in the protected object, used for linkage.
 * @func: function to invoke (with @head as argument) after the grace
 *        period; typically it frees the enclosing object.
 *
 * Appends @head at ->curtail of the single global callback list with
 * interrupts disabled, since rcu_qs() and the softirq handler touch the
 * same tail pointers.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/*
		 * Force a reschedule so the idle loop yields and a
		 * quiescent state gets reported.  NOTE(review): CPU 0 is
		 * presumably the only CPU in this configuration — confirm.
		 */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
192
/*
 * Boot-time initialization: register the RCU_SOFTIRQ handler that
 * invokes callbacks, then run the early-boot self-tests and SRCU setup.
 * The softirq is registered first so the subsequent steps may queue and
 * process callbacks.
 */
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}
199