/*
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
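/*
 * Minimal usage sketch (illustrative only; my_callback and my_work are
 * hypothetical names, not part of this file):
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		// runs in hard interrupt context with interrupts disabled
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_callback);
 *	irq_work_queue(&my_work);	// NMI-safe enqueue on the local CPU
 */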
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after the cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from the next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure the work is no longer
 * in use by the time this returns.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);