// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked
     for execution, so we get a weak form of CPU binding. Whether
     that results in better cache locality is an open question.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
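
/*
 * Note: these names are user visible, e.g. in /proc/softirqs, and they
 * match the softirq trace point naming, so they must stay in the same
 * order as the vector numbers in the NR_SOFTIRQS enum.
 */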

/*
 * We cannot loop indefinitely here to avoid userspace starvation;
 * on the other hand we don't want to introduce a worst case 1/HZ
 * latency for the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we just have bh disabled.
 */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add() and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off() later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any is pending. And do it in its own stack
		 * as we shouldn't be preempted with bh disabled:
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
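
/*
 * Example: the local_bh_disable()/local_bh_enable() pair built on the
 * helpers above is what process-context code uses to protect data it
 * shares with softirq handlers on this CPU. Illustrative sketch only;
 * my_percpu_counter is a made-up symbol, not part of this file:
 *
 *	local_bh_disable();
 *	__this_cpu_inc(my_percpu_counter);	- softirqs cannot run here
 *	local_bh_enable();	- may process softirqs if any became pending
 */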

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * a backup.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not miss-qualify lock contexts and miss possible deadlocks.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context(current)) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq() from needlessly waking up
		 * ksoftirqd here, as the softirq will be serviced on
		 * return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * resume the softirq processing we interrupted, so the
	 * pending work runs soon anyway. Otherwise we wake up
	 * ksoftirqd to make sure the softirq is handled soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
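
/*
 * Example: a subsystem registers its handler once at boot and raises the
 * softirq from interrupt context later. Illustrative sketch only;
 * MY_SOFTIRQ and my_softirq_action are hypothetical - real vectors must
 * be added to the NR_SOFTIRQS enum, as TASKLET_SOFTIRQ and friends are:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		- runs in softirq context with interrupts enabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(MY_SOFTIRQ);	- e.g. from an irq handler
 */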

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Disabled or running on another CPU: requeue the tasklet */
		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
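
/*
 * Example: typical driver usage of the tasklet API. Illustrative sketch
 * only; my_dev and my_tasklet_fn are hypothetical names. Note that here
 * the callback receives the unsigned long cookie passed to tasklet_init():
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		- bottom-half work, runs in softirq context
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->tasklet);	- from the hardirq handler
 */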

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the current (inline) stack,
		 * as we are not deep in a task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet _immediately_,
 * even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}