/*
 * Softirq, tasklet and ksoftirqd handling.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

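/*
 * Softirq design notes:
 *
 *  - There are no shared variables; all softirq bookkeeping is CPU local.
 *  - If a softirq handler needs serialization against other CPUs, it must
 *    provide its own locking; the core only guarantees that a given vector
 *    does not run concurrently with itself on the same CPU.
 *  - Raising a softirq only marks it pending on the local CPU, which gives
 *    a weak form of CPU binding: the handler runs where it was raised.
 */
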
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

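/*
 * Wake the per-CPU ksoftirqd thread so that pending softirqs are handled
 * in process context under scheduler control, instead of being looped on
 * here, which could starve userspace.
 */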
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
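
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 *
 * - preempt_count is changed by SOFTIRQ_OFFSET when entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 *
 * This lets us distinguish between softirq processing being in progress
 * and bottom halves merely being disabled.
 */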
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add() and would call
	 * back into lockdep before the softirq-off state has been recorded.
	 * Add to preempt_count manually here and call trace_preempt_off()
	 * ourselves below.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

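/*
 * Re-enable bottom halves without processing any still-pending softirqs.
 * Callers such as irq_enter() rely on the pending work being handled by
 * other means (e.g. on return from the interrupt).
 */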
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run any pending softirqs now, on the softirq stack
		 * if the architecture provides one.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

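/*
 * Softirq processing in __do_softirq() is restarted for at most
 * MAX_SOFTIRQ_RESTART rounds, and the loop is also abandoned once
 * MAX_SOFTIRQ_TIME has elapsed or need_resched() is set.  The time limit
 * keeps latency bounded in the common case; the restart limit guarantees
 * we eventually bail out even if jiffies stops advancing.  Whatever is
 * still pending is then deferred to ksoftirqd.
 */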
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
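/*
 * When softirqs are run from irq_exit(), and thus on the hardirq stack,
 * lockdep's irq context tracking has to be kept as tight as possible so
 * that lock contexts are not mis-qualified and possible deadlocks are
 * not missed.  These helpers convert the hardirq context into a softirq
 * context for lockdep and back again.
 */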
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC: the current task context is borrowed for
	 * the softirq, and a softirq handler (e.g. network RX) might set
	 * PF_MEMALLOC again if its socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

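/*
 * Enter an interrupt context: tell RCU, update the nohz tick if we are
 * interrupting the idle task, and bump the hardirq count.
 */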
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq() from needlessly waking up ksoftirqd
		 * here, as softirqs will be serviced on return from the
		 * interrupt anyway.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirqs on the current stack:
		 * it is the irq stack, and it should be nearly empty at
		 * this point.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can already be deep.  Run the softirqs on their own stack
		 * to prevent overrunning it.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

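/*
 * Exit an interrupt context.  Process softirqs if needed and possible.
 */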
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

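/*
 * This function must run with irqs disabled!
 */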
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code).  We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
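
/*
 * Typical usage (illustrative sketch): a subsystem registers its handler
 * once at boot, e.g. open_softirq(NET_TX_SOFTIRQ, net_tx_action), and
 * later marks the vector pending with raise_softirq(NET_TX_SOFTIRQ), or
 * raise_softirq_irqoff() if interrupts are already disabled.
 */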
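
/*
 * Tasklets: multiplexed on top of the TASKLET and HI softirq vectors.
 * A tasklet runs on the CPU that scheduled it and never runs on more
 * than one CPU at a time (see tasklet_trylock() in the actions below).
 */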
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	/* Atomically grab the whole per-CPU list and start a fresh one */
	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Running elsewhere or disabled: requeue for a later pass */
		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
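
/*
 * Typical tasklet usage (illustrative sketch; my_tasklet/my_handler are
 * made-up names):
 *
 *	static void my_handler(unsigned long data);
 *	static DECLARE_TASKLET(my_tasklet, my_handler, 0);
 *
 *	// usually from a hardirq handler:
 *	tasklet_schedule(&my_tasklet);
 *
 *	// on teardown, once nothing can schedule it any more:
 *	tasklet_kill(&my_tasklet);
 */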
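
/*
 * Tasklet hrtimers: the hrtimer callback below runs in hard interrupt
 * context and merely schedules a HI tasklet, which then calls the user's
 * function from softirq context via __tasklet_hrtimer_trampoline().
 */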
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context.
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

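/**
 * tasklet_hrtimer_init - init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function, called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */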
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the inline stack here, as
		 * we are not deep in the task stack.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
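/*
 * tasklet_kill_immediate - kill a tasklet that is queued on a dead CPU
 * @t: tasklet to be killed
 * @cpu: CPU on which the tasklet may be queued; must already be offline
 *
 * Unlinks @t from @cpu's tasklet list.  The tasklet must not be running,
 * and @cpu must be in the CPU_DEAD state.
 */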
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
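
/*
 * Default (weak) implementations of the early IRQ setup hooks; an
 * architecture can override any of these.
 */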
int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}