// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
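
/*
 * Softirq handling is strictly per-CPU: raising a softirq only marks it
 * pending on the local CPU, and all of the bookkeeping below lives in
 * per-CPU data. A softirq handler that needs serialization against other
 * CPUs must provide its own locking.
 */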
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
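
/*
 * Wake up ksoftirqd for this CPU. We must not loop here waiting for the
 * pending work to drain (that would starve userspace), so defer to the
 * scheduler by waking the per-CPU ksoftirqd thread instead.
 */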
static void wakeup_softirqd(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
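
/*
 * If ksoftirqd is already running on this CPU, let it handle newly raised
 * softirqs at its own pace instead of processing them inline, except for
 * the latency-sensitive vectors in SOFTIRQ_NOW_MASK (HI and TASKLET).
 */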
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
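
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET when entering or leaving
 *   softirq processing (__do_softirq()).
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() / local_bh_enable().
 * This lets us distinguish "currently serving a softirq" from "merely has
 * bottom halves disabled".
 */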
#ifdef CONFIG_TRACE_IRQFLAGS
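/*
 * Disable bottom halves, adding @cnt to preempt_count. With
 * CONFIG_TRACE_IRQFLAGS this also informs lockdep when softirqs become
 * disabled and lets the preempt-off tracer record the call site.
 */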
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it recurses into lockdep before the softirq-off
	 * state has been recorded. Bump the count by hand here and call
	 * trace_preempt_off() only once everything is consistent.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}
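
/*
 * Special case: re-enable bottom halves without running any pending
 * softirqs. Only safe where the caller knows pending softirqs will be
 * handled shortly anyway, e.g. from __do_softirq() itself.
 */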
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run any pending softirqs now; do_softirq() switches to
		 * the dedicated softirq stack where the architecture has one.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty at
		 * this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can already be deep. Run the softirq on its own stack to
		 * prevent overruns.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}
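
/*
 * __do_softirq() keeps restarting until no softirqs are pending, but bails
 * out to ksoftirqd after MAX_SOFTIRQ_TIME (2 ms), after MAX_SOFTIRQ_RESTART
 * iterations, or as soon as need_resched() is set. The time limit keeps
 * latency bounded; the restart limit guarantees forward progress even if
 * jiffies stops ticking (e.g. under stop_machine()).
 */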
#define MAX_SOFTIRQ_TIME	msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART	10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack, keep
 * the lockdep irq context tracking as tight as possible so that lock
 * contexts are not mis-qualified and possible deadlocks are not missed.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler, e.g. network RX, might set
	 * PF_MEMALLOC again if the allocation is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}
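
/**
 * irq_enter_rcu - Enter an interrupt context with RCU already watching
 */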
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
		tick_irq_enter();

	account_hardirq_enter(current);
}
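
/**
 * irq_enter - Enter an interrupt context including RCU update
 */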
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}
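
/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */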
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}
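
/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */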
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}
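
/*
 * This function must run with irqs disabled!
 */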
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
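
/*
 * open_softirq() simply installs @action as the handler for vector @nr;
 * there is no corresponding "close". Softirq vectors are registered once
 * at boot by core subsystems, e.g.:
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *
 * and are raised later with raise_softirq(nr), or raise_softirq_irqoff(nr)
 * when interrupts are already disabled.
 */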
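
/*
 * Tasklets: deferred work run from the TASKLET_SOFTIRQ and HI_SOFTIRQ
 * vectors. A tasklet normally runs on the CPU that scheduled it (CPU
 * hotplug may migrate it) and never runs on more than one CPU at a time.
 */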
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
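
/*
 * Append @t to the tail of this CPU's list for @softirq_nr and raise the
 * corresponding softirq. The tasklet_schedule() wrappers only call this
 * after setting TASKLET_STATE_SCHED, so a tasklet is queued at most once
 * at a time.
 */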
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
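
/*
 * tasklet_setup() is the callback-style initializer, preferred over the
 * older tasklet_init()/data pair. An illustrative sketch of typical driver
 * usage (my_dev and my_dev_tasklet are made-up names), using from_tasklet()
 * to recover the containing structure:
 *
 *	struct my_dev {
 *		struct tasklet_struct tl;
 *	};
 *
 *	static void my_dev_tasklet(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tl);
 *		// deferred work for dev ...
 *	}
 *
 *	tasklet_setup(&dev->tl, my_dev_tasklet);
 *	tasklet_schedule(&dev->tl);	// e.g. from the interrupt handler
 */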

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run the softirq on the inline stack here:
		 * ksoftirqd runs from its own kthread, so we are not deep
		 * in the task stack.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
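/*
 * tasklet_kill_immediate - remove @t from the dead @cpu's pending list.
 *
 * Unlike tasklet_kill(), this removes the tasklet immediately even if it
 * is in TASKLET_STATE_SCHED state. @cpu must already be offline (dead),
 * so its per-CPU list can be walked without locking.
 */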
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
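
/*
 * CPU-hotplug callback: when @cpu has gone fully dead, splice any tasklets
 * it still had queued onto the current CPU's lists and re-raise the
 * softirqs so they are not lost.
 */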
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
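
/*
 * Default __weak stubs for the early interrupt setup hooks; the generic
 * IRQ core and individual architectures override them where needed.
 */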
int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}