/*
 * kernel/softirq.c - softirq, tasklet and ksoftirqd handling.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
 * Softirq handling follows a few simple rules:
 *
 * - No shared state: all softirq bookkeeping is CPU local. If a softirq
 *   handler needs serialization, it must provide its own locking.
 * - Even when a softirq is serialized, only the local CPU is marked for
 *   execution, which gives a weak form of CPU binding.
 *
 * Examples:
 * - NET_RX is multithreaded and needs no global serialization.
 * - NET_TX kicks software netdevice queues, so it is logically serialized
 *   per device, but that serialization is invisible to common code.
 * - Tasklets are serialized with respect to themselves.
 */
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
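
/*
 * Contextual note (illustrative, not in the original file): this array is
 * indexed by the softirq vector numbers from the enum in
 * <linux/interrupt.h> (HI_SOFTIRQ, TIMER_SOFTIRQ, ...), and the strings
 * are the row labels printed by /proc/softirqs for the per-CPU counts
 * that kstat_incr_softirqs_this_cpu() bumps below.
 */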

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add() and will break
	 * lockdep because it calls back into lockdep before the softirq
	 * trace state has been updated. So raise the count with the raw
	 * helper here and leave the softirq trace call at the actual
	 * disable point below.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any is pending. do_softirq() switches to
		 * its own stack where needed; preemption is still disabled
		 * (by the remaining offset) while it runs.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
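
/*
 * Illustrative usage (not part of the original file): most callers reach
 * the two functions above through the local_bh_disable()/local_bh_enable()
 * wrappers, to keep softirq handlers off the CPU around a critical
 * section, roughly:
 *
 *	local_bh_disable();
 *	// touch per-CPU data also used from softirq context
 *	local_bh_enable();	// may run pending softirqs via do_softirq()
 */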

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to increment
 * and so we need the MAX_SOFTIRQ_RESTART limit as well to make sure we
 * eventually return from this method.
 *
 * These limits have been established via experimentation. The two things
 * to balance are latency and fairness - we want to handle softirqs as
 * soon as possible, but they should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When softirqs run from irq_exit() they execute on top of an existing
 * hardirq lockdep context. Leave that context for the duration of softirq
 * processing so lockdep tracks the softirq section correctly, and restore
 * it afterwards.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as networking RX might set
	 * PF_MEMALLOC again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty at
		 * this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
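
/*
 * Illustrative usage (not part of the original file): the softirq vectors
 * are a fixed, compile-time set (see softirq_to_name[] above), so
 * open_softirq() is only called by core subsystems at init time; the
 * networking core, for example, registers its handlers roughly like:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 * and later marks work pending from interrupt context with
 * raise_softirq(NET_RX_SOFTIRQ), or raise_softirq_irqoff() when
 * interrupts are already disabled.
 */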

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

/*
 * Run the tasklets queued for this CPU. A tasklet that is disabled or is
 * still running on another CPU is put back on the list and the softirq is
 * re-raised so it gets retried later.
 */
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
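
/*
 * Illustrative usage (not part of the original file): a driver usually sets
 * up a tasklet with DECLARE_TASKLET() or tasklet_init() and schedules it
 * from its hard interrupt handler. A minimal sketch, where my_tasklet_fn
 * and struct my_dev are hypothetical names:
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work, runs in softirq context
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// from the irq handler
 *	...
 *	tasklet_kill(&dev->tasklet);		// on teardown, see below
 */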

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * function.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
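
/*
 * Illustrative usage (not part of the original file): the caller supplies an
 * hrtimer callback that will run from HI_SOFTIRQ (tasklet) context rather
 * than hard interrupt context. A minimal sketch, with my_timer_fn as a
 * hypothetical callback:
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		// runs in softirq context via the tasklet trampoline
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_start(&ttimer.timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */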

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack here, as we
		 * are not deep in the task stack.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */
int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}