1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/export.h>
14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29
30#define CREATE_TRACE_POINTS
31#include <trace/events/irq.h>
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
#ifndef __ARCH_IRQ_STAT
/* Generic per-CPU interrupt statistics, used unless the arch provides its own. */
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

/* Handler table, one slot per softirq number; populated via open_softirq(). */
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

/* Per-CPU kthread that runs softirqs when they cannot be handled inline. */
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/* Human-readable softirq names, indexed by softirq number (tracing/proc). */
const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
64
65
66
67
68
69
70
71static void wakeup_softirqd(void)
72{
73
74 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
75
76 if (tsk && tsk->state != TASK_RUNNING)
77 wake_up_process(tsk);
78}
79
80
81
82
83
84static bool ksoftirqd_running(void)
85{
86 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
87
88 return tsk && (tsk->state == TASK_RUNNING);
89}
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Disable bottom halves from call site @ip, raising preempt_count by
 * @cnt, while keeping the irqflags and preempt tracers informed.
 * (The !CONFIG_TRACE_IRQFLAGS variant presumably lives elsewhere.)
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add() and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off() later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above (i.e. is this the first
	 * softirq-disable level)?
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
		/* First preemption-disable level: record where it happened. */
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
137
/*
 * Drop @cnt from preempt_count without running any pending softirqs.
 * Must be called with interrupts disabled (asserted via lockdep).
 */
static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	/* Does this decrement re-enable softirqs completely? */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}
146
147
148
149
150
151
/*
 * Special-case bottom-half enable: re-enables BHs but does NOT run any
 * pending softirqs itself — processing is left to the caller's context.
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
158
/*
 * Re-enable bottom halves from call site @ip, dropping @cnt from
 * preempt_count, and run any pending softirqs if this was the last
 * softirq-disable level.
 */
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing: drop all but one count now, and
	 * the final count after do_softirq() has run.
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack
		 * already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
192
193
194
195
196
197
198
199
200
201
202
203
204
205
/*
 * __do_softirq() restarts its processing loop at most MAX_SOFTIRQ_RESTART
 * times, and also breaks out after MAX_SOFTIRQ_TIME or when a reschedule
 * is needed; leftover work is then deferred to ksoftirqd.
 */
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When softirqs run from irq_exit() (i.e. on the hardirq stack), lockdep's
 * irq-context tracking must be switched from hardirq to softirq context
 * around the softirq section so lock usage is classified correctly.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	/* Leave lockdep's hardirq context, remembering we were in it. */
	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

/* Undo lockdep_softirq_start(): exit softirq context, restore hardirq. */
static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
241
/*
 * Core softirq dispatch loop: clears the pending mask, runs each raised
 * handler with interrupts enabled, and restarts while new softirqs keep
 * being raised — bounded by MAX_SOFTIRQ_TIME / MAX_SOFTIRQ_RESTART /
 * need_resched(), after which leftovers are punted to ksoftirqd.
 * Entered with interrupts disabled.
 */
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	/* Mark softirq context for the duration of the loop. */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	/* Walk the set bits of the snapshot, lowest (highest-prio) first. */
	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		/* Catch handlers that leak a preempt count; repair and moan. */
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	/* Note a quiescent state for RCU-bh before possibly looping. */
	rcu_bh_qs();
	local_irq_disable();

	/* Anything re-raised while we ran? Loop or defer to ksoftirqd. */
	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}
315
/*
 * Run pending softirqs from process context, on the dedicated softirq
 * stack. No-op when already in interrupt/softirq context, or when
 * ksoftirqd is running (it will handle them).
 */
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
		do_softirq_own_stack();

	local_irq_restore(flags);
}
333
334
335
336
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}
352
/*
 * Called from irq_exit() to process pending softirqs: either run them
 * here, or — if ksoftirqd is active or irq threading is forced — leave
 * them to the ksoftirqd thread.
 */
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that
		 * can be potentially deep already. So call softirq in its
		 * own stack to prevent from any overflow.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
378
/*
 * On NO_HZ configurations, give the tick code a chance to stop (or
 * re-program) the tick when an interrupt leaves an idle or nohz-full CPU.
 */
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}
391
392
393
394
/*
 * Exit an interrupt context. Process softirqs if needed and possible.
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	/* Drop the hardirq count BEFORE checking for pending softirqs. */
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit();
}
411
412
413
414
/*
 * Raise softirq @nr. Per the _irqoff suffix, the caller must have
 * interrupts disabled on this CPU.
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}
431
/* Raise softirq @nr from any context: irq-safe wrapper around the above. */
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
440
/*
 * Set the pending bit for softirq @nr without waking ksoftirqd.
 * Caller must have interrupts disabled.
 */
void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
446
/*
 * Register @action as the handler for softirq number @nr. Unlocked:
 * presumably only called during subsystem initialization, before the
 * vector can be raised — verify against callers.
 */
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
451
452
453
454
/*
 * Tasklets: per-CPU singly-linked queue with a tail pointer for O(1)
 * append. @tail points at the terminating ->next slot (or at @head
 * when the list is empty).
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

/* Normal-priority (TASKLET_SOFTIRQ) and high-priority (HI_SOFTIRQ) queues. */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
462
/*
 * Append @t to this CPU's @headp queue and raise @softirq_nr so the
 * corresponding tasklet action runs. Interrupts are disabled across
 * the list update to keep it atomic w.r.t. the softirq handler.
 */
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}
478
/* Queue @t on the normal-priority tasklet list of this CPU. */
void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);
485
/* Queue @t on the high-priority (HI_SOFTIRQ) tasklet list of this CPU. */
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
492
/*
 * Softirq action for a tasklet queue: detach the whole per-CPU list,
 * then run each tasklet that is unlocked and enabled. Tasklets that are
 * locked (running on another CPU) or disabled are re-queued and the
 * softirq re-raised so they get retried.
 */
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	/* Atomically steal the whole queue; new schedules start a fresh one. */
	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			/* count == 0 means enabled; run it. */
			if (!atomic_read(&t->count)) {
				/* SCHED must have been set by the scheduler. */
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Locked or disabled: put it back and try again later. */
		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}
530
/* TASKLET_SOFTIRQ handler: drain this CPU's normal-priority tasklets. */
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}
535
/* HI_SOFTIRQ handler: drain this CPU's high-priority tasklets. */
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
540
541void tasklet_init(struct tasklet_struct *t,
542 void (*func)(unsigned long), unsigned long data)
543{
544 t->next = NULL;
545 t->state = 0;
546 atomic_set(&t->count, 0);
547 t->func = func;
548 t->data = data;
549}
550EXPORT_SYMBOL(tasklet_init);
551
/*
 * Wait for @t to finish any in-flight or scheduled run, leaving it
 * unscheduled. Sleeps (yield/unlock_wait), so must not be called from
 * interrupt context — that case only gets a notice, not a fix.
 */
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	/* Spin (yielding) until we observe and claim the SCHED bit clear. */
	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	/* Wait for a concurrent handler invocation to complete. */
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
566
567
568
569
570
571
572
573
574
575
/*
 * hrtimer callback half of a tasklet_hrtimer: runs in hard-irq context,
 * so it only schedules the hi-priority tasklet which will invoke the
 * user's function from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	/* Restart decisions are made by the user function, not here. */
	return HRTIMER_NORESTART;
}
584
585
586
587
588
589static void __tasklet_hrtimer_trampoline(unsigned long data)
590{
591 struct tasklet_hrtimer *ttimer = (void *)data;
592 enum hrtimer_restart restart;
593
594 restart = ttimer->function(&ttimer->timer);
595 if (restart != HRTIMER_NORESTART)
596 hrtimer_restart(&ttimer->timer);
597}
598
599
600
601
602
603
604
605
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq
 *		 context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
617
/*
 * Boot-time setup: make every CPU's tasklet queues empty (tail pointing
 * at head) and register the two tasklet softirq handlers.
 */
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
632
/* smpboot callback: ksoftirqd has work iff softirqs are pending here. */
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}
637
/*
 * smpboot thread function for ksoftirqd: process one batch of pending
 * softirqs with interrupts disabled around the check, then yield.
 */
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}
653
654#ifdef CONFIG_HOTPLUG_CPU
655
656
657
658
659
660
661
662
663
/*
 * tasklet_kill_immediate - kill a tasklet scheduled on a dead CPU
 * @t: tasklet to kill
 * @cpu: CPU the tasklet was queued on; must be offline
 *
 * Unlinks a scheduled-but-not-running tasklet from @cpu's queue so it
 * never runs. The tasklet must not be executing (RUN bit clear); if it
 * is not scheduled at all this is a no-op. BUG()s if @t claims to be
 * scheduled but is not found on the list.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr. */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
686
687static int takeover_tasklets(unsigned int cpu)
688{
689
690 local_irq_disable();
691
692
693 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
694 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
695 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
696 per_cpu(tasklet_vec, cpu).head = NULL;
697 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
698 }
699 raise_softirq_irqoff(TASKLET_SOFTIRQ);
700
701 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
702 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
703 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
704 per_cpu(tasklet_hi_vec, cpu).head = NULL;
705 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
706 }
707 raise_softirq_irqoff(HI_SOFTIRQ);
708
709 local_irq_enable();
710 return 0;
711}
712#else
713#define takeover_tasklets NULL
714#endif
715
/* smpboot descriptor for the per-CPU ksoftirqd/%u threads. */
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};
722
/*
 * Early initcall: register the CPU-dead hotplug callback for tasklet
 * takeover and spawn a ksoftirqd thread on every CPU. Thread creation
 * failure is fatal (BUG_ON) — the softirq machinery depends on it.
 */
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
732
733
734
735
736
737
/* Weak default hooks: arch code may override any of these. */

/* Default early irq init: nothing to do. */
int __init __weak early_irq_init(void)
{
	return 0;
}

/* Default probe: only the legacy IRQ range exists. */
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

/* Default arch early irq init: nothing to do. */
int __init __weak arch_early_irq_init(void)
{
	return 0;
}

/* Default lower bound for dynamic irq allocation: no restriction. */
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}
757