#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>

/*
 * Softirq handling follows a few simple rules:
 * - No shared state: all softirq data is CPU-local.
 * - A softirq handler that needs serialization must provide it itself,
 *   e.g. with its own spinlocks.
 * - Raising a softirq only marks it pending on the local CPU, which
 *   gives a weak form of CPU binding.
 *
 * Examples:
 * - NET_RX: fully multithreaded, no global serialization required.
 * - NET_TX: kicks software netdevice queues, so it is logically
 *   serialized per device, invisibly to common code.
 * - Tasklets: serialized against themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us by waking ksoftirqd when needed.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we merely have bottom halves disabled.
 */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count() and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off() later.
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
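
/*
 * Illustrative sketch (not part of the original file): how code that
 * shares per-CPU data with a softirq handler might use the BH-disable
 * API above.  The names "my_counters" and "my_update" are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counters);
 *
 *	static void my_update(void)
 *	{
 *		local_bh_disable();		// keep softirqs off this CPU
 *		__this_cpu_inc(my_counters);	// safe vs. the softirq handler
 *		local_bh_enable();		// may run pending softirqs now
 *	}
 *
 * When bottom halves are re-enabled outside of interrupt context,
 * local_bh_enable() processes any pending softirqs directly via
 * do_softirq() (see _local_bh_enable_ip() above).
 */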

/*
 * We restart softirq processing at most MAX_SOFTIRQ_RESTART times,
 * and fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.  It balances
 * latency (handle softirqs as soon as possible) against fairness
 * (softirqs must not be able to monopolize the CPU).
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;
	unsigned long old_flags = current->flags;

	/*
	 * Mask out PF_MEMALLOC: the current task context is borrowed for
	 * the softirq, and a softirq handler (such as network RX) might
	 * set PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	vtime_account(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	vtime_account(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
		do_softirq();
#endif
	} else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	vtime_account(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
	sched_preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
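
/*
 * Illustrative sketch (not part of the original file): registering and
 * raising a softirq.  The vector numbers are a fixed enum in
 * <linux/interrupt.h>, so adding a softirq means adding an enum entry;
 * "my_action" and "MY_SOFTIRQ" below are hypothetical.  This file does
 * the same thing for tasklets in softirq_init() further down.
 *
 *	static void my_action(struct softirq_action *h)
 *	{
 *		// runs with hardirqs enabled, in softirq context
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action);	// once, at init time
 *
 *	raise_softirq(MY_SOFTIRQ);		// mark pending on this CPU;
 *						// use raise_softirq_irqoff()
 *						// if irqs are already off
 */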

/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/*
		 * The tasklet is running on another CPU or is disabled:
		 * put it back on this CPU's list and re-raise the softirq.
		 */
		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Running elsewhere or disabled: re-queue and re-raise. */
		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
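
/*
 * Illustrative sketch (not part of the original file): typical tasklet
 * usage from a driver.  "my_tasklet_fn", "my_tasklet" and "my_data" are
 * hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		// runs in softirq context, on the CPU that scheduled it
 *	}
 *
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)my_data);
 *
 *	// from a hardirq handler, to defer the bulk of the work:
 *	tasklet_schedule(&my_tasklet);
 *
 *	// on teardown, from process context only:
 *	tasklet_kill(&my_tasklet);
 *
 * tasklet_schedule() ends up in __tasklet_schedule() above and is a
 * no-op if the tasklet is already pending (TASKLET_STATE_SCHED set).
 */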

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a
 * tasklet to run __tasklet_hrtimer_trampoline() which in turn will call
 * the intended hrtimer callback.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
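
/*
 * Illustrative sketch (not part of the original file): using the
 * tasklet_hrtimer combo so that an hrtimer callback effectively runs in
 * softirq context.  "my_timer_fn" and "my_tt" are hypothetical; the
 * 10 ms expiry below is only an example.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		// called from HI_SOFTIRQ context via the trampolines above
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	static struct tasklet_hrtimer my_tt;
 *
 *	tasklet_hrtimer_init(&my_tt, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_tt, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */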

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
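
/*
 * Illustrative sketch (not part of the original file): queueing softirq
 * work on another CPU with send_remote_softirq().  The embedding
 * structure "my_work", the helper "my_send" and the choice of
 * BLOCK_SOFTIRQ are hypothetical stand-ins; the caller embeds a
 * struct call_single_data in its own per-request structure.
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		// ... payload ...
 *	};
 *
 *	static void my_send(struct my_work *w, int target_cpu)
 *	{
 *		// may be called with preemption and interrupts enabled;
 *		// falls back to the local CPU if target_cpu is offline
 *		send_remote_softirq(&w->csd, target_cpu, BLOCK_SOFTIRQ);
 *	}
 *
 * The receiving CPU links w->csd.list onto its softirq_work_list[] entry
 * and raises the softirq, so the handler for that softirq is expected to
 * walk the per-CPU softirq_work_list to find the queued work.
 */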

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU and
	 * trigger a run of the softirq so the pending work gets handled.
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}
#endif