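/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * in the generic irq -specific files as well as in the support
 * code in arch/x86/kernel/traps.c.
 */
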
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
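/* Debugging check for stack overflow: is there less than 1KB free? */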
static int check_stack_overflow(void)
{
        long sp;

        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
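/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */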
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(PAGE_SIZE)));

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
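
/*
 * Switch the stack pointer to @stack, call @func there, then switch
 * back.  The old stack pointer is parked in %ebx (callee-saved) across
 * the call; the registers the callee may clobber are listed for gcc.
 */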
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi \n"
                     "movl %%ebx,%%esp \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = __get_cpu_var(hardirq_ctx);
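
        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */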
        if (unlikely(curctx == irqctx))
                return 0;
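
        /* build the stack frame on the IRQ stack */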
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;
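
        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */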
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi \n"
                     "movl %%ebx,%%esp \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}
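
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */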
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (per_cpu(hardirq_ctx, cpu))
                return;

        irqctx = &per_cpu(hardirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(hardirq_ctx, cpu) = irqctx;

        irqctx = &per_cpu(softirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(softirq_ctx, cpu) = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

void irq_ctx_exit(int cpu)
{
        per_cpu(hardirq_ctx, cpu) = NULL;
}
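
/*
 * Softirqs run on their own per-CPU stack as well, so that deep
 * process-stack usage cannot be made worse by softirq processing.
 */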
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = __get_cpu_var(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;
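
                /* build the stack frame on the softirq stack */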
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
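                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */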
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif
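
/*
 * Look up the descriptor for @irq and run its handler, on the hardirq
 * stack when CONFIG_4KSTACKS is enabled.  Returns false when the
 * vector has no descriptor, so the caller can flag it as spurious.
 */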
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}

#ifdef CONFIG_HOTPLUG_CPU
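
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */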
void fixup_irqs(void)
{
        unsigned int irq;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (desc->action)
                        printk_once("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
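        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */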
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
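        /* That doesn't seem sufficient.  Give it 1ms. */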
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif