/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified - IRQ handlers won't change
 * it.  If an IRQ source has vanished, its entry is freed by
 * free_irqs just before sigio_handler returns, so the walk there
 * never races with removal.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

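/*
 * Entered from the SIGIO signal handler - polls every registered
 * descriptor via os_waiting_for_events and runs do_IRQ for each fd
 * that reported activity, until the host has no more events pending.
 */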
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

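/*
 * Wire a host file descriptor up to an IRQ: put the fd into async
 * mode, add an irq_fd entry for it to active_fds, and grow the
 * pollfd array handed to the host's poll as needed.
 */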
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

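	/*
	 * Write IRQs start out disabled - the -1 in the pollfd slot
	 * keeps the host poll from watching the descriptor until
	 * reactivate_fd installs it.
	 */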
	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means that the pollfd array was too small to
		 * take the new entry and needs to be reallocated with
		 * at least n bytes.  The lock has to be dropped here
		 * because kmalloc can sleep.  If the array changed
		 * while the lock was dropped, os_create_pollfd fails
		 * again and the allocation is retried with the new
		 * size.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

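/*
 * Walk active_fds under irq_lock and unlink every entry for which
 * the test callback returns true - the common helper behind freeing
 * by (irq, dev_id) and by fd.
 */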
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

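/*
 * Re-enable a descriptor after its driver has handled the event:
 * put the fd back into its pollfd slot and re-register it for
 * SIGIO delivery.
 */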
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

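/*
 * Mask a descriptor: writing -1 into its pollfd slot makes the host
 * poll ignore it.  The irq_fd stays on active_fds, so reactivate_fd
 * can bring it back later.
 */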
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}

	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

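/*
 * Release a UML IRQ: drop the fd binding first so no further SIGIO
 * events are dispatched to it, then hand the IRQ itself back to the
 * generic layer.
 */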
void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

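/*
 * UML drivers request IRQs through this wrapper rather than through
 * request_irq directly: it binds a host descriptor to the IRQ before
 * registering the handler.  fd == -1 skips the descriptor setup and
 * just registers the handler.
 */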
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything else than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

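/*
 * The two chips are functionally identical - only .name differs, so
 * the timer interrupt is labelled separately (e.g. in
 * /proc/interrupts).
 */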
void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to an IRQ stack afterwards - sigaltstack is used,
 * so signals are delivered on a separate stack from the start.
 * These two functions make sure the rest of the kernel isn't upset
 * by running on a different stack.  The IRQ stack has a thread_info
 * at its bottom, and to_irq_stack copies the current task's
 * thread_info into it so that current and current_thread_info()
 * keep working; from_irq_stack copies it back (the flags may have
 * been modified in the meantime) when the handler is done.
 *
 * Because UML doesn't block signals while a handler runs, a second
 * signal can arrive while the first handler is still setting up the
 * stack.  pending_mask arbitrates this - each handler atomically
 * publishes its interrupt bit with xchg.  The first handler in
 * (xchg returned 0) owns the IRQ stack; any handler that loses the
 * race just merges its bits into pending_mask and returns 1 so its
 * caller backs off - the owner will pick those bits up and handle
 * the interrupts itself.  Bit 0 is reserved as the nested/teardown
 * marker and is never a real interrupt bit.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

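/*
 * Undo to_irq_stack: copy the (possibly updated) thread_info back to
 * the task's own stack and return any interrupt bits that were queued
 * in pending_mask while the stack was being torn down.
 */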
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

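	/*
	 * Set bit 0 so that a signal arriving from here on sees a
	 * non-zero pending_mask, queues itself, and stays off the
	 * stack while it is being torn down.
	 */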
	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}