#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
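
/*
 * epoll reports readiness per file descriptor, but one fd may back several
 * IRQs (for example separate read and write IRQs).  Each tracked fd
 * therefore gets one irq_entry carrying an irq_fd slot per IRQ type.
 */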
struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
};

static struct irq_entry *active_fds;

static DEFINE_SPINLOCK(irq_lock);

static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
{
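	/*
	 * irq->active guards against re-entering the handler for this
	 * irq_fd, irq->pending records events that arrive while the handler
	 * is running so they get replayed, and irq->purge aborts the loop
	 * once the descriptor is being freed.
	 */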
	if (irq->active) {
		irq->active = false;
		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
		if (!irq->purge)
			irq->active = true;
	} else {
		irq->pending = true;
	}
}

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_entry *irq_entry;
	struct irq_fd *irq;
	int n, i, j;

	while (1) {
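		/*
		 * Fetch the next batch of epoll events; retry if the wait
		 * was interrupted and stop once nothing is pending.
		 */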
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n ; i++) {
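			/*
			 * The epoll data pointer is the irq_entry for this
			 * fd; scan its per-type slots and run every IRQ
			 * whose event mask matches what epoll reported.
			 */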
			irq_entry = (struct irq_entry *)
				os_epoll_get_data_pointer(i);
			for (j = 0; j < MAX_IRQ_TYPE ; j++) {
				irq = irq_entry->irq_array[j];
				if (irq == NULL)
					continue;
				if (os_epoll_triggered(i, irq->events) > 0)
					irq_io_loop(irq, regs);
				if (irq->purge) {
					irq_entry->irq_array[j] = NULL;
					kfree(irq);
				}
			}
		}
	}
}

static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
{
	int i;
	int events = 0;
	struct irq_fd *irq;

	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		irq = irq_entry->irq_array[i];
		if (irq != NULL)
			events = irq->events | events;
	}
	if (events > 0) {
		/* Events are still wanted on this fd - (re)arm it in epoll. */
		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
	}
	/* No IRQ wants events on this fd any more - drop it from epoll. */
	return os_del_epoll_fd(irq_entry->fd);
}

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct irq_fd *new_fd;
	struct irq_entry *irq_entry;
	int i, err, events;
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);

	/* Check whether we already track this fd. */
	err = -EBUSY;
	for (irq_entry = active_fds;
		irq_entry != NULL; irq_entry = irq_entry->next) {
		if (irq_entry->fd == fd)
			break;
	}

	if (irq_entry == NULL) {
		/* First IRQ on this fd - allocate a new entry.  GFP_ATOMIC
		 * because we hold a spinlock with interrupts disabled.
		 */
		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
		if (irq_entry == NULL) {
			printk(KERN_ERR
				"Failed to allocate new IRQ entry\n");
			goto out_unlock;
		}
		irq_entry->fd = fd;
		for (i = 0; i < MAX_IRQ_TYPE; i++)
			irq_entry->irq_array[i] = NULL;
		irq_entry->next = active_fds;
		active_fds = irq_entry;
	}

	/* Refuse to register the same IRQ type twice for one fd. */
	if (irq_entry->irq_array[type] != NULL) {
		printk(KERN_ERR
			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
			irq, fd, type, dev_id
		);
		goto out_unlock;
	} else {
		/* New irq_fd for this fd and IRQ type. */
		err = -ENOMEM;
		new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
		if (new_fd == NULL)
			goto out_unlock;

		events = os_event_mask(type);

		*new_fd = ((struct irq_fd) {
			.id		= dev_id,
			.irq		= irq,
			.type		= type,
			.events		= events,
			.active		= true,
			.pending	= false,
			.purge		= false
		});
		/* Take the fd out of epoll while the entry is updated so
		 * the IRQ loop never sees it half-initialized.
		 */
		os_del_epoll_fd(irq_entry->fd);
		irq_entry->irq_array[type] = new_fd;
	}

	/* Re-arm epoll with the combined event mask for this fd. */
	assign_epoll_events_to_irq(irq_entry);
	spin_unlock_irqrestore(&irq_lock, flags);
	maybe_sigio_broken(fd, (type != IRQ_NONE));

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}
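
/*
 * Walk the active_fds list and free entries that no longer carry any
 * irq_fd.  Must be called with irq_lock held.
 */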
static void garbage_collect_irq_entries(void)
{
	int i;
	bool reap;
	struct irq_entry *walk;
	struct irq_entry *previous = NULL;
	struct irq_entry *to_free;

	if (active_fds == NULL)
		return;
	walk = active_fds;
	while (walk != NULL) {
		reap = true;
		for (i = 0; i < MAX_IRQ_TYPE ; i++) {
			if (walk->irq_array[i] != NULL) {
				reap = false;
				break;
			}
		}
		if (reap) {
			/* Unlink the dead entry; previous stays put. */
			if (previous == NULL)
				active_fds = walk->next;
			else
				previous->next = walk->next;
			to_free = walk;
		} else {
			/* Keep this entry and remember it as the last kept
			 * node so a later unlink does not drop it.
			 */
			previous = walk;
			to_free = NULL;
		}
		walk = walk->next;
		if (to_free != NULL)
			kfree(to_free);
	}
}
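
/*
 * Find the irq_entry tracking an fd, or NULL if there is none.
 * The caller must hold irq_lock.
 */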
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk = active_fds;

	while (walk != NULL) {
		if (walk->fd == fd)
			return walk;
		walk = walk->next;
	}
	return NULL;
}
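
/*
 * Free the irq_fds of one entry that match the given IRQ number and device
 * id.  IGNORE_IRQ and IGNORE_DEV in flags disable the respective check, so
 * IGNORE_IRQ | IGNORE_DEV frees everything registered on the entry.
 */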
#define IGNORE_IRQ 1
#define IGNORE_DEV (1<<1)

static void do_free_by_irq_and_dev(
	struct irq_entry *irq_entry,
	unsigned int irq,
	void *dev,
	int flags
)
{
	int i;
	struct irq_fd *to_free;

	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		if (irq_entry->irq_array[i] != NULL) {
			if (
			((flags & IGNORE_IRQ) ||
				(irq_entry->irq_array[i]->irq == irq)) &&
			((flags & IGNORE_DEV) ||
				(irq_entry->irq_array[i]->id == dev))
			) {
				/* Drop the fd from epoll before touching the
				 * array, then re-arm with whatever remains.
				 * An irq_fd whose handler is still running is
				 * only marked for purge; the SIGIO loop frees
				 * it once the handler has finished.
				 */
				os_del_epoll_fd(irq_entry->fd);
				to_free = irq_entry->irq_array[i];
				irq_entry->irq_array[i] = NULL;
				assign_epoll_events_to_irq(irq_entry);
				if (to_free->active)
					to_free->purge = true;
				else
					kfree(to_free);
			}
		}
	}
}

void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irq,
			dev,
			0
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}

void reactivate_fd(int fd, int irqnum)
{
	/* Intentionally empty: with the epoll based IRQ controller the fd
	 * stays armed, so there is nothing to re-activate.
	 */
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *to_free;
	unsigned long flags;

	os_del_epoll_fd(fd);
	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irqnum,
			NULL,
			IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);
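
/*
 * Tear down every fd based IRQ and close the host epoll fd itself; after
 * this no fd backed interrupt can be delivered any more.
 */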
int deactivate_all_fds(void)
{
	unsigned long flags;
	struct irq_entry *to_free;

	spin_lock_irqsave(&irq_lock, flags);
	/* Stop further SIGIO delivery before tearing the IRQs down. */
	os_set_ioignore();
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	os_close_epoll_fd();
	return 0;
}
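
/*
 * do_IRQ dispatches one IRQ to the generic IRQ layer with the UML register
 * snapshot installed as the IRQ registers.
 */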
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
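
/*
 * Typical use from a UML driver (illustrative sketch only - host_fd,
 * console_interrupt and dev below are made-up names, not part of this
 * file):
 *
 *	static irqreturn_t console_interrupt(int irq, void *dev_id)
 *	{
 *		// drain the host fd, push data up the stack ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	// register: attach the host fd to an IRQ and install the handler
 *	err = um_request_irq(CONSOLE_IRQ, host_fd, IRQ_READ,
 *			     console_interrupt, IRQF_SHARED, "console", dev);
 *
 *	// teardown: drop the fd tracking and release the kernel IRQ
 *	um_free_irq(CONSOLE_IRQ, dev);
 */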

/* No-op irq_chip callbacks: SIGIO/SIGVTALRM driven IRQs need no real
 * masking or acking, but the IRQ core and edge handler expect the hooks
 * to exist.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Create the host epoll fd that sigio_handler() polls. */
	os_setup_epoll();
}
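
/*
 * IRQ stack handling.  UML takes signals on a separate stack that carries
 * its own thread_info.  to_irq_stack() copies the current task's
 * thread_info onto that stack and points the task at it so that current
 * and friends keep working there; from_irq_stack() copies the (possibly
 * modified) thread_info back and restores the task's stack pointer.
 * pending_mask serializes signals that race with this set-up: its bits
 * record signals that arrived while another one was preparing the stack,
 * and the signal that won the race handles them on the losers' behalf.
 */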
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * Another signal is already setting up the IRQ stack; fold
		 * our bits into pending_mask, retrying until xchg hands back
		 * exactly what we stored so that no concurrently raised bits
		 * are lost, and let that other handler process them.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}