/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler, which takes the irq_lock to
 * remove the list elements on the way out.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
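
/*
 * For reference, the list nodes have this shape (the definition lives in
 * the shared irq_user.h header; the fields shown are the ones this file
 * actually uses):
 *
 *	struct irq_fd {
 *		struct irq_fd *next;	// singly-linked list
 *		void *id;		// dev_id cookie from the driver
 *		int fd;			// host file descriptor
 *		int type;		// IRQ_READ or IRQ_WRITE
 *		int irq;		// guest IRQ number
 *		int events;		// poll events to watch for
 *		int current_events;	// events reported by the host
 *	};
 */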

extern void free_irqs(void);
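
/*
 * SIGIO handler: poll the registered descriptors via the os layer and
 * dispatch do_IRQ for every fd that reported events, looping until the
 * host reports no more events (or an error other than EINTR).  The list
 * is stable here (see the comment on active_fds above); any IRQs queued
 * for freeing in the meantime are released by free_irqs() on the way out.
 */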
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);
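
/*
 * Wire a host file descriptor up to a guest IRQ: mark the fd async so the
 * host delivers SIGIO for it, build an irq_fd node, reject duplicate
 * (fd, type) registrations, grow the os-level pollfds array as needed
 * (retrying, since the lock must be dropped around kmalloc), and link the
 * node onto active_fds.
 */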
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd didn't fit: tmp_pfd is
		 * either NULL or too small for the expanded pollfds
		 * array, and at least n bytes are needed.
		 *
		 * We have to drop the lock in order to call kmalloc,
		 * which might sleep.  If something else changed the
		 * pollfds array in the meantime so that the new entry
		 * still doesn't fit, we free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}
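
/*
 * Remove every list entry that the test callback matches, delegating the
 * actual unlinking to the os layer so the pollfds array stays in sync.
 */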
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}
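
/*
 * Re-arm a descriptor after its IRQ has been handled: put the fd back
 * into the os pollfds array and re-register it for SIGIO delivery.
 */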
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}
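
/*
 * Stop watching a descriptor without unregistering its IRQ: clear its
 * pollfds slot (set it to -1) and tell the SIGIO machinery to ignore
 * the fd.
 */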
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}

	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
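
/*
 * Illustrative only (not part of this file): a UML driver would wire a
 * host fd to an IRQ roughly like this, with MY_IRQ, my_fd, my_handler,
 * and dev standing in for the driver's own IRQ number, descriptor,
 * interrupt handler, and dev_id cookie:
 *
 *	err = um_request_irq(MY_IRQ, my_fd, IRQ_READ, my_handler,
 *			     IRQF_SHARED, "my-device", dev);
 *	if (err)
 *		return err;
 *	...
 *	// in my_handler, after consuming the pending data:
 *	reactivate_fd(my_fd, MY_IRQ);
 *	...
 *	// on teardown:
 *	um_free_irq(MY_IRQ, dev);
 */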

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};
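
/*
 * Attach the dummy chips above to every IRQ: the timer IRQ (number 0,
 * which is why the loop starts at 1) gets the SIGVTALRM chip, everything
 * else the SIGIO one, all with the edge flow handler.
 */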
void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * confused by running on a different stack.
 *
 * to_irq_stack copies the task's thread_info onto the IRQ stack,
 * remembers the original in ->real_thread, and points task->stack at
 * the copy, so current and current_thread_info() keep working while
 * the handler runs on the signal stack.  pending_mask arbitrates
 * ownership of the stack: if a signal arrives while another handler
 * already owns it, the new signal's bit is parked in pending_mask and
 * to_irq_stack returns nonzero, telling that caller to bail out; the
 * owning handler then finds the parked bits in *mask_out and processes
 * those signals itself.
 *
 * from_irq_stack undoes the switch, copying the thread_info back to
 * its real location and returning the mask of any signals that were
 * parked during the handoff.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	/* Park any signals that arrive while we switch back */
	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}