/*
 * UML interrupt handling.
 *
 * Maps host file descriptors and signals (SIGIO for I/O, SIGVTALRM for
 * the timer) onto the generic Linux IRQ layer.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * The list of active file descriptors is accessed under irq_lock,
 * except in sigio_handler(), which runs in interrupt context and only
 * reads it.  Entries whose IRQ sources have vanished are freed by
 * free_irqs() just before sigio_handler() returns.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);
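
/*
 * SIGIO handler: poll all active descriptors via os_waiting_for_events()
 * and hand every descriptor with pending events to do_IRQ().  Retries on
 * -EINTR and loops until no events remain.
 */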
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

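/*
 * Register a host file descriptor as an IRQ source: put the descriptor
 * into async mode, add an irq_fd entry to active_fds (rejecting
 * duplicate fd/type pairs), and grow the host pollfd array to match.
 */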
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next = NULL,
				     .id = dev_id,
				     .fd = fd,
				     .type = type,
				     .irq = irq,
				     .events = events,
				     .current_events = 0 });

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the current pollfd array is too small and
		 * tmp_pfd is NULL or too small; at least n bytes are
		 * needed.  Drop the lock so kmalloc() can sleep, throw
		 * away any stale buffer, allocate a bigger one, and
		 * retry with the lock reacquired (something else may
		 * have changed the array in the meantime).
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL) {
			err = -ENOMEM;
			goto out_kfree;
		}

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

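/*
 * Remove every active_fds entry that the test callback matches.  The
 * list walk itself happens in os_free_irq_by_cb(), under irq_lock.
 */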
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

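/*
 * Look up the active_fds entry for (fd, irqnum) and return its index in
 * the host pollfd array via index_out, sanity-checking that both arrays
 * still agree on the descriptor.  Must be called with irq_lock held.
 */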
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

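/*
 * reactivate_fd()/deactivate_fd() re-arm or disarm polling of a
 * registered descriptor by patching its slot in the host pollfd array
 * (-1 disarms it) and updating the SIGIO watch list to match.
 */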
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking, because
 * by this point nothing else should be registering or freeing IRQs.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}

	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

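/*
 * Drivers register their IRQ sources through here.  Typical usage
 * (names here are illustrative, not from any real driver):
 *
 *	err = um_request_irq(MY_IRQ, fd, IRQ_READ, my_handler,
 *			     IRQF_SHARED, "my-device", dev);
 *
 * Passing fd == -1 skips activate_fd() and just wires up the handler.
 */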
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

/*
 * The IRQ flow handlers expect working enable/disable/ack callbacks,
 * but there is no interrupt hardware to program, so they are no-ops.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

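/*
 * Wire up the IRQ descriptors: TIMER_IRQ is driven by SIGVTALRM, all
 * remaining IRQs by SIGIO, using the edge-triggered flow handler.
 */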
void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type,
				 handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type,
					 handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike other architectures, UML takes interrupt signals on a separate
 * stack from the start (via sigaltstack) rather than switching stacks
 * after entry.  The IRQ stack has a thread_info structure at its bottom
 * so that current and current_thread_info() keep working while on it.
 *
 * to_irq_stack() is called early in the signal handler.  For the
 * outermost interrupt, it copies the task's thread_info onto the IRQ
 * stack, records the original in real_thread, and points task->stack at
 * the IRQ stack copy.  from_irq_stack() undoes this on the way out,
 * copying the (possibly modified) thread_info back.
 *
 * pending_mask serializes racing signals: UML doesn't block signals
 * while one is being handled, so a second signal can arrive while the
 * first is still on the IRQ stack.  A handler that loses the xchg race
 * merges its interrupt bit into pending_mask and returns immediately;
 * the winner picks the accumulated bits up in *mask_out and processes
 * those interrupts itself.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * Another handler beat us onto the IRQ stack, so it
		 * will process our interrupt for us.  Merge our bits
		 * into pending_mask, repeating the xchg until it
		 * returns exactly what we stored, which means no new
		 * interrupt bits slipped in underneath us.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

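/*
 * Undo to_irq_stack(): copy the thread_info back to the process stack
 * and return the mask of interrupts that arrived while we were
 * switching, with the bit-0 sentinel stripped.
 */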
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}