/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from modification.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before sigio_handler returns.  free_irqs processes a separate list
 * of IRQs to free, with its own locking, and comes back here to
 * unlink list elements, taking irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
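
/*
 * For reference, the list elements are struct irq_fd, defined in the
 * UML shared headers.  This sketch is reconstructed from the
 * initializer in activate_fd() below, not copied from the header:
 *
 *	struct irq_fd {
 *		struct irq_fd *next;
 *		void *id;
 *		int fd;
 *		int type;
 *		int irq;
 *		int events;
 *		int current_events;
 *	};
 */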

extern void free_irqs(void);

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        struct irq_fd *irq_fd;
        int n;

        while (1) {
                n = os_waiting_for_events(active_fds);
                if (n <= 0) {
                        if (n == -EINTR)
                                continue;
                        else
                                break;
                }

                for (irq_fd = active_fds; irq_fd != NULL;
                     irq_fd = irq_fd->next) {
                        if (irq_fd->current_events != 0) {
                                irq_fd->current_events = 0;
                                do_IRQ(irq_fd->irq, regs);
                        }
                }
        }

        free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
        struct pollfd *tmp_pfd;
        struct irq_fd *new_fd, *irq_fd;
        unsigned long flags;
        int events, err, n;

        err = os_set_fd_async(fd);
        if (err < 0)
                goto out;

        err = -ENOMEM;
        new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
        if (new_fd == NULL)
                goto out;

        if (type == IRQ_READ)
                events = UM_POLLIN | UM_POLLPRI;
        else
                events = UM_POLLOUT;
        *new_fd = ((struct irq_fd) { .next              = NULL,
                                     .id                = dev_id,
                                     .fd                = fd,
                                     .type              = type,
                                     .irq               = irq,
                                     .events            = events,
                                     .current_events    = 0 } );

        err = -EBUSY;
        spin_lock_irqsave(&irq_lock, flags);
        for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
                if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
                        printk(KERN_ERR "Registering fd %d twice\n", fd);
                        printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
                        printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
                               dev_id);
                        goto out_unlock;
                }
        }

        if (type == IRQ_WRITE)
                fd = -1;

        tmp_pfd = NULL;
        n = 0;

        while (1) {
                n = os_create_pollfd(fd, events, tmp_pfd, n);
                if (n == 0)
                        break;

                /*
                 * n > 0 means the new pollfd didn't fit into the current
                 * pollfds array, and tmp_pfd is either NULL or too small
                 * for the enlarged array; the size needed is at least n.
                 * (See the sketch of os_create_pollfd's contract after
                 * this function.)
                 *
                 * We have to drop the lock here in order to call kmalloc,
                 * which might sleep.  If something else came in meanwhile
                 * and grew the pollfds array, so the new pollfd still
                 * doesn't fit, we free tmp_pfd and try again.
                 */
                spin_unlock_irqrestore(&irq_lock, flags);
                kfree(tmp_pfd);

                tmp_pfd = kmalloc(n, GFP_KERNEL);
                if (tmp_pfd == NULL)
                        goto out_kfree;

                spin_lock_irqsave(&irq_lock, flags);
        }

        *last_irq_ptr = new_fd;
        last_irq_ptr = &new_fd->next;

        spin_unlock_irqrestore(&irq_lock, flags);

        /*
         * This calls activate_fd, so it has to be outside the critical
         * section.
         */
        maybe_sigio_broken(fd, (type == IRQ_READ));

        return 0;

 out_unlock:
        spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
        kfree(new_fd);
 out:
        return err;
}
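
/*
 * A hedged sketch of the contract activate_fd() relies on above:
 * os_create_pollfd() (in the os-Linux side of UML) appends the fd to
 * the host pollfds array and returns 0, or, when the array is full and
 * the caller-supplied buffer is absent or too small, returns the
 * buffer size it needs.  Pseudocode, not the authoritative source:
 *
 *	int os_create_pollfd(int fd, int events, void *tmp_pfd, int size)
 *	{
 *		if (the pollfds array is full) {
 *			if (size is too small for the grown array)
 *				return the size needed;
 *			switch the array over to tmp_pfd;
 *		}
 *		append (fd, events) to the array;
 *		return 0;
 *	}
 */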

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
        spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
        int irq;
        void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
        struct irq_and_dev *data = d;

        return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
        struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
                                                          .dev  = dev });

        free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
        return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
        free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
        struct irq_fd *irq;
        int i = 0;
        int fdi;

        for (irq = active_fds; irq != NULL; irq = irq->next) {
                if ((irq->fd == fd) && (irq->irq == irqnum))
                        break;
                i++;
        }
        if (irq == NULL) {
                printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
                       fd);
                goto out;
        }
        fdi = os_get_pollfd(i);
        if ((fdi != -1) && (fdi != fd)) {
                printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
                       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
                       fdi, fd);
                irq = NULL;
                goto out;
        }
        *index_out = i;
 out:
        return irq;
}

void reactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&irq_lock, flags);
        irq = find_irq_by_fd(fd, irqnum, &i);
        if (irq == NULL) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return;
        }
        os_set_pollfd(i, irq->fd);
        spin_unlock_irqrestore(&irq_lock, flags);

        add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&irq_lock, flags);
        irq = find_irq_by_fd(fd, irqnum, &i);
        if (irq == NULL) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return;
        }

        os_set_pollfd(i, -1);
        spin_unlock_irqrestore(&irq_lock, flags);

        ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
        struct irq_fd *irq;
        int err;

        for (irq = active_fds; irq != NULL; irq = irq->next) {
                err = os_clear_fd_async(irq->fd);
                if (err)
                        return err;
        }
        /* If a signal is already queued, ignore it after unblocking */
        os_set_ioignore();

        return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
        irq_enter();
        generic_handle_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

void um_free_irq(unsigned int irq, void *dev)
{
        free_irq_by_irq_and_dev(irq, dev);
        free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(unsigned int irq, int fd, int type,
                   irq_handler_t handler,
                   unsigned long irqflags, const char *devname,
                   void *dev_id)
{
        int err;

        if (fd != -1) {
                err = activate_fd(irq, fd, type, dev_id);
                if (err)
                        return err;
        }

        return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

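/*
 * Usage example (a hedged sketch, not code from this file): a UML
 * driver that owns a host file descriptor registers it roughly like
 * the fragment below.  MY_IRQ, my_fd, my_intr, and dev are
 * hypothetical names; the reactivate_fd() call re-arms the descriptor
 * in the pollfds array once the pending data has been consumed, in the
 * style of the UML line and network drivers:
 *
 *	static irqreturn_t my_intr(int irq, void *dev)
 *	{
 *		... consume the data available on my_fd ...
 *		reactivate_fd(my_fd, MY_IRQ);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = um_request_irq(MY_IRQ, my_fd, IRQ_READ, my_intr,
 *			     IRQF_SHARED, "my_dev", dev);
 *	...
 *	um_free_irq(MY_IRQ, dev);
 */
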
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
        .name = "SIGIO",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
        .irq_mask = dummy,
        .irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
        .name = "SIGVTALRM",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
        .irq_mask = dummy,
        .irq_unmask = dummy,
};

void __init init_IRQ(void)
{
        int i;

        irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

        /* TIMER_IRQ (0) was set up above, so the loop starts at 1 */
        for (i = 1; i < NR_IRQS; i++)
                irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask or sa_mask, so a second signal could
 * arrive while a previous one is still setting up the thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask below.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the outer handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
        struct thread_info *ti;
        unsigned long mask, old;
        int nested;

        mask = xchg(&pending_mask, *mask_out);
        if (mask != 0) {
                /*
                 * If any interrupts come in at this point, we want to
                 * make sure that their bits aren't lost by our
                 * putting our bit in.  So, this loop accumulates bits
                 * until xchg returns the same value that we put in.
                 * When that happens, there were no new interrupts,
                 * and pending_mask contains a bit for each interrupt
                 * that came in.
                 */
                old = *mask_out;
                do {
                        old |= mask;
                        mask = xchg(&pending_mask, old);
                } while (mask != old);
                return 1;
        }

        ti = current_thread_info();
        nested = (ti->real_thread != NULL);
        if (!nested) {
                struct task_struct *task;
                struct thread_info *tti;

                task = cpu_tasks[ti->cpu].task;
                tti = task_thread_info(task);

                *ti = *tti;
                ti->real_thread = tti;
                task->stack = ti;
        }

        mask = xchg(&pending_mask, 0);
        *mask_out |= mask | nested;
        return 0;
}

unsigned long from_irq_stack(int nested)
{
        struct thread_info *ti, *to;
        unsigned long mask;

        ti = current_thread_info();

        pending_mask = 1;

        to = ti->real_thread;
        current->stack = to;
        ti->real_thread = NULL;
        *to = *ti;

        mask = xchg(&pending_mask, 0);
        return mask & ~1;
}
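
/*
 * A hedged sketch of how the signal-delivery code on the os-Linux side
 * of UML is expected to drive the pair above; the exact caller may
 * differ, and the names here are illustrative:
 *
 *	unsigned long pending = 1UL << sig;
 *
 *	do {
 *		// Bail out if stack setup was interrupted; the outer
 *		// handler will find our bit in pending_mask and run us.
 *		if (to_irq_stack(&pending))
 *			return;
 *
 *		nested = pending & 1;	// bit 0 is the nesting flag
 *		pending &= ~1;
 *
 *		... run the handler for each signal bit in pending ...
 *
 *		if (!nested)
 *			pending = from_irq_stack(nested);
 *	} while (pending);
 */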