linux/kernel/irq/manage.c
   1/*
   2 * linux/kernel/irq/manage.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006 Thomas Gleixner
   6 *
   7 * This file contains driver APIs to the irq subsystem.
   8 */
   9
  10#include <linux/irq.h>
  11#include <linux/kthread.h>
  12#include <linux/module.h>
  13#include <linux/random.h>
  14#include <linux/interrupt.h>
  15#include <linux/slab.h>
  16#include <linux/sched.h>
  17
  18#include "internals.h"
  19
  20#ifdef CONFIG_IRQ_FORCED_THREADING
  21__read_mostly bool force_irqthreads;
  22
  23static int __init setup_forced_irqthreads(char *arg)
  24{
  25        force_irqthreads = true;
  26        return 0;
  27}
  28early_param("threadirqs", setup_forced_irqthreads);
  29#endif
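/*
 * Editorial note, not part of the original file: forced irq threading is
 * enabled by booting with "threadirqs" on the kernel command line, e.g.
 * (hypothetical boot loader entry):
 *
 *	linux /boot/vmlinuz root=/dev/sda1 ro threadirqs
 */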
  30
  31/**
  32 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  33 *      @irq: interrupt number to wait for
  34 *
  35 *      This function waits for any pending IRQ handlers for this interrupt
   36 *      to complete before returning. If you use this function while
   37 *      holding a resource the IRQ handler may need, you will deadlock.
  38 *
  39 *      This function may be called - with care - from IRQ context.
  40 */
  41void synchronize_irq(unsigned int irq)
  42{
  43        struct irq_desc *desc = irq_to_desc(irq);
  44        bool inprogress;
  45
  46        if (!desc)
  47                return;
  48
  49        do {
  50                unsigned long flags;
  51
  52                /*
  53                 * Wait until we're out of the critical section.  This might
  54                 * give the wrong answer due to the lack of memory barriers.
  55                 */
  56                while (irqd_irq_inprogress(&desc->irq_data))
  57                        cpu_relax();
  58
  59                /* Ok, that indicated we're done: double-check carefully. */
  60                raw_spin_lock_irqsave(&desc->lock, flags);
  61                inprogress = irqd_irq_inprogress(&desc->irq_data);
  62                raw_spin_unlock_irqrestore(&desc->lock, flags);
  63
  64                /* Oops, that failed? */
  65        } while (inprogress);
  66
  67        /*
  68         * We made sure that no hardirq handler is running. Now verify
  69         * that no threaded handlers are active.
  70         */
  71        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
  72}
  73EXPORT_SYMBOL(synchronize_irq);
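/*
 * Illustrative sketch, not part of the original file: a typical teardown
 * sequence that relies on synchronize_irq().  All "foo" names are
 * hypothetical; the point is to quiesce the device first, then wait for
 * handlers still running on other CPUs before freeing their data.
 */
#if 0
static void foo_shutdown(struct foo_dev *foo)
{
	foo_hw_mask_irqs(foo);		/* device raises no new interrupts */
	synchronize_irq(foo->irq);	/* wait for in-flight handlers */
	kfree(foo->event_ring);		/* now safe: no handler can touch it */
}
#endif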
  74
  75#ifdef CONFIG_SMP
  76cpumask_var_t irq_default_affinity;
  77
  78/**
  79 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
  80 *      @irq:           Interrupt to check
  81 *
  82 */
  83int irq_can_set_affinity(unsigned int irq)
  84{
  85        struct irq_desc *desc = irq_to_desc(irq);
  86
  87        if (!desc || !irqd_can_balance(&desc->irq_data) ||
  88            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
  89                return 0;
  90
  91        return 1;
  92}
  93
  94/**
  95 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
   96 *      @desc:          irq descriptor whose affinity has changed
  97 *
  98 *      We just set IRQTF_AFFINITY and delegate the affinity setting
  99 *      to the interrupt thread itself. We can not call
 100 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 101 *      code can be called from hard interrupt context.
 102 */
 103void irq_set_thread_affinity(struct irq_desc *desc)
 104{
 105        struct irqaction *action = desc->action;
 106
 107        while (action) {
 108                if (action->thread)
 109                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
 110                action = action->next;
 111        }
 112}
 113
 114#ifdef CONFIG_GENERIC_PENDING_IRQ
 115static inline bool irq_can_move_pcntxt(struct irq_data *data)
 116{
 117        return irqd_can_move_in_process_context(data);
 118}
 119static inline bool irq_move_pending(struct irq_data *data)
 120{
 121        return irqd_is_setaffinity_pending(data);
 122}
 123static inline void
 124irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 125{
 126        cpumask_copy(desc->pending_mask, mask);
 127}
 128static inline void
 129irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 130{
 131        cpumask_copy(mask, desc->pending_mask);
 132}
 133#else
 134static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
 135static inline bool irq_move_pending(struct irq_data *data) { return false; }
 136static inline void
 137irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 138static inline void
 139irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 140#endif
 141
 142int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 143{
 144        struct irq_chip *chip = irq_data_get_irq_chip(data);
 145        struct irq_desc *desc = irq_data_to_desc(data);
 146        int ret = 0;
 147
 148        if (!chip || !chip->irq_set_affinity)
 149                return -EINVAL;
 150
 151        if (irq_can_move_pcntxt(data)) {
 152                ret = chip->irq_set_affinity(data, mask, false);
 153                switch (ret) {
 154                case IRQ_SET_MASK_OK:
 155                        cpumask_copy(data->affinity, mask);
 156                case IRQ_SET_MASK_OK_NOCOPY:
 157                        irq_set_thread_affinity(desc);
 158                        ret = 0;
 159                }
 160        } else {
 161                irqd_set_move_pending(data);
 162                irq_copy_pending(desc, mask);
 163        }
 164
 165        if (desc->affinity_notify) {
 166                kref_get(&desc->affinity_notify->kref);
 167                schedule_work(&desc->affinity_notify->work);
 168        }
 169        irqd_set(data, IRQD_AFFINITY_SET);
 170
 171        return ret;
 172}
 173
 174/**
 175 *      irq_set_affinity - Set the irq affinity of a given irq
 176 *      @irq:           Interrupt to set affinity
 177 *      @mask:          cpumask
 178 *
 179 */
 180int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 181{
 182        struct irq_desc *desc = irq_to_desc(irq);
 183        unsigned long flags;
 184        int ret;
 185
 186        if (!desc)
 187                return -EINVAL;
 188
 189        raw_spin_lock_irqsave(&desc->lock, flags);
  190        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
 191        raw_spin_unlock_irqrestore(&desc->lock, flags);
 192        return ret;
 193}
 194
 195int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 196{
 197        unsigned long flags;
 198        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 199
 200        if (!desc)
 201                return -EINVAL;
 202        desc->affinity_hint = m;
 203        irq_put_desc_unlock(desc, flags);
 204        return 0;
 205}
 206EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
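/*
 * Illustrative sketch, not part of the original file: a multi-queue driver
 * publishing one preferred CPU per vector via irq_set_affinity_hint(); the
 * hint is exposed in /proc/irq/<n>/affinity_hint for tools like irqbalance.
 * All "foo" names are hypothetical.
 */
#if 0
static void foo_set_affinity_hints(struct foo_dev *foo)
{
	unsigned int i;

	for (i = 0; i < foo->nr_queues; i++)
		irq_set_affinity_hint(foo->queue_irq[i],
				      cpumask_of(i % num_online_cpus()));
}

static void foo_clear_affinity_hints(struct foo_dev *foo)
{
	unsigned int i;

	/* hints must be cleared again before the irqs are freed */
	for (i = 0; i < foo->nr_queues; i++)
		irq_set_affinity_hint(foo->queue_irq[i], NULL);
}
#endif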
 207
 208static void irq_affinity_notify(struct work_struct *work)
 209{
 210        struct irq_affinity_notify *notify =
 211                container_of(work, struct irq_affinity_notify, work);
 212        struct irq_desc *desc = irq_to_desc(notify->irq);
 213        cpumask_var_t cpumask;
 214        unsigned long flags;
 215
 216        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 217                goto out;
 218
 219        raw_spin_lock_irqsave(&desc->lock, flags);
 220        if (irq_move_pending(&desc->irq_data))
 221                irq_get_pending(cpumask, desc);
 222        else
 223                cpumask_copy(cpumask, desc->irq_data.affinity);
 224        raw_spin_unlock_irqrestore(&desc->lock, flags);
 225
 226        notify->notify(notify, cpumask);
 227
 228        free_cpumask_var(cpumask);
 229out:
 230        kref_put(&notify->kref, notify->release);
 231}
 232
 233/**
 234 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 235 *      @irq:           Interrupt for which to enable/disable notification
 236 *      @notify:        Context for notification, or %NULL to disable
 237 *                      notification.  Function pointers must be initialised;
 238 *                      the other fields will be initialised by this function.
 239 *
 240 *      Must be called in process context.  Notification may only be enabled
 241 *      after the IRQ is allocated and must be disabled before the IRQ is
 242 *      freed using free_irq().
 243 */
 244int
 245irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 246{
 247        struct irq_desc *desc = irq_to_desc(irq);
 248        struct irq_affinity_notify *old_notify;
 249        unsigned long flags;
 250
 251        /* The release function is promised process context */
 252        might_sleep();
 253
 254        if (!desc)
 255                return -EINVAL;
 256
 257        /* Complete initialisation of *notify */
 258        if (notify) {
 259                notify->irq = irq;
 260                kref_init(&notify->kref);
 261                INIT_WORK(&notify->work, irq_affinity_notify);
 262        }
 263
 264        raw_spin_lock_irqsave(&desc->lock, flags);
 265        old_notify = desc->affinity_notify;
 266        desc->affinity_notify = notify;
 267        raw_spin_unlock_irqrestore(&desc->lock, flags);
 268
 269        if (old_notify)
 270                kref_put(&old_notify->kref, old_notify->release);
 271
 272        return 0;
 273}
 274EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
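/*
 * Illustrative sketch, not part of the original file: registering an
 * affinity notifier.  notify() is invoked from a workqueue after the
 * affinity changes; release() frees the embedding object when the last
 * reference goes away.  All "foo" names are hypothetical.
 */
#if 0
struct foo_irq_ctx {
	struct irq_affinity_notify notify;
	unsigned int queue;
};

static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct foo_irq_ctx *ctx =
		container_of(notify, struct foo_irq_ctx, notify);

	foo_retarget_queue(ctx->queue, mask);	/* e.g. move completion work */
}

static void foo_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	kfree(container_of(notify, struct foo_irq_ctx, notify));
}

static int foo_register_notifier(unsigned int irq, unsigned int queue)
{
	struct foo_irq_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->queue = queue;
	ctx->notify.notify = foo_affinity_changed;
	ctx->notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}
#endif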
 275
 276#ifndef CONFIG_AUTO_IRQ_AFFINITY
 277/*
 278 * Generic version of the affinity autoselector.
 279 */
 280static int
 281setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 282{
 283        struct irq_chip *chip = irq_desc_get_chip(desc);
 284        struct cpumask *set = irq_default_affinity;
 285        int ret;
 286
 287        /* Excludes PER_CPU and NO_BALANCE interrupts */
 288        if (!irq_can_set_affinity(irq))
 289                return 0;
 290
 291        /*
  292         * Preserve a userspace affinity setup, but make sure that
 293         * one of the targets is online.
 294         */
 295        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 296                if (cpumask_intersects(desc->irq_data.affinity,
 297                                       cpu_online_mask))
 298                        set = desc->irq_data.affinity;
 299                else
 300                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 301        }
 302
 303        cpumask_and(mask, cpu_online_mask, set);
 304        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
 305        switch (ret) {
 306        case IRQ_SET_MASK_OK:
 307                cpumask_copy(desc->irq_data.affinity, mask);
 308        case IRQ_SET_MASK_OK_NOCOPY:
 309                irq_set_thread_affinity(desc);
 310        }
 311        return 0;
 312}
 313#else
 314static inline int
 315setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 316{
 317        return irq_select_affinity(irq);
 318}
 319#endif
 320
 321/*
 322 * Called when affinity is set via /proc/irq
 323 */
 324int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 325{
 326        struct irq_desc *desc = irq_to_desc(irq);
 327        unsigned long flags;
 328        int ret;
 329
 330        raw_spin_lock_irqsave(&desc->lock, flags);
 331        ret = setup_affinity(irq, desc, mask);
 332        raw_spin_unlock_irqrestore(&desc->lock, flags);
 333        return ret;
 334}
 335
 336#else
 337static inline int
 338setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 339{
 340        return 0;
 341}
 342#endif
 343
 344void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 345{
 346        if (suspend) {
 347                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
 348                        return;
 349                desc->istate |= IRQS_SUSPENDED;
 350        }
 351
 352        if (!desc->depth++)
 353                irq_disable(desc);
 354}
 355
 356static int __disable_irq_nosync(unsigned int irq)
 357{
 358        unsigned long flags;
 359        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 360
 361        if (!desc)
 362                return -EINVAL;
 363        __disable_irq(desc, irq, false);
 364        irq_put_desc_busunlock(desc, flags);
 365        return 0;
 366}
 367
 368/**
 369 *      disable_irq_nosync - disable an irq without waiting
 370 *      @irq: Interrupt to disable
 371 *
 372 *      Disable the selected interrupt line.  Disables and Enables are
 373 *      nested.
 374 *      Unlike disable_irq(), this function does not ensure existing
 375 *      instances of the IRQ handler have completed before returning.
 376 *
 377 *      This function may be called from IRQ context.
 378 */
 379void disable_irq_nosync(unsigned int irq)
 380{
 381        __disable_irq_nosync(irq);
 382}
 383EXPORT_SYMBOL(disable_irq_nosync);
 384
 385/**
 386 *      disable_irq - disable an irq and wait for completion
 387 *      @irq: Interrupt to disable
 388 *
 389 *      Disable the selected interrupt line.  Enables and Disables are
 390 *      nested.
 391 *      This function waits for any pending IRQ handlers for this interrupt
  392 *      to complete before returning. If you use this function while
  393 *      holding a resource the IRQ handler may need, you will deadlock.
 394 *
 395 *      This function may be called - with care - from IRQ context.
 396 */
 397void disable_irq(unsigned int irq)
 398{
 399        if (!__disable_irq_nosync(irq))
 400                synchronize_irq(irq);
 401}
 402EXPORT_SYMBOL(disable_irq);
 403
 404void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 405{
 406        if (resume) {
 407                if (!(desc->istate & IRQS_SUSPENDED)) {
 408                        if (!desc->action)
 409                                return;
 410                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
 411                                return;
 412                        /* Pretend that it got disabled ! */
 413                        desc->depth++;
 414                }
 415                desc->istate &= ~IRQS_SUSPENDED;
 416        }
 417
 418        switch (desc->depth) {
 419        case 0:
 420 err_out:
 421                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 422                break;
 423        case 1: {
 424                if (desc->istate & IRQS_SUSPENDED)
 425                        goto err_out;
 426                /* Prevent probing on this irq: */
 427                irq_settings_set_noprobe(desc);
 428                irq_enable(desc);
 429                check_irq_resend(desc, irq);
 430                /* fall-through */
 431        }
 432        default:
 433                desc->depth--;
 434        }
 435}
 436
 437/**
 438 *      enable_irq - enable handling of an irq
 439 *      @irq: Interrupt to enable
 440 *
 441 *      Undoes the effect of one call to disable_irq().  If this
 442 *      matches the last disable, processing of interrupts on this
 443 *      IRQ line is re-enabled.
 444 *
 445 *      This function may be called from IRQ context only when
 446 *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 447 */
 448void enable_irq(unsigned int irq)
 449{
 450        unsigned long flags;
 451        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 452
 453        if (!desc)
 454                return;
 455        if (WARN(!desc->irq_data.chip,
 456                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 457                goto out;
 458
 459        __enable_irq(desc, irq, false);
 460out:
 461        irq_put_desc_busunlock(desc, flags);
 462}
 463EXPORT_SYMBOL(enable_irq);
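/*
 * Illustrative sketch, not part of the original file: disable_irq() and
 * enable_irq() nest, so each disable must be matched by exactly one enable
 * before the line can fire again.  All "foo" names are hypothetical.
 */
#if 0
static void foo_reprogram(struct foo_dev *foo)
{
	disable_irq(foo->irq);		/* waits for running handlers */
	foo_rewrite_config_regs(foo);	/* handler cannot observe this */
	enable_irq(foo->irq);		/* last matching enable re-arms it */
}
#endif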
 464
 465static int set_irq_wake_real(unsigned int irq, unsigned int on)
 466{
 467        struct irq_desc *desc = irq_to_desc(irq);
 468        int ret = -ENXIO;
 469
 470        if (desc->irq_data.chip->irq_set_wake)
 471                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 472
 473        return ret;
 474}
 475
 476/**
 477 *      irq_set_irq_wake - control irq power management wakeup
 478 *      @irq:   interrupt to control
 479 *      @on:    enable/disable power management wakeup
 480 *
 481 *      Enable/disable power management wakeup mode, which is
 482 *      disabled by default.  Enables and disables must match,
 483 *      just as they match for non-wakeup mode support.
 484 *
 485 *      Wakeup mode lets this IRQ wake the system from sleep
 486 *      states like "suspend to RAM".
 487 */
 488int irq_set_irq_wake(unsigned int irq, unsigned int on)
 489{
 490        unsigned long flags;
 491        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 492        int ret = 0;
 493
 494        /* wakeup-capable irqs can be shared between drivers that
 495         * don't need to have the same sleep mode behaviors.
 496         */
 497        if (on) {
 498                if (desc->wake_depth++ == 0) {
 499                        ret = set_irq_wake_real(irq, on);
 500                        if (ret)
 501                                desc->wake_depth = 0;
 502                        else
 503                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 504                }
 505        } else {
 506                if (desc->wake_depth == 0) {
 507                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 508                } else if (--desc->wake_depth == 0) {
 509                        ret = set_irq_wake_real(irq, on);
 510                        if (ret)
 511                                desc->wake_depth = 1;
 512                        else
 513                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 514                }
 515        }
 516        irq_put_desc_busunlock(desc, flags);
 517        return ret;
 518}
 519EXPORT_SYMBOL(irq_set_irq_wake);
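/*
 * Illustrative sketch, not part of the original file: balanced wakeup
 * enable/disable from a driver's suspend/resume callbacks, as the
 * kernel-doc above requires.  All "foo" names are hypothetical.
 */
#if 0
static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 1);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 0);
	return 0;
}
#endif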
 520
 521/*
 522 * Internal function that tells the architecture code whether a
 523 * particular irq has been exclusively allocated or is available
 524 * for driver use.
 525 */
 526int can_request_irq(unsigned int irq, unsigned long irqflags)
 527{
 528        unsigned long flags;
 529        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 530        int canrequest = 0;
 531
 532        if (!desc)
 533                return 0;
 534
 535        if (irq_settings_can_request(desc)) {
 536                if (desc->action)
 537                        if (irqflags & desc->action->flags & IRQF_SHARED)
  538                                canrequest = 1;
 539        }
 540        irq_put_desc_unlock(desc, flags);
 541        return canrequest;
 542}
 543
 544int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 545                      unsigned long flags)
 546{
 547        struct irq_chip *chip = desc->irq_data.chip;
 548        int ret, unmask = 0;
 549
 550        if (!chip || !chip->irq_set_type) {
 551                /*
 552                 * IRQF_TRIGGER_* but the PIC does not support multiple
 553                 * flow-types?
 554                 */
 555                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 556                                chip ? (chip->name ? : "unknown") : "unknown");
 557                return 0;
 558        }
 559
 560        flags &= IRQ_TYPE_SENSE_MASK;
 561
 562        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 563                if (!irqd_irq_masked(&desc->irq_data))
 564                        mask_irq(desc);
 565                if (!irqd_irq_disabled(&desc->irq_data))
 566                        unmask = 1;
 567        }
 568
 569        /* caller masked out all except trigger mode flags */
 570        ret = chip->irq_set_type(&desc->irq_data, flags);
 571
 572        switch (ret) {
 573        case IRQ_SET_MASK_OK:
 574                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 575                irqd_set(&desc->irq_data, flags);
 576
 577        case IRQ_SET_MASK_OK_NOCOPY:
 578                flags = irqd_get_trigger_type(&desc->irq_data);
 579                irq_settings_set_trigger_mask(desc, flags);
 580                irqd_clear(&desc->irq_data, IRQD_LEVEL);
 581                irq_settings_clr_level(desc);
 582                if (flags & IRQ_TYPE_LEVEL_MASK) {
 583                        irq_settings_set_level(desc);
 584                        irqd_set(&desc->irq_data, IRQD_LEVEL);
 585                }
 586
 587                ret = 0;
 588                break;
 589        default:
 590                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
 591                       flags, irq, chip->irq_set_type);
 592        }
 593        if (unmask)
 594                unmask_irq(desc);
 595        return ret;
 596}
 597
 598/*
 599 * Default primary interrupt handler for threaded interrupts. Is
 600 * assigned as primary handler when request_threaded_irq is called
 601 * with handler == NULL. Useful for oneshot interrupts.
 602 */
 603static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 604{
 605        return IRQ_WAKE_THREAD;
 606}
 607
 608/*
 609 * Primary handler for nested threaded interrupts. Should never be
 610 * called.
 611 */
 612static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 613{
 614        WARN(1, "Primary handler called for nested irq %d\n", irq);
 615        return IRQ_NONE;
 616}
 617
 618static int irq_wait_for_interrupt(struct irqaction *action)
 619{
 620        while (!kthread_should_stop()) {
 621                set_current_state(TASK_INTERRUPTIBLE);
 622
 623                if (test_and_clear_bit(IRQTF_RUNTHREAD,
 624                                       &action->thread_flags)) {
 625                        __set_current_state(TASK_RUNNING);
 626                        return 0;
 627                }
 628                schedule();
 629        }
 630        return -1;
 631}
 632
 633/*
 634 * Oneshot interrupts keep the irq line masked until the threaded
 635 * handler finished. unmask if the interrupt has not been disabled and
 636 * is marked MASKED.
 637 */
 638static void irq_finalize_oneshot(struct irq_desc *desc,
 639                                 struct irqaction *action, bool force)
 640{
 641        if (!(desc->istate & IRQS_ONESHOT))
 642                return;
 643again:
 644        chip_bus_lock(desc);
 645        raw_spin_lock_irq(&desc->lock);
 646
 647        /*
  648         * Implausible though it may be, we need to protect ourselves
  649         * against the following scenario:
  650         *
  651         * The thread finishes faster than the hard interrupt handler
  652         * on the other CPU. If we unmask the irq line then the
  653         * interrupt can come in again, mask the line and leave due
  654         * to IRQS_INPROGRESS, and the irq line stays masked forever.
  655         *
  656         * This also serializes the state of shared oneshot handlers
  657         * versus "desc->threads_oneshot |= action->thread_mask;" in
 658         * irq_wake_thread(). See the comment there which explains the
 659         * serialization.
 660         */
 661        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 662                raw_spin_unlock_irq(&desc->lock);
 663                chip_bus_sync_unlock(desc);
 664                cpu_relax();
 665                goto again;
 666        }
 667
 668        /*
 669         * Now check again, whether the thread should run. Otherwise
 670         * we would clear the threads_oneshot bit of this thread which
 671         * was just set.
 672         */
 673        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 674                goto out_unlock;
 675
 676        desc->threads_oneshot &= ~action->thread_mask;
 677
 678        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 679            irqd_irq_masked(&desc->irq_data))
 680                unmask_irq(desc);
 681
 682out_unlock:
 683        raw_spin_unlock_irq(&desc->lock);
 684        chip_bus_sync_unlock(desc);
 685}
 686
 687#ifdef CONFIG_SMP
 688/*
  689 * Check whether we need to change the affinity of the interrupt thread.
 690 */
 691static void
 692irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 693{
 694        cpumask_var_t mask;
 695
 696        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 697                return;
 698
 699        /*
 700         * In case we are out of memory we set IRQTF_AFFINITY again and
 701         * try again next time
 702         */
 703        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 704                set_bit(IRQTF_AFFINITY, &action->thread_flags);
 705                return;
 706        }
 707
 708        raw_spin_lock_irq(&desc->lock);
 709        cpumask_copy(mask, desc->irq_data.affinity);
 710        raw_spin_unlock_irq(&desc->lock);
 711
 712        set_cpus_allowed_ptr(current, mask);
 713        free_cpumask_var(mask);
 714}
 715#else
 716static inline void
 717irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 718#endif
 719
 720/*
  721 * Interrupts which are not explicitly requested as threaded
 722 * interrupts rely on the implicit bh/preempt disable of the hard irq
 723 * context. So we need to disable bh here to avoid deadlocks and other
 724 * side effects.
 725 */
 726static void
 727irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 728{
 729        local_bh_disable();
 730        action->thread_fn(action->irq, action->dev_id);
 731        irq_finalize_oneshot(desc, action, false);
 732        local_bh_enable();
 733}
 734
 735/*
  736 * Interrupts explicitly requested as threaded interrupts want to be
  737 * preemptible - many of them need to sleep and wait for slow buses to
 738 * complete.
 739 */
 740static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
 741{
 742        action->thread_fn(action->irq, action->dev_id);
 743        irq_finalize_oneshot(desc, action, false);
 744}
 745
 746/*
 747 * Interrupt handler thread
 748 */
 749static int irq_thread(void *data)
 750{
 751        static const struct sched_param param = {
 752                .sched_priority = MAX_USER_RT_PRIO/2,
 753        };
 754        struct irqaction *action = data;
 755        struct irq_desc *desc = irq_to_desc(action->irq);
 756        void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
 757        int wake;
 758
  759        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
  760                                         &action->thread_flags))
 761                handler_fn = irq_forced_thread_fn;
 762        else
 763                handler_fn = irq_thread_fn;
 764
 765        sched_setscheduler(current, SCHED_FIFO, &param);
 766        current->irqaction = action;
 767
 768        while (!irq_wait_for_interrupt(action)) {
 769
 770                irq_thread_check_affinity(desc, action);
 771
 772                atomic_inc(&desc->threads_active);
 773
 774                raw_spin_lock_irq(&desc->lock);
 775                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
 776                        /*
 777                         * CHECKME: We might need a dedicated
 778                         * IRQ_THREAD_PENDING flag here, which
 779                         * retriggers the thread in check_irq_resend()
 780                         * but AFAICT IRQS_PENDING should be fine as it
 781                         * retriggers the interrupt itself --- tglx
 782                         */
 783                        desc->istate |= IRQS_PENDING;
 784                        raw_spin_unlock_irq(&desc->lock);
 785                } else {
 786                        raw_spin_unlock_irq(&desc->lock);
 787                        handler_fn(desc, action);
 788                }
 789
 790                wake = atomic_dec_and_test(&desc->threads_active);
 791
 792                if (wake && waitqueue_active(&desc->wait_for_threads))
 793                        wake_up(&desc->wait_for_threads);
 794        }
 795
 796        /* Prevent a stale desc->threads_oneshot */
 797        irq_finalize_oneshot(desc, action, true);
 798
 799        /*
  800         * Clear irqaction. Otherwise exit_irq_thread() would complain
  801         * about an active irq thread going into nirvana.
 802         */
 803        current->irqaction = NULL;
 804        return 0;
 805}
 806
 807/*
 808 * Called from do_exit()
 809 */
 810void exit_irq_thread(void)
 811{
 812        struct task_struct *tsk = current;
 813        struct irq_desc *desc;
 814
 815        if (!tsk->irqaction)
 816                return;
 817
 818        printk(KERN_ERR
 819               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 820               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 821
 822        desc = irq_to_desc(tsk->irqaction->irq);
 823
 824        /*
 825         * Prevent a stale desc->threads_oneshot. Must be called
 826         * before setting the IRQTF_DIED flag.
 827         */
 828        irq_finalize_oneshot(desc, tsk->irqaction, true);
 829
 830        /*
 831         * Set the THREAD DIED flag to prevent further wakeups of the
 832         * soon to be gone threaded handler.
 833         */
  834        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
 835}
 836
 837static void irq_setup_forced_threading(struct irqaction *new)
 838{
 839        if (!force_irqthreads)
 840                return;
 841        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 842                return;
 843
 844        new->flags |= IRQF_ONESHOT;
 845
 846        if (!new->thread_fn) {
 847                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
 848                new->thread_fn = new->handler;
 849                new->handler = irq_default_primary_handler;
 850        }
 851}
 852
 853/*
 854 * Internal function to register an irqaction - typically used to
 855 * allocate special interrupts that are part of the architecture.
 856 */
 857static int
 858__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 859{
 860        struct irqaction *old, **old_ptr;
 861        const char *old_name = NULL;
 862        unsigned long flags, thread_mask = 0;
 863        int ret, nested, shared = 0;
 864        cpumask_var_t mask;
 865
 866        if (!desc)
 867                return -EINVAL;
 868
 869        if (desc->irq_data.chip == &no_irq_chip)
 870                return -ENOSYS;
 871        /*
 872         * Some drivers like serial.c use request_irq() heavily,
 873         * so we have to be careful not to interfere with a
 874         * running system.
 875         */
 876        if (new->flags & IRQF_SAMPLE_RANDOM) {
 877                /*
 878                 * This function might sleep, we want to call it first,
 879                 * outside of the atomic block.
 880                 * Yes, this might clear the entropy pool if the wrong
 881                 * driver is attempted to be loaded, without actually
  882                 * installing a new handler, but is this really a problem?
  883                 * Only the sysadmin is able to do this.
 884                 */
 885                rand_initialize_irq(irq);
 886        }
 887
 888        /*
 889         * Check whether the interrupt nests into another interrupt
 890         * thread.
 891         */
 892        nested = irq_settings_is_nested_thread(desc);
 893        if (nested) {
 894                if (!new->thread_fn)
 895                        return -EINVAL;
 896                /*
 897                 * Replace the primary handler which was provided from
 898                 * the driver for non nested interrupt handling by the
 899                 * dummy function which warns when called.
 900                 */
 901                new->handler = irq_nested_primary_handler;
 902        } else {
 903                irq_setup_forced_threading(new);
 904        }
 905
 906        /*
 907         * Create a handler thread when a thread function is supplied
 908         * and the interrupt does not nest into another interrupt
 909         * thread.
 910         */
 911        if (new->thread_fn && !nested) {
 912                struct task_struct *t;
 913
 914                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 915                                   new->name);
 916                if (IS_ERR(t))
 917                        return PTR_ERR(t);
 918                /*
 919                 * We keep the reference to the task struct even if
 920                 * the thread dies to avoid that the interrupt code
 921                 * references an already freed task_struct.
 922                 */
 923                get_task_struct(t);
 924                new->thread = t;
 925        }
 926
 927        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 928                ret = -ENOMEM;
 929                goto out_thread;
 930        }
 931
 932        /*
 933         * The following block of code has to be executed atomically
 934         */
 935        raw_spin_lock_irqsave(&desc->lock, flags);
 936        old_ptr = &desc->action;
 937        old = *old_ptr;
 938        if (old) {
 939                /*
 940                 * Can't share interrupts unless both agree to and are
 941                 * the same type (level, edge, polarity). So both flag
 942                 * fields must have IRQF_SHARED set and the bits which
 943                 * set the trigger type must match. Also all must
 944                 * agree on ONESHOT.
 945                 */
 946                if (!((old->flags & new->flags) & IRQF_SHARED) ||
 947                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
 948                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
 949                        old_name = old->name;
 950                        goto mismatch;
 951                }
 952
 953                /* All handlers must agree on per-cpuness */
 954                if ((old->flags & IRQF_PERCPU) !=
 955                    (new->flags & IRQF_PERCPU))
 956                        goto mismatch;
 957
 958                /* add new interrupt at end of irq queue */
 959                do {
 960                        thread_mask |= old->thread_mask;
 961                        old_ptr = &old->next;
 962                        old = *old_ptr;
 963                } while (old);
 964                shared = 1;
 965        }
 966
 967        /*
 968         * Setup the thread mask for this irqaction. Unlikely to have
  969         * 32 (or 64 on 64-bit) irqs sharing one line, but who knows.
 970         */
 971        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
 972                ret = -EBUSY;
 973                goto out_mask;
 974        }
 975        new->thread_mask = 1 << ffz(thread_mask);
 976
 977        if (!shared) {
 978                init_waitqueue_head(&desc->wait_for_threads);
 979
 980                /* Setup the type (level, edge polarity) if configured: */
 981                if (new->flags & IRQF_TRIGGER_MASK) {
 982                        ret = __irq_set_trigger(desc, irq,
 983                                        new->flags & IRQF_TRIGGER_MASK);
 984
 985                        if (ret)
 986                                goto out_mask;
 987                }
 988
 989                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
 990                                  IRQS_ONESHOT | IRQS_WAITING);
 991                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 992
 993                if (new->flags & IRQF_PERCPU) {
 994                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
 995                        irq_settings_set_per_cpu(desc);
 996                }
 997
 998                if (new->flags & IRQF_ONESHOT)
 999                        desc->istate |= IRQS_ONESHOT;
1000
1001                if (irq_settings_can_autoenable(desc))
1002                        irq_startup(desc);
1003                else
1004                        /* Undo nested disables: */
1005                        desc->depth = 1;
1006
1007                /* Exclude IRQ from balancing if requested */
1008                if (new->flags & IRQF_NOBALANCING) {
1009                        irq_settings_set_no_balancing(desc);
1010                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1011                }
1012
1013                /* Set default affinity mask once everything is setup */
1014                setup_affinity(irq, desc, mask);
1015
1016        } else if (new->flags & IRQF_TRIGGER_MASK) {
1017                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1018                unsigned int omsk = irq_settings_get_trigger_mask(desc);
1019
1020                if (nmsk != omsk)
 1021                        /* hope the handler works with the current trigger mode */
 1022                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
 1023                                   irq, omsk, nmsk);
1024        }
1025
1026        new->irq = irq;
1027        *old_ptr = new;
1028
1029        /* Reset broken irq detection when installing new handler */
1030        desc->irq_count = 0;
1031        desc->irqs_unhandled = 0;
1032
1033        /*
1034         * Check whether we disabled the irq via the spurious handler
1035         * before. Reenable it and give it another chance.
1036         */
1037        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1038                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1039                __enable_irq(desc, irq, false);
1040        }
1041
1042        raw_spin_unlock_irqrestore(&desc->lock, flags);
1043
1044        /*
1045         * Strictly no need to wake it up, but hung_task complains
1046         * when no hard interrupt wakes the thread up.
1047         */
1048        if (new->thread)
1049                wake_up_process(new->thread);
1050
1051        register_irq_proc(irq, desc);
1052        new->dir = NULL;
1053        register_handler_proc(irq, new);
1054        free_cpumask_var(mask);
1055
1056        return 0;
1057
1058mismatch:
1059#ifdef CONFIG_DEBUG_SHIRQ
1060        if (!(new->flags & IRQF_PROBE_SHARED)) {
1061                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
1062                if (old_name)
1063                        printk(KERN_ERR "current handler: %s\n", old_name);
1064                dump_stack();
1065        }
1066#endif
1067        ret = -EBUSY;
1068
1069out_mask:
1070        raw_spin_unlock_irqrestore(&desc->lock, flags);
1071        free_cpumask_var(mask);
1072
1073out_thread:
1074        if (new->thread) {
1075                struct task_struct *t = new->thread;
1076
1077                new->thread = NULL;
1078                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
1079                        kthread_stop(t);
1080                put_task_struct(t);
1081        }
1082        return ret;
1083}
1084
1085/**
1086 *      setup_irq - setup an interrupt
1087 *      @irq: Interrupt line to setup
1088 *      @act: irqaction for the interrupt
1089 *
1090 * Used to statically setup interrupts in the early boot process.
1091 */
1092int setup_irq(unsigned int irq, struct irqaction *act)
1093{
1094        int retval;
1095        struct irq_desc *desc = irq_to_desc(irq);
1096
1097        chip_bus_lock(desc);
1098        retval = __setup_irq(irq, desc, act);
1099        chip_bus_sync_unlock(desc);
1100
1101        return retval;
1102}
1103EXPORT_SYMBOL_GPL(setup_irq);
1104
 1105/*
1106 * Internal function to unregister an irqaction - used to free
1107 * regular and special interrupts that are part of the architecture.
1108 */
1109static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1110{
1111        struct irq_desc *desc = irq_to_desc(irq);
1112        struct irqaction *action, **action_ptr;
1113        unsigned long flags;
1114
1115        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1116
1117        if (!desc)
1118                return NULL;
1119
1120        raw_spin_lock_irqsave(&desc->lock, flags);
1121
1122        /*
1123         * There can be multiple actions per IRQ descriptor, find the right
1124         * one based on the dev_id:
1125         */
1126        action_ptr = &desc->action;
1127        for (;;) {
1128                action = *action_ptr;
1129
1130                if (!action) {
1131                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
1132                        raw_spin_unlock_irqrestore(&desc->lock, flags);
1133
1134                        return NULL;
1135                }
1136
1137                if (action->dev_id == dev_id)
1138                        break;
1139                action_ptr = &action->next;
1140        }
1141
1142        /* Found it - now remove it from the list of entries: */
1143        *action_ptr = action->next;
1144
1145        /* Currently used only by UML, might disappear one day: */
1146#ifdef CONFIG_IRQ_RELEASE_METHOD
1147        if (desc->irq_data.chip->release)
1148                desc->irq_data.chip->release(irq, dev_id);
1149#endif
1150
1151        /* If this was the last handler, shut down the IRQ line: */
1152        if (!desc->action)
1153                irq_shutdown(desc);
1154
1155#ifdef CONFIG_SMP
1156        /* make sure affinity_hint is cleaned up */
1157        if (WARN_ON_ONCE(desc->affinity_hint))
1158                desc->affinity_hint = NULL;
1159#endif
1160
1161        raw_spin_unlock_irqrestore(&desc->lock, flags);
1162
1163        unregister_handler_proc(irq, action);
1164
1165        /* Make sure it's not being used on another CPU: */
1166        synchronize_irq(irq);
1167
1168#ifdef CONFIG_DEBUG_SHIRQ
1169        /*
1170         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1171         * event to happen even now it's being freed, so let's make sure that
1172         * is so by doing an extra call to the handler ....
1173         *
1174         * ( We do this after actually deregistering it, to make sure that a
 1175 *   'real' IRQ doesn't run in parallel with our fake. )
1176         */
1177        if (action->flags & IRQF_SHARED) {
1178                local_irq_save(flags);
1179                action->handler(irq, dev_id);
1180                local_irq_restore(flags);
1181        }
1182#endif
1183
1184        if (action->thread) {
1185                if (!test_bit(IRQTF_DIED, &action->thread_flags))
1186                        kthread_stop(action->thread);
1187                put_task_struct(action->thread);
1188        }
1189
1190        return action;
1191}
1192
1193/**
1194 *      remove_irq - free an interrupt
1195 *      @irq: Interrupt line to free
1196 *      @act: irqaction for the interrupt
1197 *
1198 * Used to remove interrupts statically setup by the early boot process.
1199 */
1200void remove_irq(unsigned int irq, struct irqaction *act)
1201{
1202        __free_irq(irq, act->dev_id);
1203}
1204EXPORT_SYMBOL_GPL(remove_irq);
1205
1206/**
1207 *      free_irq - free an interrupt allocated with request_irq
1208 *      @irq: Interrupt line to free
1209 *      @dev_id: Device identity to free
1210 *
1211 *      Remove an interrupt handler. The handler is removed and if the
1212 *      interrupt line is no longer in use by any driver it is disabled.
1213 *      On a shared IRQ the caller must ensure the interrupt is disabled
1214 *      on the card it drives before calling this function. The function
1215 *      does not return until any executing interrupts for this IRQ
1216 *      have completed.
1217 *
1218 *      This function must not be called from interrupt context.
1219 */
1220void free_irq(unsigned int irq, void *dev_id)
1221{
1222        struct irq_desc *desc = irq_to_desc(irq);
1223
1224        if (!desc)
1225                return;
1226
1227#ifdef CONFIG_SMP
1228        if (WARN_ON(desc->affinity_notify))
1229                desc->affinity_notify = NULL;
1230#endif
1231
1232        chip_bus_lock(desc);
1233        kfree(__free_irq(irq, dev_id));
1234        chip_bus_sync_unlock(desc);
1235}
1236EXPORT_SYMBOL(free_irq);
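/*
 * Illustrative sketch, not part of the original file: on a shared line the
 * device must be silenced before free_irq(), and the dev_id cookie must be
 * the one passed at request time.  All "foo" names are hypothetical.
 */
#if 0
static void foo_remove(struct foo_dev *foo)
{
	foo_hw_mask_irqs(foo);		/* stop the card asserting the line */
	free_irq(foo->irq, foo);	/* waits for handlers, then unhooks */
}
#endif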
1237
1238/**
1239 *      request_threaded_irq - allocate an interrupt line
1240 *      @irq: Interrupt line to allocate
1241 *      @handler: Function to be called when the IRQ occurs.
1242 *                Primary handler for threaded interrupts
1243 *                If NULL and thread_fn != NULL the default
1244 *                primary handler is installed
1245 *      @thread_fn: Function called from the irq handler thread
1246 *                  If NULL, no irq thread is created
1247 *      @irqflags: Interrupt type flags
1248 *      @devname: An ascii name for the claiming device
1249 *      @dev_id: A cookie passed back to the handler function
1250 *
1251 *      This call allocates interrupt resources and enables the
1252 *      interrupt line and IRQ handling. From the point this
1253 *      call is made your handler function may be invoked. Since
1254 *      your handler function must clear any interrupt the board
1255 *      raises, you must take care both to initialise your hardware
1256 *      and to set up the interrupt handler in the right order.
1257 *
1258 *      If you want to set up a threaded irq handler for your device
 1259 *      then you need to supply @handler and @thread_fn. @handler is
1260 *      still called in hard interrupt context and has to check
1261 *      whether the interrupt originates from the device. If yes it
1262 *      needs to disable the interrupt on the device and return
1263 *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1264 *      @thread_fn. This split handler design is necessary to support
1265 *      shared interrupts.
1266 *
1267 *      Dev_id must be globally unique. Normally the address of the
1268 *      device data structure is used as the cookie. Since the handler
1269 *      receives this value it makes sense to use it.
1270 *
1271 *      If your interrupt is shared you must pass a non NULL dev_id
1272 *      as this is required when freeing the interrupt.
1273 *
1274 *      Flags:
1275 *
1276 *      IRQF_SHARED             Interrupt is shared
1277 *      IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
1278 *      IRQF_TRIGGER_*          Specify active edge(s) or level
1279 *
1280 */
1281int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1282                         irq_handler_t thread_fn, unsigned long irqflags,
1283                         const char *devname, void *dev_id)
1284{
1285        struct irqaction *action;
1286        struct irq_desc *desc;
1287        int retval;
1288
1289        /*
1290         * Sanity-check: shared interrupts must pass in a real dev-ID,
1291         * otherwise we'll have trouble later trying to figure out
1292         * which interrupt is which (messes up the interrupt freeing
1293         * logic etc).
1294         */
1295        if ((irqflags & IRQF_SHARED) && !dev_id)
1296                return -EINVAL;
1297
1298        desc = irq_to_desc(irq);
1299        if (!desc)
1300                return -EINVAL;
1301
1302        if (!irq_settings_can_request(desc))
1303                return -EINVAL;
1304
1305        if (!handler) {
1306                if (!thread_fn)
1307                        return -EINVAL;
1308                handler = irq_default_primary_handler;
1309        }
1310
1311        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1312        if (!action)
1313                return -ENOMEM;
1314
1315        action->handler = handler;
1316        action->thread_fn = thread_fn;
1317        action->flags = irqflags;
1318        action->name = devname;
1319        action->dev_id = dev_id;
1320
1321        chip_bus_lock(desc);
1322        retval = __setup_irq(irq, desc, action);
1323        chip_bus_sync_unlock(desc);
1324
1325        if (retval)
1326                kfree(action);
1327
1328#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1329        if (!retval && (irqflags & IRQF_SHARED)) {
1330                /*
1331                 * It's a shared IRQ -- the driver ought to be prepared for it
1332                 * to happen immediately, so let's make sure....
1333                 * We disable the irq to make sure that a 'real' IRQ doesn't
1334                 * run in parallel with our fake.
1335                 */
1336                unsigned long flags;
1337
1338                disable_irq(irq);
1339                local_irq_save(flags);
1340
1341                handler(irq, dev_id);
1342
1343                local_irq_restore(flags);
1344                enable_irq(irq);
1345        }
1346#endif
1347        return retval;
1348}
1349EXPORT_SYMBOL(request_threaded_irq);
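/*
 * Illustrative sketch, not part of the original file, of the split handler
 * design described in the kernel-doc above: the primary handler runs in
 * hard irq context, checks whether the device raised the interrupt,
 * quiesces it and wakes the thread; the threaded handler may sleep.
 * All "foo" names are hypothetical.
 */
#if 0
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;		/* not ours on a shared line */

	foo_mask_device_irq(foo);		/* keep the line quiet ... */
	return IRQ_WAKE_THREAD;			/* ... until the thread ran */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);		/* may sleep, e.g. slow bus I/O */
	foo_unmask_device_irq(foo);
	return IRQ_HANDLED;
}

static int foo_request(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}
#endif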
1350
1351/**
1352 *      request_any_context_irq - allocate an interrupt line
1353 *      @irq: Interrupt line to allocate
1354 *      @handler: Function to be called when the IRQ occurs.
1355 *                Threaded handler for threaded interrupts.
1356 *      @flags: Interrupt type flags
1357 *      @name: An ascii name for the claiming device
1358 *      @dev_id: A cookie passed back to the handler function
1359 *
1360 *      This call allocates interrupt resources and enables the
1361 *      interrupt line and IRQ handling. It selects either a
1362 *      hardirq or threaded handling method depending on the
1363 *      context.
1364 *
1365 *      On failure, it returns a negative value. On success,
1366 *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1367 */
1368int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1369                            unsigned long flags, const char *name, void *dev_id)
1370{
1371        struct irq_desc *desc = irq_to_desc(irq);
1372        int ret;
1373
1374        if (!desc)
1375                return -EINVAL;
1376
1377        if (irq_settings_is_nested_thread(desc)) {
1378                ret = request_threaded_irq(irq, NULL, handler,
1379                                           flags, name, dev_id);
1380                return !ret ? IRQC_IS_NESTED : ret;
1381        }
1382
1383        ret = request_irq(irq, handler, flags, name, dev_id);
1384        return !ret ? IRQC_IS_HARDIRQ : ret;
1385}
1386EXPORT_SYMBOL_GPL(request_any_context_irq);
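/*
 * Illustrative sketch, not part of the original file: callers treat any
 * negative return as failure; IRQC_IS_HARDIRQ / IRQC_IS_NESTED only report
 * which handling method was picked.  All "foo" names are hypothetical.
 */
#if 0
	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
	if (ret < 0)
		return ret;
	foo->irq_is_nested = (ret == IRQC_IS_NESTED);
#endif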
1387