linux/include/linux/interrupt.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* interrupt.h */
   3#ifndef _LINUX_INTERRUPT_H
   4#define _LINUX_INTERRUPT_H
   5
   6#include <linux/kernel.h>
   7#include <linux/bitops.h>
   8#include <linux/cpumask.h>
   9#include <linux/irqreturn.h>
  10#include <linux/irqnr.h>
  11#include <linux/hardirq.h>
  12#include <linux/irqflags.h>
  13#include <linux/hrtimer.h>
  14#include <linux/kref.h>
  15#include <linux/workqueue.h>
  16#include <linux/jump_label.h>
  17
  18#include <linux/atomic.h>
  19#include <asm/ptrace.h>
  20#include <asm/irq.h>
  21#include <asm/sections.h>
  22
  23/*
  24 * These correspond to the IORESOURCE_IRQ_* defines in
  25 * linux/ioport.h to select the interrupt line behaviour.  When
   26 * requesting an interrupt without specifying an IRQF_TRIGGER, the
  27 * setting should be assumed to be "as already configured", which
  28 * may be as per machine or firmware initialisation.
  29 */
  30#define IRQF_TRIGGER_NONE       0x00000000
  31#define IRQF_TRIGGER_RISING     0x00000001
  32#define IRQF_TRIGGER_FALLING    0x00000002
  33#define IRQF_TRIGGER_HIGH       0x00000004
  34#define IRQF_TRIGGER_LOW        0x00000008
  35#define IRQF_TRIGGER_MASK       (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
  36                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
  37#define IRQF_TRIGGER_PROBE      0x00000010
  38
  39/*
   40 * These flags are used only by the kernel as part of the
  41 * irq handling routines.
  42 *
  43 * IRQF_SHARED - allow sharing the irq among several devices
  44 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
  45 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
  46 * IRQF_PERCPU - Interrupt is per cpu
  47 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
  48 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
  49 *                registered first in a shared interrupt is considered for
  50 *                performance reasons)
  51 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  52 *                Used by threaded interrupts which need to keep the
  53 *                irq line disabled until the threaded handler has been run.
  54 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
  55 *                   that this interrupt will wake the system from a suspended
  56 *                   state.  See Documentation/power/suspend-and-interrupts.rst
  57 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  58 * IRQF_NO_THREAD - Interrupt cannot be threaded
  59 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
  60 *                resume time.
  61 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
  62 *                interrupt handler after suspending interrupts. For system
   63 *                wakeup devices, users need to implement wakeup detection in
  64 *                their interrupt handlers.
  65 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
  66 *                Users will enable it explicitly by enable_irq() or enable_nmi()
  67 *                later.
   68 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
  69 *                 depends on IRQF_PERCPU.
  70 */
  71#define IRQF_SHARED             0x00000080
  72#define IRQF_PROBE_SHARED       0x00000100
  73#define __IRQF_TIMER            0x00000200
  74#define IRQF_PERCPU             0x00000400
  75#define IRQF_NOBALANCING        0x00000800
  76#define IRQF_IRQPOLL            0x00001000
  77#define IRQF_ONESHOT            0x00002000
  78#define IRQF_NO_SUSPEND         0x00004000
  79#define IRQF_FORCE_RESUME       0x00008000
  80#define IRQF_NO_THREAD          0x00010000
  81#define IRQF_EARLY_RESUME       0x00020000
  82#define IRQF_COND_SUSPEND       0x00040000
  83#define IRQF_NO_AUTOEN          0x00080000
  84#define IRQF_NO_DEBUG           0x00100000
  85
  86#define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
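/*
 * Example (editor's sketch, not part of this header): combining the flags
 * above for a hypothetical chip on a slow bus whose level-triggered
 * interrupt must stay masked until the threaded handler has talked to the
 * hardware. struct foo_chip and foo_read_and_clear_events() are invented
 * for illustration.
 */
static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct foo_chip *foo = dev_id;

	foo_read_and_clear_events(foo);	/* hypothetical slow bus access */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_chip *foo, int irq)
{
	/* NULL primary handler: the default one just wakes the thread,
	 * so IRQF_ONESHOT is required to keep the line masked meanwhile */
	return request_threaded_irq(irq, NULL, foo_irq_thread,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "foo", foo);
}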
  87
  88/*
  89 * These values can be returned by request_any_context_irq() and
  90 * describe the context the interrupt will be run in.
  91 *
  92 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
  93 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
  94 */
  95enum {
  96        IRQC_IS_HARDIRQ = 0,
  97        IRQC_IS_NESTED,
  98};
  99
 100typedef irqreturn_t (*irq_handler_t)(int, void *);
 101
 102/**
 103 * struct irqaction - per interrupt action descriptor
 104 * @handler:    interrupt handler function
 105 * @name:       name of the device
 106 * @dev_id:     cookie to identify the device
 107 * @percpu_dev_id:      cookie to identify the device
 108 * @next:       pointer to the next irqaction for shared interrupts
 109 * @irq:        interrupt number
 110 * @flags:      flags (see IRQF_* above)
 111 * @thread_fn:  interrupt handler function for threaded interrupts
 112 * @thread:     thread pointer for threaded interrupts
 113 * @secondary:  pointer to secondary irqaction (force threading)
 114 * @thread_flags:       flags related to @thread
 115 * @thread_mask:        bitmask for keeping track of @thread activity
 116 * @dir:        pointer to the proc/irq/NN/name entry
 117 */
 118struct irqaction {
 119        irq_handler_t           handler;
 120        void                    *dev_id;
 121        void __percpu           *percpu_dev_id;
 122        struct irqaction        *next;
 123        irq_handler_t           thread_fn;
 124        struct task_struct      *thread;
 125        struct irqaction        *secondary;
 126        unsigned int            irq;
 127        unsigned int            flags;
 128        unsigned long           thread_flags;
 129        unsigned long           thread_mask;
 130        const char              *name;
 131        struct proc_dir_entry   *dir;
 132} ____cacheline_internodealigned_in_smp;
 133
 134extern irqreturn_t no_action(int cpl, void *dev_id);
 135
 136/*
 137 * If a (PCI) device interrupt is not connected we set dev->irq to
 138 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
  139 * can distinguish that case from other error returns.
 140 *
 141 * 0x80000000 is guaranteed to be outside the available range of interrupts
 142 * and easy to distinguish from other possible incorrect values.
 143 */
 144#define IRQ_NOTCONNECTED        (1U << 31)
 145
 146extern int __must_check
 147request_threaded_irq(unsigned int irq, irq_handler_t handler,
 148                     irq_handler_t thread_fn,
 149                     unsigned long flags, const char *name, void *dev);
 150
 151/**
 152 * request_irq - Add a handler for an interrupt line
 153 * @irq:        The interrupt line to allocate
 154 * @handler:    Function to be called when the IRQ occurs.
 155 *              Primary handler for threaded interrupts
 156 *              If NULL, the default primary handler is installed
 157 * @flags:      Handling flags
 158 * @name:       Name of the device generating this interrupt
 159 * @dev:        A cookie passed to the handler function
 160 *
 161 * This call allocates an interrupt and establishes a handler; see
 162 * the documentation for request_threaded_irq() for details.
 163 */
 164static inline int __must_check
 165request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
 166            const char *name, void *dev)
 167{
 168        return request_threaded_irq(irq, handler, NULL, flags, name, dev);
 169}
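/*
 * Example (editor's sketch): a minimal request_irq()/free_irq() pairing for
 * a hypothetical PCI device sharing its legacy interrupt line. struct
 * foo_dev and the foo_hw_*() helpers are placeholders, not kernel symbols.
 */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_hw_irq_pending(foo))	/* shared line: might not be ours */
		return IRQ_NONE;

	foo_hw_ack_irq(foo);
	return IRQ_HANDLED;
}

	/* in probe: dev_id must be unique per handler on a shared line */
	ret = request_irq(pdev->irq, foo_isr, IRQF_SHARED, "foo", foo);
	if (ret)
		return ret;

	/* in remove: pass the same cookie that was used when requesting */
	free_irq(pdev->irq, foo);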
 170
 171extern int __must_check
 172request_any_context_irq(unsigned int irq, irq_handler_t handler,
 173                        unsigned long flags, const char *name, void *dev_id);
 174
 175extern int __must_check
 176__request_percpu_irq(unsigned int irq, irq_handler_t handler,
 177                     unsigned long flags, const char *devname,
 178                     void __percpu *percpu_dev_id);
 179
 180extern int __must_check
 181request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
 182            const char *name, void *dev);
 183
 184static inline int __must_check
 185request_percpu_irq(unsigned int irq, irq_handler_t handler,
 186                   const char *devname, void __percpu *percpu_dev_id)
 187{
 188        return __request_percpu_irq(irq, handler, 0,
 189                                    devname, percpu_dev_id);
 190}
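/*
 * Example (editor's sketch): per-CPU interrupt setup, e.g. for a per-CPU
 * timer. Unlike a normal IRQ, each CPU that should receive it must also
 * call enable_percpu_irq() on itself. struct foo_pcpu and
 * foo_percpu_handler() are placeholders; IRQ_TYPE_NONE comes from
 * <linux/irq.h> and DEFINE_PER_CPU from <linux/percpu-defs.h>.
 */
static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_state);

	err = request_percpu_irq(irq, foo_percpu_handler, "foo_percpu",
				 &foo_pcpu_state);
	if (err)
		return err;

	/* later, on each target CPU (e.g. from a CPU hotplug callback): */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	/* ... and symmetrically on that CPU before teardown: */
	disable_percpu_irq(irq);

	/* teardown, once no CPU has it enabled: */
	free_percpu_irq(irq, &foo_pcpu_state);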
 191
 192extern int __must_check
 193request_percpu_nmi(unsigned int irq, irq_handler_t handler,
 194                   const char *devname, void __percpu *dev);
 195
 196extern const void *free_irq(unsigned int, void *);
 197extern void free_percpu_irq(unsigned int, void __percpu *);
 198
 199extern const void *free_nmi(unsigned int irq, void *dev_id);
 200extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
 201
 202struct device;
 203
 204extern int __must_check
 205devm_request_threaded_irq(struct device *dev, unsigned int irq,
 206                          irq_handler_t handler, irq_handler_t thread_fn,
 207                          unsigned long irqflags, const char *devname,
 208                          void *dev_id);
 209
 210static inline int __must_check
 211devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
 212                 unsigned long irqflags, const char *devname, void *dev_id)
 213{
 214        return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
 215                                         devname, dev_id);
 216}
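/*
 * Example (editor's sketch): the managed variant ties the interrupt's
 * lifetime to the device, so no explicit free_irq() is needed on error
 * paths or in remove(). Names other than the devm_* call are illustrative.
 */
	ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0,
			       dev_name(&pdev->dev), foo);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "failed to request IRQ %d\n", irq);
	/* the IRQ is released automatically when the device is unbound */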
 217
 218extern int __must_check
 219devm_request_any_context_irq(struct device *dev, unsigned int irq,
 220                 irq_handler_t handler, unsigned long irqflags,
 221                 const char *devname, void *dev_id);
 222
 223extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 224
 225bool irq_has_action(unsigned int irq);
 226extern void disable_irq_nosync(unsigned int irq);
 227extern bool disable_hardirq(unsigned int irq);
 228extern void disable_irq(unsigned int irq);
 229extern void disable_percpu_irq(unsigned int irq);
 230extern void enable_irq(unsigned int irq);
 231extern void enable_percpu_irq(unsigned int irq, unsigned int type);
 232extern bool irq_percpu_is_enabled(unsigned int irq);
 233extern void irq_wake_thread(unsigned int irq, void *dev_id);
 234
 235extern void disable_nmi_nosync(unsigned int irq);
 236extern void disable_percpu_nmi(unsigned int irq);
 237extern void enable_nmi(unsigned int irq);
 238extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
 239extern int prepare_percpu_nmi(unsigned int irq);
 240extern void teardown_percpu_nmi(unsigned int irq);
 241
 242extern int irq_inject_interrupt(unsigned int irq);
 243
  244/* The following three functions are for core kernel use only. */
 245extern void suspend_device_irqs(void);
 246extern void resume_device_irqs(void);
 247extern void rearm_wake_irq(unsigned int irq);
 248
 249/**
 250 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 251 * @irq:                Interrupt to which notification applies
 252 * @kref:               Reference count, for internal use
 253 * @work:               Work item, for internal use
 254 * @notify:             Function to be called on change.  This will be
 255 *                      called in process context.
 256 * @release:            Function to be called on release.  This will be
 257 *                      called in process context.  Once registered, the
 258 *                      structure must only be freed when this function is
 259 *                      called or later.
 260 */
 261struct irq_affinity_notify {
 262        unsigned int irq;
 263        struct kref kref;
 264        struct work_struct work;
 265        void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
 266        void (*release)(struct kref *ref);
 267};
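/*
 * Example (editor's sketch): embedding struct irq_affinity_notify in a
 * driver object to learn about affinity changes; both callbacks run in
 * process context. struct foo_queue and foo_queue_retarget() are invented
 * for illustration.
 */
static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct foo_queue *q = container_of(notify, struct foo_queue,
					   affinity_notify);

	foo_queue_retarget(q, mask);	/* move per-CPU resources to the new mask */
}

static void foo_affinity_release(struct kref *ref)
{
	/* nothing to free: the notify struct is embedded in foo_queue */
}

	q->affinity_notify.notify = foo_affinity_notify;
	q->affinity_notify.release = foo_affinity_release;
	irq_set_affinity_notifier(q->irq, &q->affinity_notify);

	/* unregister before freeing the IRQ or the queue: */
	irq_set_affinity_notifier(q->irq, NULL);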
 268
 269#define IRQ_AFFINITY_MAX_SETS  4
 270
 271/**
  272 * struct irq_affinity - Description for automatic irq affinity assignments
 273 * @pre_vectors:        Don't apply affinity to @pre_vectors at beginning of
 274 *                      the MSI(-X) vector space
 275 * @post_vectors:       Don't apply affinity to @post_vectors at end of
 276 *                      the MSI(-X) vector space
 277 * @nr_sets:            The number of interrupt sets for which affinity
 278 *                      spreading is required
 279 * @set_size:           Array holding the size of each interrupt set
 280 * @calc_sets:          Callback for calculating the number and size
 281 *                      of interrupt sets
 282 * @priv:               Private data for usage by @calc_sets, usually a
 283 *                      pointer to driver/device specific data.
 284 */
 285struct irq_affinity {
 286        unsigned int    pre_vectors;
 287        unsigned int    post_vectors;
 288        unsigned int    nr_sets;
 289        unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
 290        void            (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
 291        void            *priv;
 292};
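/*
 * Example (editor's sketch): describing which MSI-X vectors should get
 * automatic affinity spreading when they are allocated, here through the
 * PCI helper pci_alloc_irq_vectors_affinity(). Vector 0 is reserved for a
 * config/admin interrupt and excluded from spreading; nr_queues and the
 * exact numbers are illustrative.
 */
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* vector 0: config/admin, not spread */
	};
	int nvecs;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0)
		return nvecs;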
 293
 294/**
 295 * struct irq_affinity_desc - Interrupt affinity descriptor
 296 * @mask:       cpumask to hold the affinity assignment
 297 * @is_managed: 1 if the interrupt is managed internally
 298 */
 299struct irq_affinity_desc {
 300        struct cpumask  mask;
 301        unsigned int    is_managed : 1;
 302};
 303
 304#if defined(CONFIG_SMP)
 305
 306extern cpumask_var_t irq_default_affinity;
 307
 308extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 309extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
 310
 311extern int irq_can_set_affinity(unsigned int irq);
 312extern int irq_select_affinity(unsigned int irq);
 313
 314extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
 315                                     bool setaffinity);
 316
 317/**
 318 * irq_update_affinity_hint - Update the affinity hint
 319 * @irq:        Interrupt to update
 320 * @m:          cpumask pointer (NULL to clear the hint)
 321 *
 322 * Updates the affinity hint, but does not change the affinity of the interrupt.
 323 */
 324static inline int
 325irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
 326{
 327        return __irq_apply_affinity_hint(irq, m, false);
 328}
 329
 330/**
 331 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 332 *                           cpumask to the interrupt
 333 * @irq:        Interrupt to update
 334 * @m:          cpumask pointer (NULL to clear the hint)
 335 *
  336 * Updates the affinity hint and, if @m is not NULL, applies it as the
  337 * affinity of that interrupt.
 338 */
 339static inline int
 340irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
 341{
 342        return __irq_apply_affinity_hint(irq, m, true);
 343}
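/*
 * Example (editor's sketch): a driver placing a queue interrupt on a CPU
 * and publishing that placement through the hint (visible in
 * /proc/irq/<N>/affinity_hint) so userspace irqbalance can follow it.
 * queue->irq and queue->cpu are illustrative fields.
 */
	/* apply the mask and publish it as the hint in one go */
	irq_set_affinity_and_hint(queue->irq, cpumask_of(queue->cpu));

	/* on teardown, clear the hint (the affinity itself is left alone) */
	irq_update_affinity_hint(queue->irq, NULL);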
 344
 345/*
 346 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 347 * instead.
 348 */
 349static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 350{
 351        return irq_set_affinity_and_hint(irq, m);
 352}
 353
 354extern int irq_update_affinity_desc(unsigned int irq,
 355                                    struct irq_affinity_desc *affinity);
 356
 357extern int
 358irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 359
 360struct irq_affinity_desc *
 361irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
 362
 363unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 364                                       const struct irq_affinity *affd);
 365
 366#else /* CONFIG_SMP */
 367
 368static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
 369{
 370        return -EINVAL;
 371}
 372
 373static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
 374{
 375        return 0;
 376}
 377
 378static inline int irq_can_set_affinity(unsigned int irq)
 379{
 380        return 0;
 381}
 382
 383static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 384
 385static inline int irq_update_affinity_hint(unsigned int irq,
 386                                           const struct cpumask *m)
 387{
 388        return -EINVAL;
 389}
 390
 391static inline int irq_set_affinity_and_hint(unsigned int irq,
 392                                            const struct cpumask *m)
 393{
 394        return -EINVAL;
 395}
 396
 397static inline int irq_set_affinity_hint(unsigned int irq,
 398                                        const struct cpumask *m)
 399{
 400        return -EINVAL;
 401}
 402
 403static inline int irq_update_affinity_desc(unsigned int irq,
 404                                           struct irq_affinity_desc *affinity)
 405{
 406        return -EINVAL;
 407}
 408
 409static inline int
 410irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 411{
 412        return 0;
 413}
 414
 415static inline struct irq_affinity_desc *
 416irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 417{
 418        return NULL;
 419}
 420
 421static inline unsigned int
 422irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 423                          const struct irq_affinity *affd)
 424{
 425        return maxvec;
 426}
 427
 428#endif /* CONFIG_SMP */
 429
 430/*
 431 * Special lockdep variants of irq disabling/enabling.
  432 * These should be used for locking constructs that
  433 * know that a particular irq context is disabled
  434 * and is the only irq-context user of a lock, so
  435 * that it is safe to take the lock in the irq-disabled
  436 * section without disabling hardirqs.
 437 *
 438 * On !CONFIG_LOCKDEP they are equivalent to the normal
 439 * irq disable/enable methods.
 440 */
 441static inline void disable_irq_nosync_lockdep(unsigned int irq)
 442{
 443        disable_irq_nosync(irq);
 444#ifdef CONFIG_LOCKDEP
 445        local_irq_disable();
 446#endif
 447}
 448
 449static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
 450{
 451        disable_irq_nosync(irq);
 452#ifdef CONFIG_LOCKDEP
 453        local_irq_save(*flags);
 454#endif
 455}
 456
 457static inline void disable_irq_lockdep(unsigned int irq)
 458{
 459        disable_irq(irq);
 460#ifdef CONFIG_LOCKDEP
 461        local_irq_disable();
 462#endif
 463}
 464
 465static inline void enable_irq_lockdep(unsigned int irq)
 466{
 467#ifdef CONFIG_LOCKDEP
 468        local_irq_enable();
 469#endif
 470        enable_irq(irq);
 471}
 472
 473static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
 474{
 475#ifdef CONFIG_LOCKDEP
 476        local_irq_restore(*flags);
 477#endif
 478        enable_irq(irq);
 479}
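/*
 * Example (editor's sketch): a lock taken from exactly one hardirq handler.
 * Process context can disable just that interrupt and then take the lock
 * without a full local_irq_save(); the lockdep variants additionally
 * disable local interrupts only when CONFIG_LOCKDEP is on, so the checker
 * does not flag an apparently irq-unsafe acquisition. foo and its fields
 * are illustrative.
 */
	unsigned long flags;

	disable_irq_nosync_lockdep_irqsave(foo->irq, &flags);
	spin_lock(&foo->lock);	/* only foo->irq takes this lock in irq context */
	foo_update_ring(foo);	/* hypothetical critical section */
	spin_unlock(&foo->lock);
	enable_irq_lockdep_irqrestore(foo->irq, &flags);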
 480
 481/* IRQ wakeup (PM) control: */
 482extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
 483
 484static inline int enable_irq_wake(unsigned int irq)
 485{
 486        return irq_set_irq_wake(irq, 1);
 487}
 488
 489static inline int disable_irq_wake(unsigned int irq)
 490{
 491        return irq_set_irq_wake(irq, 0);
 492}
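/*
 * Example (editor's sketch): arming an interrupt as a system wakeup source
 * in a driver's suspend callback and disarming it on resume. struct foo_dev
 * and foo->irq are illustrative; device_may_wakeup() is the usual gate.
 */
static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(foo->irq);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->irq);
	return 0;
}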
 493
 494/*
 495 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 496 */
 497enum irqchip_irq_state {
 498        IRQCHIP_STATE_PENDING,          /* Is interrupt pending? */
 499        IRQCHIP_STATE_ACTIVE,           /* Is interrupt in progress? */
 500        IRQCHIP_STATE_MASKED,           /* Is interrupt masked? */
 501        IRQCHIP_STATE_LINE_LEVEL,       /* Is IRQ line high? */
 502};
 503
 504extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 505                                 bool *state);
 506extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 507                                 bool state);
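/*
 * Example (editor's sketch): peeking at and clearing a latched interrupt at
 * the irqchip level, e.g. before handing a line to a guest or while
 * quiescing a device. The irq number is illustrative.
 */
	bool pending;
	int err;

	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	if (!err && pending) {
		/* discard the stale latched interrupt */
		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
	}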
 508
 509#ifdef CONFIG_IRQ_FORCED_THREADING
 510# ifdef CONFIG_PREEMPT_RT
 511#  define force_irqthreads()    (true)
 512# else
 513DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
 514#  define force_irqthreads()    (static_branch_unlikely(&force_irqthreads_key))
 515# endif
 516#else
 517#define force_irqthreads()      (false)
 518#endif
 519
 520#ifndef local_softirq_pending
 521
 522#ifndef local_softirq_pending_ref
 523#define local_softirq_pending_ref irq_stat.__softirq_pending
 524#endif
 525
 526#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
 527#define set_softirq_pending(x)  (__this_cpu_write(local_softirq_pending_ref, (x)))
 528#define or_softirq_pending(x)   (__this_cpu_or(local_softirq_pending_ref, (x)))
 529
 530#endif /* local_softirq_pending */
 531
 532/* Some architectures might implement lazy enabling/disabling of
 533 * interrupts. In some cases, such as stop_machine, we might want
 534 * to ensure that after a local_irq_disable(), interrupts have
 535 * really been disabled in hardware. Such architectures need to
 536 * implement the following hook.
 537 */
 538#ifndef hard_irq_disable
 539#define hard_irq_disable()      do { } while(0)
 540#endif
 541
  542/* PLEASE avoid allocating new softirqs unless you need _really_ high
  543   frequency threaded job scheduling. For almost all purposes
  544   tasklets are more than enough. E.g. all serial device BHs et
  545   al. should be converted to tasklets, not to softirqs.
 546 */
 547
 548enum
 549{
 550        HI_SOFTIRQ=0,
 551        TIMER_SOFTIRQ,
 552        NET_TX_SOFTIRQ,
 553        NET_RX_SOFTIRQ,
 554        BLOCK_SOFTIRQ,
 555        IRQ_POLL_SOFTIRQ,
 556        TASKLET_SOFTIRQ,
 557        SCHED_SOFTIRQ,
 558        HRTIMER_SOFTIRQ,
  559        RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */
 560
 561        NR_SOFTIRQS
 562};
 563
 564/*
 565 * The following vectors can be safely ignored after ksoftirqd is parked:
 566 *
 567 * _ RCU:
 568 *      1) rcutree_migrate_callbacks() migrates the queue.
 569 *      2) rcu_report_dead() reports the final quiescent states.
 570 *
 571 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 572 */
 573#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
 574
 575/* map softirq index to softirq name. update 'softirq_to_name' in
 576 * kernel/softirq.c when adding a new softirq.
 577 */
 578extern const char * const softirq_to_name[NR_SOFTIRQS];
 579
 580/* softirq mask and active fields moved to irq_cpustat_t in
 581 * asm/hardirq.h to get better cache usage.  KAO
 582 */
 583
 584struct softirq_action
 585{
 586        void    (*action)(struct softirq_action *);
 587};
 588
 589asmlinkage void do_softirq(void);
 590asmlinkage void __do_softirq(void);
 591
 592#ifdef CONFIG_PREEMPT_RT
 593extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
 594#else
 595static inline void do_softirq_post_smp_call_flush(unsigned int unused)
 596{
 597        do_softirq();
 598}
 599#endif
 600
 601extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 602extern void softirq_init(void);
 603extern void __raise_softirq_irqoff(unsigned int nr);
 604
 605extern void raise_softirq_irqoff(unsigned int nr);
 606extern void raise_softirq(unsigned int nr);
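/*
 * Example (editor's sketch, illustration only -- the comment above asks you
 * NOT to add softirqs): how a softirq vector is wired up and raised. A real
 * entry would also have to be added to the enum above and to
 * softirq_to_name[]. FOO_SOFTIRQ and foo_softirq_action() are invented.
 */
static void foo_softirq_action(struct softirq_action *h)
{
	/* drain per-CPU work queued by the hardirq path */
}

	/* once, during boot: */
	open_softirq(FOO_SOFTIRQ, foo_softirq_action);

	/* from a hardirq handler or with interrupts already disabled: */
	raise_softirq_irqoff(FOO_SOFTIRQ);

	/* from contexts that may have interrupts enabled: */
	raise_softirq(FOO_SOFTIRQ);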
 607
 608DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
 609
 610static inline struct task_struct *this_cpu_ksoftirqd(void)
 611{
 612        return this_cpu_read(ksoftirqd);
 613}
 614
 615/* Tasklets --- multithreaded analogue of BHs.
 616
 617   This API is deprecated. Please consider using threaded IRQs instead:
 618   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
 619
  620   The main feature distinguishing them from generic softirqs: a tasklet
  621   runs on only one CPU at a time.
  622
  623   The main feature distinguishing them from BHs: different tasklets
  624   may run simultaneously on different CPUs.
  625
  626   Properties:
  627   * If tasklet_schedule() is called, the tasklet is guaranteed
  628     to be executed on some CPU at least once afterwards.
  629   * If the tasklet is already scheduled, but its execution has not yet
  630     started, it will be executed only once.
  631   * If this tasklet is already running on another CPU (or schedule is called
  632     from the tasklet itself), it is rescheduled for later.
  633   * A tasklet is strictly serialized with respect to itself, but not
  634     with respect to other tasklets. If a client needs inter-tasklet
  635     synchronization, it must provide it with spinlocks.
 636 */
 637
 638struct tasklet_struct
 639{
 640        struct tasklet_struct *next;
 641        unsigned long state;
 642        atomic_t count;
 643        bool use_callback;
 644        union {
 645                void (*func)(unsigned long data);
 646                void (*callback)(struct tasklet_struct *t);
 647        };
 648        unsigned long data;
 649};
 650
 651#define DECLARE_TASKLET(name, _callback)                \
 652struct tasklet_struct name = {                          \
 653        .count = ATOMIC_INIT(0),                        \
 654        .callback = _callback,                          \
 655        .use_callback = true,                           \
 656}
 657
 658#define DECLARE_TASKLET_DISABLED(name, _callback)       \
 659struct tasklet_struct name = {                          \
 660        .count = ATOMIC_INIT(1),                        \
 661        .callback = _callback,                          \
 662        .use_callback = true,                           \
 663}
 664
 665#define from_tasklet(var, callback_tasklet, tasklet_fieldname)  \
 666        container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
 667
 668#define DECLARE_TASKLET_OLD(name, _func)                \
 669struct tasklet_struct name = {                          \
 670        .count = ATOMIC_INIT(0),                        \
 671        .func = _func,                                  \
 672}
 673
 674#define DECLARE_TASKLET_DISABLED_OLD(name, _func)       \
 675struct tasklet_struct name = {                          \
 676        .count = ATOMIC_INIT(1),                        \
 677        .func = _func,                                  \
 678}
 679
 680enum
 681{
 682        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
 683        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
 684};
 685
 686#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 687static inline int tasklet_trylock(struct tasklet_struct *t)
 688{
 689        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 690}
 691
 692void tasklet_unlock(struct tasklet_struct *t);
 693void tasklet_unlock_wait(struct tasklet_struct *t);
 694void tasklet_unlock_spin_wait(struct tasklet_struct *t);
 695
 696#else
 697static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 698static inline void tasklet_unlock(struct tasklet_struct *t) { }
 699static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
 700static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
 701#endif
 702
 703extern void __tasklet_schedule(struct tasklet_struct *t);
 704
 705static inline void tasklet_schedule(struct tasklet_struct *t)
 706{
 707        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 708                __tasklet_schedule(t);
 709}
 710
 711extern void __tasklet_hi_schedule(struct tasklet_struct *t);
 712
 713static inline void tasklet_hi_schedule(struct tasklet_struct *t)
 714{
 715        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 716                __tasklet_hi_schedule(t);
 717}
 718
 719static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 720{
 721        atomic_inc(&t->count);
 722        smp_mb__after_atomic();
 723}
 724
 725/*
 726 * Do not use in new code. Disabling tasklets from atomic contexts is
 727 * error prone and should be avoided.
 728 */
 729static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
 730{
 731        tasklet_disable_nosync(t);
 732        tasklet_unlock_spin_wait(t);
 733        smp_mb();
 734}
 735
 736static inline void tasklet_disable(struct tasklet_struct *t)
 737{
 738        tasklet_disable_nosync(t);
 739        tasklet_unlock_wait(t);
 740        smp_mb();
 741}
 742
 743static inline void tasklet_enable(struct tasklet_struct *t)
 744{
 745        smp_mb__before_atomic();
 746        atomic_dec(&t->count);
 747}
 748
 749extern void tasklet_kill(struct tasklet_struct *t);
 750extern void tasklet_init(struct tasklet_struct *t,
 751                         void (*func)(unsigned long), unsigned long data);
 752extern void tasklet_setup(struct tasklet_struct *t,
 753                          void (*callback)(struct tasklet_struct *));
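/*
 * Example (editor's sketch): the current tasklet API, with a callback that
 * takes the tasklet itself and recovers the containing object with
 * from_tasklet(). struct foo_dev, its fields and foo_process_rx() are
 * illustrative; remember the API is deprecated in favour of threaded IRQs
 * (see the link above).
 */
struct foo_dev {
	struct tasklet_struct	rx_tasklet;
	/* ... device state ... */
};

static void foo_rx_tasklet_fn(struct tasklet_struct *t)
{
	struct foo_dev *foo = from_tasklet(foo, t, rx_tasklet);

	foo_process_rx(foo);	/* bottom-half work, runs in softirq context */
}

	/* probe: */
	tasklet_setup(&foo->rx_tasklet, foo_rx_tasklet_fn);

	/* hardirq handler: defer the heavy lifting */
	tasklet_schedule(&foo->rx_tasklet);

	/* remove: make sure it is neither scheduled nor running */
	tasklet_kill(&foo->rx_tasklet);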
 754
 755/*
 756 * Autoprobing for irqs:
 757 *
 758 * probe_irq_on() and probe_irq_off() provide robust primitives
 759 * for accurate IRQ probing during kernel initialization.  They are
 760 * reasonably simple to use, are not "fooled" by spurious interrupts,
 761 * and, unlike other attempts at IRQ probing, they do not get hung on
 762 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 763 *
 764 * For reasonably foolproof probing, use them as follows:
 765 *
 766 * 1. clear and/or mask the device's internal interrupt.
 767 * 2. sti();
 768 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 769 * 4. enable the device and cause it to trigger an interrupt.
 770 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 771 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 772 * 7. service the device to clear its pending interrupt.
 773 * 8. loop again if paranoia is required.
 774 *
 775 * probe_irq_on() returns a mask of allocated irq's.
 776 *
 777 * probe_irq_off() takes the mask as a parameter,
 778 * and returns the irq number which occurred,
 779 * or zero if none occurred, or a negative irq number
 780 * if more than one irq occurred.
 781 */
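/*
 * Example (editor's sketch) following the recipe above. The foo_hw_*()
 * helpers stand in for whatever device-specific pokes mask, trigger and
 * acknowledge a test interrupt; msleep() needs <linux/delay.h>.
 */
	unsigned long mask;
	int irq;

	foo_hw_mask_irq(dev);			/* 1: quiesce the device interrupt */
	mask = probe_irq_on();			/* 3: take over unassigned idle IRQs */
	foo_hw_trigger_test_irq(dev);		/* 4: make the device fire */
	msleep(20);				/* 5: give the interrupt time to arrive */
	irq = probe_irq_off(mask);		/* 6: 0 = none seen, <0 = several */
	foo_hw_ack_irq(dev);			/* 7: clear the pending interrupt */
	if (irq <= 0)
		return -ENODEV;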
 782
 783#if !defined(CONFIG_GENERIC_IRQ_PROBE) 
 784static inline unsigned long probe_irq_on(void)
 785{
 786        return 0;
 787}
 788static inline int probe_irq_off(unsigned long val)
 789{
 790        return 0;
 791}
 792static inline unsigned int probe_irq_mask(unsigned long val)
 793{
 794        return 0;
 795}
 796#else
 797extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
 798extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
 799extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
 800#endif
 801
 802#ifdef CONFIG_PROC_FS
 803/* Initialize /proc/irq/ */
 804extern void init_irq_proc(void);
 805#else
 806static inline void init_irq_proc(void)
 807{
 808}
 809#endif
 810
 811#ifdef CONFIG_IRQ_TIMINGS
 812void irq_timings_enable(void);
 813void irq_timings_disable(void);
 814u64 irq_timings_next_event(u64 now);
 815#endif
 816
 817struct seq_file;
 818int show_interrupts(struct seq_file *p, void *v);
 819int arch_show_interrupts(struct seq_file *p, int prec);
 820
 821extern int early_irq_init(void);
 822extern int arch_probe_nr_irqs(void);
 823extern int arch_early_irq_init(void);
 824
 825/*
 826 * We want to know which function is an entrypoint of a hardirq or a softirq.
 827 */
 828#ifndef __irq_entry
 829# define __irq_entry     __section(".irqentry.text")
 830#endif
 831
 832#define __softirq_entry  __section(".softirqentry.text")
 833
 834#endif
 835