linux/lib/irq_poll.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>

static unsigned int irq_poll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * irq_poll_sched - Schedule a run of the iopoll handler
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Add this irq_poll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq.
 **/
void irq_poll_sched(struct irq_poll *iop)
{
        unsigned long flags;

        if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
                return;
        if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
                return;

        local_irq_save(flags);
        list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
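
/*
 * Example (an illustrative sketch, not part of this file; struct foo_dev
 * and foo_disable_completion_irq() are made-up names): a driver typically
 * calls irq_poll_sched() from its hard interrupt handler after masking
 * further completion interrupts on the device, mirroring the NAPI pattern
 * for network drivers:
 *
 *      static irqreturn_t foo_isr(int irq, void *data)
 *      {
 *              struct foo_dev *fdev = data;
 *
 *              foo_disable_completion_irq(fdev);
 *              irq_poll_sched(&fdev->iop);
 *              return IRQ_HANDLED;
 *      }
 */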

/**
 * __irq_poll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     See irq_poll_complete(). This function must be called with interrupts
 *     disabled.
 **/
static void __irq_poll_complete(struct irq_poll *iop)
{
        list_del(&iop->list);
        smp_mb__before_atomic();
        clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}

/**
 * irq_poll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before irq_poll_sched()
 *     is called.
 **/
void irq_poll_complete(struct irq_poll *iop)
{
        unsigned long flags;

        local_irq_save(flags);
        __irq_poll_complete(iop);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
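
/*
 * Example ->poll() handler (an illustrative sketch; foo_dev,
 * foo_reap_completions() and foo_enable_completion_irq() are made-up
 * names): the handler reaps at most @budget completions. If it drains
 * the queue early, it leaves polled mode via irq_poll_complete() and
 * unmasks the device interrupt; if it uses the whole budget, it simply
 * returns and is polled again on the next softirq pass:
 *
 *      static int foo_poll(struct irq_poll *iop, int budget)
 *      {
 *              struct foo_dev *fdev = container_of(iop, struct foo_dev, iop);
 *              int done;
 *
 *              done = foo_reap_completions(fdev, budget);
 *              if (done < budget) {
 *                      irq_poll_complete(iop);
 *                      foo_enable_completion_irq(fdev);
 *              }
 *              return done;
 *      }
 */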

static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
        struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
        int rearm = 0, budget = irq_poll_budget;
        unsigned long start_time = jiffies;

        local_irq_disable();

        while (!list_empty(list)) {
                struct irq_poll *iop;
                int work, weight;

                /*
                 * If softirq window is exhausted then punt.
                 */
                if (budget <= 0 || time_after(jiffies, start_time)) {
                        rearm = 1;
                        break;
                }

                local_irq_enable();

                /* Even though interrupts have been re-enabled, this
                 * access is safe because interrupts can only add new
                 * entries to the tail of this list, and only ->poll()
                 * calls can remove this head entry from the list.
                 */
                iop = list_entry(list->next, struct irq_poll, list);

                weight = iop->weight;
                work = 0;
                if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
                        work = iop->poll(iop, weight);

                budget -= work;

                local_irq_disable();

                /*
                 * Drivers must not modify the iopoll state if they
                 * consume their assigned weight (or more; some drivers can't
                 * easily just stop processing, they have to complete an
                 * entire mask of commands). In such cases this code
                 * still "owns" the iopoll instance and therefore can
                 * move the instance around on the list at will.
                 */
                if (work >= weight) {
                        if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
                                __irq_poll_complete(iop);
                        else
                                list_move_tail(&iop->list, list);
                }
        }

        if (rearm)
                __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

        local_irq_enable();
}

/**
 * irq_poll_disable - Disable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void irq_poll_disable(struct irq_poll *iop)
{
        set_bit(IRQ_POLL_F_DISABLE, &iop->state);
        while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
                msleep(1);
        clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);

/**
 * irq_poll_enable - Enable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that the handler run will not be
 *     scheduled; this only marks the instance as active again.
 **/
void irq_poll_enable(struct irq_poll *iop)
{
        BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
        smp_mb__before_atomic();
        clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
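
/*
 * Example (an illustrative sketch; foo_dev and foo_reset_hw() are made-up
 * names): a driver quiesces polling around a hardware reset by pairing
 * irq_poll_disable() with irq_poll_enable(). irq_poll_disable() returns
 * holding IRQ_POLL_F_SCHED, so no ->poll() runs until the matching
 * irq_poll_enable():
 *
 *      static void foo_reset(struct foo_dev *fdev)
 *      {
 *              irq_poll_disable(&fdev->iop);
 *              foo_reset_hw(fdev);
 *              irq_poll_enable(&fdev->iop);
 *      }
 */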

/**
 * irq_poll_init - Initialize this @iop
 * @iop:      The parent iopoll structure
 * @weight:   The default weight (or command completion budget)
 * @poll_fn:  The handler to invoke
 *
 * Description:
 *     Initialize and enable this irq_poll structure.
 **/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
        memset(iop, 0, sizeof(*iop));
        INIT_LIST_HEAD(&iop->list);
        iop->weight = weight;
        iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
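
/*
 * Example setup (an illustrative sketch; foo_dev, foo_probe() and
 * FOO_POLL_WEIGHT are made-up names): a driver initializes the instance
 * once at probe time with its per-run completion budget and poll handler,
 * e.g. the foo_poll() sketched above:
 *
 *      #define FOO_POLL_WEIGHT 32
 *
 *      static int foo_probe(struct foo_dev *fdev)
 *      {
 *              irq_poll_init(&fdev->iop, FOO_POLL_WEIGHT, foo_poll);
 *              return 0;
 *      }
 */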

static int irq_poll_cpu_dead(unsigned int cpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        local_irq_disable();
        list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
                         this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
        local_irq_enable();

        return 0;
}

static __init int irq_poll_setup(void)
{
        int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

        open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
        cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
                                  irq_poll_cpu_dead);
        return 0;
}
subsys_initcall(irq_poll_setup);