linux/drivers/infiniband/hw/hfi1/msix.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 - 2020 Intel Corporation.
 */

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "netdev.h"

/**
 * msix_initialize() - Calculate, request and configure MSIx IRQs
 * @dd: valid hfi1 devdata
 *
 */
int msix_initialize(struct hfi1_devdata *dd)
{
        u32 total;
        int ret;
        struct hfi1_msix_entry *entries;

        /*
         * MSIx interrupt count:
         *      one for the general, "slow path" interrupt
         *      one per used SDMA engine
         *      one per kernel receive context
         *      one for each netdev context
         *      ...any new IRQs should be added here.
         */
        total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;

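        /*
         * The chip exposes a fixed pool of MSIx vectors
         * (CCE_NUM_MSIX_VECTORS); fail the request if it will not fit.
         */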
        if (total >= CCE_NUM_MSIX_VECTORS)
                return -EINVAL;

        ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
        if (ret < 0) {
                dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
                return ret;
        }

        entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
                          GFP_KERNEL);
        if (!entries) {
                pci_free_irq_vectors(dd->pcidev);
                return -ENOMEM;
        }

        dd->msix_info.msix_entries = entries;
        spin_lock_init(&dd->msix_info.msix_lock);
        bitmap_zero(dd->msix_info.in_use_msix, total);
        dd->msix_info.max_requested = total;
        dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);

        return 0;
}

/**
 * msix_request_irq() - Allocate a free MSIx IRQ
 * @dd: valid devdata
 * @arg: context information for the IRQ
 * @handler: IRQ handler
 * @thread: IRQ thread handler (could be NULL)
 * @type: affinity IRQ type
 * @name: IRQ name
 *
 * Allocate an MSIx vector if one is available, and create the
 * metadata needed to keep track of the pci IRQ request.
 *
 * Return:
 *   < 0   Error
 *   >= 0  MSIx vector
 *
 */
static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
                            irq_handler_t handler, irq_handler_t thread,
                            enum irq_type type, const char *name)
{
        unsigned long nr;
        int irq;
        int ret;
        struct hfi1_msix_entry *me;

        /*
         * Validate the type up front so that an invalid request does
         * not leak a bit in the in_use_msix bitmap.
         */
        if (type < IRQ_SDMA || type >= IRQ_OTHER)
                return -EINVAL;

        /* Allocate an MSIx vector */
        spin_lock(&dd->msix_info.msix_lock);
        nr = find_first_zero_bit(dd->msix_info.in_use_msix,
                                 dd->msix_info.max_requested);
        if (nr < dd->msix_info.max_requested)
                __set_bit(nr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);

        if (nr == dd->msix_info.max_requested)
                return -ENOSPC;

        irq = pci_irq_vector(dd->pcidev, nr);
        ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
        if (ret) {
                dd_dev_err(dd,
                           "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
                           name, irq, nr, ret);
                spin_lock(&dd->msix_info.msix_lock);
                __clear_bit(nr, dd->msix_info.in_use_msix);
                spin_unlock(&dd->msix_info.msix_lock);
                return ret;
        }

        /*
         * Assign arg only after pci_request_irq() succeeds, so that
         * msix_free_irq() (which keys off me->arg) cleans up only
         * vectors that were actually requested.
         */
        me = &dd->msix_info.msix_entries[nr];
        me->irq = irq;
        me->arg = arg;
        me->type = type;

        /* This is a request, so a failure is not fatal */
        ret = hfi1_get_irq_affinity(dd, me);
        if (ret)
                dd_dev_err(dd, "%s: unable to pin IRQ %d, err %d\n",
                           name, irq, ret);

        return nr;
}

static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
                                       irq_handler_t handler,
                                       irq_handler_t thread,
                                       const char *name)
{
        int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
                                  rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
                                  name);
        if (nr < 0)
                return nr;

        /*
         * Set the interrupt register and mask for this
         * context's interrupt.
         */
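        /*
         * The RcvAvail source for this context is IS_RCVAVAIL_START +
         * rcd->ctxt.  Interrupt sources are grouped into 64-bit
         * registers, so "/ 64" selects the register index and "% 64"
         * selects the bit within that register.
         */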
        rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
        rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
        rcd->msix_intr = nr;
        remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);

        return 0;
}

/**
 * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
 * @rcd: valid rcd context
 *
 */
int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
                 rcd->dd->unit, rcd->ctxt);

        return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
                                           receive_context_thread, name);
}

/**
 * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
 * for a netdev context
 * @rcd: valid netdev context
 */
int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
                 rcd->dd->unit, rcd->ctxt);
        return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
                                           NULL, name);
}

/**
 * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
 * @sde: valid sdma engine
 *
 */
int msix_request_sdma_irq(struct sdma_engine *sde)
{
        int nr;
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
                 sde->dd->unit, sde->this_idx);
        nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
                              IRQ_SDMA, name);
        if (nr < 0)
                return nr;
        sde->msix_intr = nr;
        remap_sdma_interrupts(sde->dd, sde->this_idx, nr);

        return 0;
}

/**
 * msix_request_general_irq - Helper for getting general IRQ
 * resources
 * @dd: valid device data
 */
int msix_request_general_irq(struct hfi1_devdata *dd)
{
        int nr;
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
        nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
                              name);
        if (nr < 0)
                return nr;

        /* general interrupt must be MSIx vector 0 */
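        /*
         * This is expected to hold because the general interrupt is the
         * first vector requested (see msix_request_irqs()), so the
         * first-fit allocator in msix_request_irq() should return bit 0.
         */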
        if (nr) {
                msix_free_irq(dd, (u8)nr);
                dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
                return -EINVAL;
        }

        return 0;
}

/**
 * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
 * @dd: valid devdata structure
 * @i: index of SDMA engine
 */
static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
{
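        /*
         * Each SDMA engine owns four interrupt sources (IS_SDMA,
         * IS_SDMA_PROGRESS, IS_SDMA_IDLE and IS_SDMAENG_ERR); enable
         * all four for engine i.
         */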
        set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
        set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
                      IS_SDMA_PROGRESS_START + i, true);
        set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
        set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
                      true);
}

/**
 * msix_request_irqs() - Allocate all MSIx IRQs
 * @dd: valid devdata structure
 *
 * Helper function to request the used MSIx IRQs.
 *
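 * The general ("slow path") interrupt is requested first so that it is
 * assigned MSIx vector 0, which msix_request_general_irq() requires.
 *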
 */
int msix_request_irqs(struct hfi1_devdata *dd)
{
        int i;
        int ret = msix_request_general_irq(dd);

        if (ret)
                return ret;

        for (i = 0; i < dd->num_sdma; i++) {
                struct sdma_engine *sde = &dd->per_sdma[i];

                ret = msix_request_sdma_irq(sde);
                if (ret)
                        return ret;
                enable_sdma_srcs(sde->dd, i);
        }

        for (i = 0; i < dd->n_krcv_queues; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);

                if (rcd)
                        ret = msix_request_rcd_irq(rcd);
                hfi1_rcd_put(rcd);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * msix_free_irq() - Free the specified MSIx resources and IRQ
 * @dd: valid devdata
 * @msix_intr: MSIx vector to free.
 *
 */
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
{
        struct hfi1_msix_entry *me;

        if (msix_intr >= dd->msix_info.max_requested)
                return;

        me = &dd->msix_info.msix_entries[msix_intr];

        if (!me->arg) /* => no irq, no affinity */
                return;

        hfi1_put_irq_affinity(dd, me);
        pci_free_irq(dd->pcidev, msix_intr, me->arg);

        me->arg = NULL;

        spin_lock(&dd->msix_info.msix_lock);
        __clear_bit(msix_intr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);
}

/**
 * msix_clean_up_interrupts - Free all MSIx IRQ resources
 * @dd: valid device data structure
 *
 * Free the MSIx and associated PCI resources, if they have been allocated.
 */
void msix_clean_up_interrupts(struct hfi1_devdata *dd)
{
        int i;
        struct hfi1_msix_entry *me = dd->msix_info.msix_entries;

        /* remove irqs - must happen before disabling/turning off */
        for (i = 0; i < dd->msix_info.max_requested; i++, me++)
                msix_free_irq(dd, i);

        /* clean structures */
        kfree(dd->msix_info.msix_entries);
        dd->msix_info.msix_entries = NULL;
        dd->msix_info.max_requested = 0;

        pci_free_irq_vectors(dd->pcidev);
}

/**
 * msix_netdev_synchronize_irq - Wait for in-flight netdev context IRQs
 * @dd: valid devdata
 */
void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
{
        int i;
        int ctxt_count = hfi1_netdev_ctxt_count(dd);

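        /*
         * synchronize_irq() does not disable anything; it only waits
         * until any handler currently running for the context's vector
         * has completed.
         */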
        for (i = 0; i < ctxt_count; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
                struct hfi1_msix_entry *me;

                me = &dd->msix_info.msix_entries[rcd->msix_intr];

                synchronize_irq(me->irq);
        }
}