   1// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
   2/* Copyright(c) 2014 - 2020 Intel Corporation */
   3#include <linux/kernel.h>
   4#include <linux/init.h>
   5#include <linux/types.h>
   6#include <linux/pci.h>
   7#include <linux/slab.h>
   8#include <linux/errno.h>
   9#include <linux/interrupt.h>
  10#include "adf_accel_devices.h"
  11#include "adf_common_drv.h"
  12#include "adf_cfg.h"
  13#include "adf_cfg_strings.h"
  14#include "adf_cfg_common.h"
  15#include "adf_transport_access_macros.h"
  16#include "adf_transport_internal.h"
  17
  18static int adf_enable_msix(struct adf_accel_dev *accel_dev)
  19{
  20        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
  21        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
  22        u32 msix_num_entries = 1;
  23
  24        if (hw_data->set_msix_rttable)
  25                hw_data->set_msix_rttable(accel_dev);
  26
  27        /* If SR-IOV is disabled, add entries for each bank */
  28        if (!accel_dev->pf.vf_info) {
  29                int i;
  30
  31                msix_num_entries += hw_data->num_banks;
  32                for (i = 0; i < msix_num_entries; i++)
  33                        pci_dev_info->msix_entries.entries[i].entry = i;
  34        } else {
  35                pci_dev_info->msix_entries.entries[0].entry =
  36                        hw_data->num_banks;
  37        }
  38
  39        if (pci_enable_msix_exact(pci_dev_info->pci_dev,
  40                                  pci_dev_info->msix_entries.entries,
  41                                  msix_num_entries)) {
  42                dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
  43                return -EFAULT;
  44        }
  45        return 0;
  46}
  47
  48static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
  49{
  50        pci_disable_msix(pci_dev_info->pci_dev);
  51}
  52
  53static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
  54{
  55        struct adf_etr_bank_data *bank = bank_ptr;
  56        struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
  57
  58        csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
  59                                            0);
  60        tasklet_hi_schedule(&bank->resp_handler);
  61        return IRQ_HANDLED;
  62}
  63
/*
 * Top half for the AE cluster MSI-X vector. On a PF with SR-IOV enabled
 * this vector also carries VF-to-PF doorbell interrupts; those are
 * dispatched to per-VF tasklets here. Anything else is reported as
 * spurious (IRQ_NONE).
 */
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info) {
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
		void __iomem *pmisc_bar_addr = pmisc->virt_addr;
		u32 vf_mask;

		/*
		 * Get the interrupt sources triggered by VFs. Build a
		 * 32-bit per-VF mask: ERRSOU5 bits 0-15 become mask bits
		 * 16-31, ERRSOU3 bits 9-24 become mask bits 0-15.
		 */
		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
			    0x0000FFFF) << 16) |
			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
			    0x01FFFE00) >> 9);

		if (vf_mask) {
			struct adf_accel_vf_info *vf_info;
			bool irq_handled = false;
			int i;

			/* Disable VF2PF interrupts for VFs with pending ints */
			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);

			/*
			 * Schedule tasklets to handle VF2PF interrupt BHs
			 * unless the VF is malicious and is attempting to
			 * flood the host OS with VF2PF interrupts.
			 */
			/* Bit i of vf_mask corresponds to vf_info[i]. The
			 * u32 is cast through unsigned long for the bitmap
			 * helper.
			 */
			for_each_set_bit(i, (const unsigned long *)&vf_mask,
					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
				vf_info = accel_dev->pf.vf_info + i;

				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
					dev_info(&GET_DEV(accel_dev),
						 "Too many ints from VF%d\n",
						  vf_info->vf_nr + 1);
					/* Leave this VF's ints disabled */
					continue;
				}

				/* Tasklet will re-enable ints from this VF */
				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
				irq_handled = true;
			}

			if (irq_handled)
				return IRQ_HANDLED;
		}
	}
#endif /* CONFIG_PCI_IOV */

	/* No VF source claimed the interrupt (or SR-IOV is off) */
	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}
 123
 124static int adf_request_irqs(struct adf_accel_dev *accel_dev)
 125{
 126        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
 127        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 128        struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
 129        struct adf_etr_data *etr_data = accel_dev->transport;
 130        int ret, i = 0;
 131        char *name;
 132
 133        /* Request msix irq for all banks unless SR-IOV enabled */
 134        if (!accel_dev->pf.vf_info) {
 135                for (i = 0; i < hw_data->num_banks; i++) {
 136                        struct adf_etr_bank_data *bank = &etr_data->banks[i];
 137                        unsigned int cpu, cpus = num_online_cpus();
 138
 139                        name = *(pci_dev_info->msix_entries.names + i);
 140                        snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
 141                                 "qat%d-bundle%d", accel_dev->accel_id, i);
 142                        ret = request_irq(msixe[i].vector,
 143                                          adf_msix_isr_bundle, 0, name, bank);
 144                        if (ret) {
 145                                dev_err(&GET_DEV(accel_dev),
 146                                        "failed to enable irq %d for %s\n",
 147                                        msixe[i].vector, name);
 148                                return ret;
 149                        }
 150
 151                        cpu = ((accel_dev->accel_id * hw_data->num_banks) +
 152                               i) % cpus;
 153                        irq_set_affinity_hint(msixe[i].vector,
 154                                              get_cpu_mask(cpu));
 155                }
 156        }
 157
 158        /* Request msix irq for AE */
 159        name = *(pci_dev_info->msix_entries.names + i);
 160        snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
 161                 "qat%d-ae-cluster", accel_dev->accel_id);
 162        ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
 163        if (ret) {
 164                dev_err(&GET_DEV(accel_dev),
 165                        "failed to enable irq %d, for %s\n",
 166                        msixe[i].vector, name);
 167                return ret;
 168        }
 169        return ret;
 170}
 171
 172static void adf_free_irqs(struct adf_accel_dev *accel_dev)
 173{
 174        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
 175        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 176        struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
 177        struct adf_etr_data *etr_data = accel_dev->transport;
 178        int i = 0;
 179
 180        if (pci_dev_info->msix_entries.num_entries > 1) {
 181                for (i = 0; i < hw_data->num_banks; i++) {
 182                        irq_set_affinity_hint(msixe[i].vector, NULL);
 183                        free_irq(msixe[i].vector, &etr_data->banks[i]);
 184                }
 185        }
 186        irq_set_affinity_hint(msixe[i].vector, NULL);
 187        free_irq(msixe[i].vector, accel_dev);
 188}
 189
 190static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
 191{
 192        int i;
 193        char **names;
 194        struct msix_entry *entries;
 195        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 196        u32 msix_num_entries = 1;
 197
 198        /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
 199        if (!accel_dev->pf.vf_info)
 200                msix_num_entries += hw_data->num_banks;
 201
 202        entries = kcalloc_node(msix_num_entries, sizeof(*entries),
 203                               GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
 204        if (!entries)
 205                return -ENOMEM;
 206
 207        names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
 208        if (!names) {
 209                kfree(entries);
 210                return -ENOMEM;
 211        }
 212        for (i = 0; i < msix_num_entries; i++) {
 213                *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
 214                if (!(*(names + i)))
 215                        goto err;
 216        }
 217        accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
 218        accel_dev->accel_pci_dev.msix_entries.entries = entries;
 219        accel_dev->accel_pci_dev.msix_entries.names = names;
 220        return 0;
 221err:
 222        for (i = 0; i < msix_num_entries; i++)
 223                kfree(*(names + i));
 224        kfree(entries);
 225        kfree(names);
 226        return -ENOMEM;
 227}
 228
 229static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
 230{
 231        char **names = accel_dev->accel_pci_dev.msix_entries.names;
 232        int i;
 233
 234        kfree(accel_dev->accel_pci_dev.msix_entries.entries);
 235        for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
 236                kfree(*(names + i));
 237        kfree(names);
 238}
 239
 240static int adf_setup_bh(struct adf_accel_dev *accel_dev)
 241{
 242        struct adf_etr_data *priv_data = accel_dev->transport;
 243        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 244        int i;
 245
 246        for (i = 0; i < hw_data->num_banks; i++)
 247                tasklet_init(&priv_data->banks[i].resp_handler,
 248                             adf_response_handler,
 249                             (unsigned long)&priv_data->banks[i]);
 250        return 0;
 251}
 252
 253static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
 254{
 255        struct adf_etr_data *priv_data = accel_dev->transport;
 256        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 257        int i;
 258
 259        for (i = 0; i < hw_data->num_banks; i++) {
 260                tasklet_disable(&priv_data->banks[i].resp_handler);
 261                tasklet_kill(&priv_data->banks[i].resp_handler);
 262        }
 263}
 264
 265/**
 266 * adf_isr_resource_free() - Free IRQ for acceleration device
 267 * @accel_dev:  Pointer to acceleration device.
 268 *
 269 * Function frees interrupts for acceleration device.
 270 */
 271void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
 272{
 273        adf_free_irqs(accel_dev);
 274        adf_cleanup_bh(accel_dev);
 275        adf_disable_msix(&accel_dev->accel_pci_dev);
 276        adf_isr_free_msix_entry_table(accel_dev);
 277}
 278EXPORT_SYMBOL_GPL(adf_isr_resource_free);
 279
 280/**
 281 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 282 * @accel_dev:  Pointer to acceleration device.
 283 *
 284 * Function allocates interrupts for acceleration device.
 285 *
 286 * Return: 0 on success, error code otherwise.
 287 */
 288int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 289{
 290        int ret;
 291
 292        ret = adf_isr_alloc_msix_entry_table(accel_dev);
 293        if (ret)
 294                goto err_out;
 295
 296        ret = adf_enable_msix(accel_dev);
 297        if (ret)
 298                goto err_free_msix_table;
 299
 300        ret = adf_setup_bh(accel_dev);
 301        if (ret)
 302                goto err_disable_msix;
 303
 304        ret = adf_request_irqs(accel_dev);
 305        if (ret)
 306                goto err_cleanup_bh;
 307
 308        return 0;
 309
 310err_cleanup_bh:
 311        adf_cleanup_bh(accel_dev);
 312
 313err_disable_msix:
 314        adf_disable_msix(&accel_dev->accel_pci_dev);
 315
 316err_free_msix_table:
 317        adf_isr_free_msix_entry_table(accel_dev);
 318
 319err_out:
 320        return ret;
 321}
 322EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
 323