/* linux/drivers/crypto/ccp/ccp-pci.c */
   1/*
   2 * AMD Cryptographic Coprocessor (CCP) driver
   3 *
   4 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
   5 *
   6 * Author: Tom Lendacky <thomas.lendacky@amd.com>
   7 * Author: Gary R Hook <gary.hook@amd.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/kernel.h>
  16#include <linux/device.h>
  17#include <linux/pci.h>
  18#include <linux/pci_ids.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/kthread.h>
  21#include <linux/sched.h>
  22#include <linux/interrupt.h>
  23#include <linux/spinlock.h>
  24#include <linux/delay.h>
  25#include <linux/ccp.h>
  26
  27#include "ccp-dev.h"
  28
/* Maximum number of MSI-X vectors this driver will request. */
#define MSIX_VECTORS			2

/* One allocated MSI-X vector and the name it was registered under. */
struct ccp_msix {
	u32 vector;		/* IRQ number returned by pci_enable_msix_range() */
	char name[16];		/* per-vector name passed to request_irq() */
};

/* PCI-bus-specific state hung off ccp_device->dev_specific. */
struct ccp_pci {
	int msix_count;		/* number of MSI-X vectors in use; 0 => MSI/none */
	struct ccp_msix msix[MSIX_VECTORS];
};
  40
  41static int ccp_get_msix_irqs(struct ccp_device *ccp)
  42{
  43        struct ccp_pci *ccp_pci = ccp->dev_specific;
  44        struct device *dev = ccp->dev;
  45        struct pci_dev *pdev = to_pci_dev(dev);
  46        struct msix_entry msix_entry[MSIX_VECTORS];
  47        unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
  48        int v, ret;
  49
  50        for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
  51                msix_entry[v].entry = v;
  52
  53        ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
  54        if (ret < 0)
  55                return ret;
  56
  57        ccp_pci->msix_count = ret;
  58        for (v = 0; v < ccp_pci->msix_count; v++) {
  59                /* Set the interrupt names and request the irqs */
  60                snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
  61                         ccp->name, v);
  62                ccp_pci->msix[v].vector = msix_entry[v].vector;
  63                ret = request_irq(ccp_pci->msix[v].vector,
  64                                  ccp->vdata->perform->irqhandler,
  65                                  0, ccp_pci->msix[v].name, dev);
  66                if (ret) {
  67                        dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
  68                                   ret);
  69                        goto e_irq;
  70                }
  71        }
  72
  73        return 0;
  74
  75e_irq:
  76        while (v--)
  77                free_irq(ccp_pci->msix[v].vector, dev);
  78
  79        pci_disable_msix(pdev);
  80
  81        ccp_pci->msix_count = 0;
  82
  83        return ret;
  84}
  85
  86static int ccp_get_msi_irq(struct ccp_device *ccp)
  87{
  88        struct device *dev = ccp->dev;
  89        struct pci_dev *pdev = to_pci_dev(dev);
  90        int ret;
  91
  92        ret = pci_enable_msi(pdev);
  93        if (ret)
  94                return ret;
  95
  96        ccp->irq = pdev->irq;
  97        ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
  98                          ccp->name, dev);
  99        if (ret) {
 100                dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 101                goto e_msi;
 102        }
 103
 104        return 0;
 105
 106e_msi:
 107        pci_disable_msi(pdev);
 108
 109        return ret;
 110}
 111
 112static int ccp_get_irqs(struct ccp_device *ccp)
 113{
 114        struct device *dev = ccp->dev;
 115        int ret;
 116
 117        ret = ccp_get_msix_irqs(ccp);
 118        if (!ret)
 119                return 0;
 120
 121        /* Couldn't get MSI-X vectors, try MSI */
 122        dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
 123        ret = ccp_get_msi_irq(ccp);
 124        if (!ret)
 125                return 0;
 126
 127        /* Couldn't get MSI interrupt */
 128        dev_notice(dev, "could not enable MSI (%d)\n", ret);
 129
 130        return ret;
 131}
 132
 133static void ccp_free_irqs(struct ccp_device *ccp)
 134{
 135        struct ccp_pci *ccp_pci = ccp->dev_specific;
 136        struct device *dev = ccp->dev;
 137        struct pci_dev *pdev = to_pci_dev(dev);
 138
 139        if (ccp_pci->msix_count) {
 140                while (ccp_pci->msix_count--)
 141                        free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
 142                                 dev);
 143                pci_disable_msix(pdev);
 144        } else if (ccp->irq) {
 145                free_irq(ccp->irq, dev);
 146                pci_disable_msi(pdev);
 147        }
 148        ccp->irq = 0;
 149}
 150
 151static int ccp_find_mmio_area(struct ccp_device *ccp)
 152{
 153        struct device *dev = ccp->dev;
 154        struct pci_dev *pdev = to_pci_dev(dev);
 155        resource_size_t io_len;
 156        unsigned long io_flags;
 157
 158        io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
 159        io_len = pci_resource_len(pdev, ccp->vdata->bar);
 160        if ((io_flags & IORESOURCE_MEM) &&
 161            (io_len >= (ccp->vdata->offset + 0x800)))
 162                return ccp->vdata->bar;
 163
 164        return -EIO;
 165}
 166
/*
 * ccp_pci_probe() - PCI probe callback: bring up one CCP device.
 *
 * Allocates the device structure, maps the register BAR, configures
 * DMA masks (48-bit preferred, 32-bit fallback) and hands off to the
 * version-specific init routine.  Error paths unwind in strict reverse
 * order of acquisition via the goto chain at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	/* devm allocation: freed automatically on probe failure/unbind */
	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	/* Version data was stashed in the id table's driver_data */
	ccp->vdata = (struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	/* Register block starts at a version-specific offset into the BAR */
	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	/* Prefer a 48-bit DMA mask, fall back to 32-bit */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	/* Optional version-specific hardware setup hook */
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}
 257
 258static void ccp_pci_remove(struct pci_dev *pdev)
 259{
 260        struct device *dev = &pdev->dev;
 261        struct ccp_device *ccp = dev_get_drvdata(dev);
 262
 263        if (!ccp)
 264                return;
 265
 266        ccp->vdata->perform->destroy(ccp);
 267
 268        pci_iounmap(pdev, ccp->io_map);
 269
 270        pci_disable_device(pdev);
 271
 272        pci_release_regions(pdev);
 273
 274        dev_notice(dev, "disabled\n");
 275}
 276
 277#ifdef CONFIG_PM
/*
 * ccp_pci_suspend() - legacy PM suspend hook.
 *
 * Sets the suspending flag under the command lock, wakes every queue
 * kthread so it can observe the flag, then waits (outside the lock)
 * until all queues report themselves suspended.
 *
 * Return: always 0.
 */
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}
 302
 303static int ccp_pci_resume(struct pci_dev *pdev)
 304{
 305        struct device *dev = &pdev->dev;
 306        struct ccp_device *ccp = dev_get_drvdata(dev);
 307        unsigned long flags;
 308        unsigned int i;
 309
 310        spin_lock_irqsave(&ccp->cmd_lock, flags);
 311
 312        ccp->suspending = 0;
 313
 314        /* Wake up all the kthreads */
 315        for (i = 0; i < ccp->cmd_q_count; i++) {
 316                ccp->cmd_q[i].suspended = 0;
 317                wake_up_process(ccp->cmd_q[i].kthread);
 318        }
 319
 320        spin_unlock_irqrestore(&ccp->cmd_lock, flags);
 321
 322        return 0;
 323}
 324#endif
 325
/*
 * Supported AMD CCP devices; driver_data points at the per-version
 * ccp_vdata consumed in ccp_pci_probe().
 */
static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);
 334
/* PCI driver registration; legacy suspend/resume only when PM is built in. */
static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};
 345
/* Register the CCP PCI driver; called from the module's common init. */
int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}
 350
/* Unregister the CCP PCI driver; called from the module's common exit. */
void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}
 355