linux/drivers/dma/ioat/init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static const struct pci_device_id ioat_pci_tbl[] = {
        /* I/OAT v3 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

        /* I/OAT v3.2 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },

        /* I/OAT v3.3 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

        /* I/OAT v3.4 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },

        { 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
                  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 7;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 7)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
                    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
                 "set ioat interrupt style: msix (default), msi, intx");
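
/*
 * All three parameters use 0644 permissions, so they are also writable
 * at runtime through /sys/module/ioatdma/parameters/.  A hypothetical
 * load-time invocation would look like:
 *
 *	modprobe ioatdma ioat_interrupt_style=msi ioat_pending_level=4
 *
 * ioat_interrupt_style only selects the first mode that
 * ioat_dma_setup_interrupts() attempts; that routine then falls back
 * down the MSI-X -> MSI -> INTx chain if a mode cannot be enabled.
 */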

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

static bool is_jf_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
                return true;
        default:
                return false;
        }
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
                return true;
        default:
                return false;
        }
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
                return true;
        default:
                return false;
        }
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
                return true;
        default:
                return false;
        }
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
                return true;
        default:
                return false;
        }
}

static inline bool is_skx_ioat(struct pci_dev *pdev)
{
        return pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX;
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
        return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
                is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
        /* even though not Atom, BDX-DE has same DMA silicon */
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                return true;
        default:
                return false;
        }
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                return true;
        default:
                return false;
        }
}
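
/*
 * These device-ID predicates drive the per-platform quirks below:
 * is_xeon_cb32() and is_bwd_noraid() cause ioat3_dma_probe() to strip
 * the XOR/PQ/RAID16SS capability bits, and is_bwd_ioat() gates the
 * error-interrupt quirk applied in ioat_dma_setup_interrupts().
 */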

/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
        struct completion *cmp = dma_async_param;

        complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_device *dma = &ioat_dma->dma_dev;
        struct device *dev = &ioat_dma->pdev->dev;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        unsigned long flags;

        src = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(dev, "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_src)) {
                dev_err(dev, "mapping src buffer failed\n");
                err = -ENOMEM;
                goto free_resources;
        }
        dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma_dest)) {
                dev_err(dev, "mapping dest buffer failed\n");
                err = -ENOMEM;
                goto unmap_src;
        }
        flags = DMA_PREP_INTERRUPT;
        tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
                                                      dma_src, IOAT_TEST_SIZE,
                                                      flags);
        if (!tx) {
                dev_err(dev, "Self-test prep failed, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test setup failed, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }

unmap_dma:
        dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
        dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}
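
/*
 * The self-test above follows the canonical dmaengine client sequence.
 * A minimal sketch of that pattern, using the same calls as the test
 * (error handling elided, names as above):
 *
 *	tx = dma->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	async_tx_ack(tx);
 *	tx->callback = ioat_dma_test_callback;	// runs on completion
 *	tx->callback_param = &cmp;
 *	cookie = tx->tx_submit(tx);		// queue the descriptor
 *	dma->device_issue_pending(chan);	// kick the hardware
 *	wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 *	// then confirm via dma->device_tx_status(chan, cookie, NULL)
 */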

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
        struct ioatdma_chan *ioat_chan;
        struct pci_dev *pdev = ioat_dma->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
        int i, j, msixcnt;
        int err = -EINVAL;
        u8 intrctrl = 0;

        if (!strcmp(ioat_interrupt_style, "msix"))
                goto msix;
        if (!strcmp(ioat_interrupt_style, "msi"))
                goto msi;
        if (!strcmp(ioat_interrupt_style, "intx"))
                goto intx;
        dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
        goto err_no_irq;

msix:
        /* The number of MSI-X vectors should equal the number of channels */
        msixcnt = ioat_dma->dma_dev.chancnt;
        for (i = 0; i < msixcnt; i++)
                ioat_dma->msix_entries[i].entry = i;

        err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
        if (err)
                goto msi;

        for (i = 0; i < msixcnt; i++) {
                msix = &ioat_dma->msix_entries[i];
                ioat_chan = ioat_chan_by_index(ioat_dma, i);
                err = devm_request_irq(dev, msix->vector,
                                       ioat_dma_do_interrupt_msix, 0,
                                       "ioat-msix", ioat_chan);
                if (err) {
                        for (j = 0; j < i; j++) {
                                msix = &ioat_dma->msix_entries[j];
                                ioat_chan = ioat_chan_by_index(ioat_dma, j);
                                devm_free_irq(dev, msix->vector, ioat_chan);
                        }
                        goto msi;
                }
        }
        intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
        ioat_dma->irq_mode = IOAT_MSIX;
        goto done;

msi:
        err = pci_enable_msi(pdev);
        if (err)
                goto intx;

        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
                               "ioat-msi", ioat_dma);
        if (err) {
                pci_disable_msi(pdev);
                goto intx;
        }
        ioat_dma->irq_mode = IOAT_MSI;
        goto done;

intx:
        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
                               IRQF_SHARED, "ioat-intx", ioat_dma);
        if (err)
                goto err_no_irq;

        ioat_dma->irq_mode = IOAT_INTX;
done:
        if (is_bwd_ioat(pdev))
                ioat_intr_quirk(ioat_dma);
        intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
        writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
        return 0;

err_no_irq:
        /* Disable all interrupt generation */
        writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
        ioat_dma->irq_mode = IOAT_NOIRQ;
        dev_err(dev, "no usable interrupts\n");
        return err;
}
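
/*
 * Note that all three fallback paths converge at "done": the master
 * interrupt enable bit (IOAT_INTRCTRL_MASTER_INT_EN) is only written
 * once an IRQ handler has actually been registered, so any failure
 * lands in err_no_irq with interrupt generation fully disabled.
 */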

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
        /* Disable all interrupt generation */
        writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
        int err = -ENODEV;
        struct dma_device *dma = &ioat_dma->dma_dev;
        struct pci_dev *pdev = ioat_dma->pdev;
        struct device *dev = &pdev->dev;

        ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
                                                    sizeof(u64),
                                                    SMP_CACHE_BYTES,
                                                    SMP_CACHE_BYTES);

        if (!ioat_dma->completion_pool) {
                err = -ENOMEM;
                goto err_out;
        }

        ioat_enumerate_channels(ioat_dma);

        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma->dev = &pdev->dev;

        if (!dma->chancnt) {
                dev_err(dev, "channel enumeration error\n");
                goto err_setup_interrupts;
        }

        err = ioat_dma_setup_interrupts(ioat_dma);
        if (err)
                goto err_setup_interrupts;

        err = ioat3_dma_self_test(ioat_dma);
        if (err)
                goto err_self_test;

        return 0;

err_self_test:
        ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
        dma_pool_destroy(ioat_dma->completion_pool);
err_out:
        return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
        int err = dma_async_device_register(&ioat_dma->dma_dev);

        if (err) {
                ioat_disable_interrupts(ioat_dma);
                dma_pool_destroy(ioat_dma->completion_pool);
        }

        return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
        struct dma_device *dma = &ioat_dma->dma_dev;

        ioat_disable_interrupts(ioat_dma);

        ioat_kobject_del(ioat_dma);

        dma_async_device_unregister(dma);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
        struct ioatdma_chan *ioat_chan;
        struct device *dev = &ioat_dma->pdev->dev;
        struct dma_device *dma = &ioat_dma->dma_dev;
        u8 xfercap_log;
        int i;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
                dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
        }
        xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_log &= 0x1f; /* bits [4:0] valid */
        if (xfercap_log == 0)
                return;
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

        for (i = 0; i < dma->chancnt; i++) {
                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan)
                        break;

                ioat_init_channel(ioat_dma, ioat_chan, i);
                ioat_chan->xfercap_log = xfercap_log;
                spin_lock_init(&ioat_chan->prep_lock);
                if (ioat_reset_hw(ioat_chan)) {
                        i = 0;
                        break;
                }
        }
        dma->chancnt = i;
}
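
/*
 * CHANCNT and XFERCAP are 5-bit fields read straight from MMIO.  The
 * transfer cap is encoded as a power of two, so, for example, a
 * (hypothetical) xfercap_log of 20 means each descriptor can move up
 * to 1 << 20 bytes (1 MiB).  A raw value of 0 is treated as unusable
 * hardware and aborts enumeration before any channel is created.
 */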

/**
 * ioat_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        const int total_descs = 1 << ioat_chan->alloc_order;
        int descs;
        int i;

        /* Before freeing channel resources, first check whether they
         * have been previously allocated for this channel.
         */
        if (!ioat_chan->ring)
                return;

        ioat_stop(ioat_chan);

        if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
                ioat_reset_hw(ioat_chan);

                /* Put LTR to idle */
                if (ioat_dma->version >= IOAT_VER_3_4)
                        writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
                               ioat_chan->reg_base +
                               IOAT_CHAN_LTR_SWSEL_OFFSET);
        }

        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->prep_lock);
        descs = ioat_ring_space(ioat_chan);
        dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
        for (i = 0; i < descs; i++) {
                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
                ioat_free_ring_ent(desc, c);
        }

        if (descs < total_descs)
                dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
                        total_descs - descs);

        for (i = 0; i < total_descs - descs; i++) {
                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
                dump_desc_dbg(ioat_chan, desc);
                ioat_free_ring_ent(desc, c);
        }

        for (i = 0; i < ioat_chan->desc_chunks; i++) {
                dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
                                  ioat_chan->descs[i].virt,
                                  ioat_chan->descs[i].hw);
                ioat_chan->descs[i].virt = NULL;
                ioat_chan->descs[i].hw = 0;
        }
        ioat_chan->desc_chunks = 0;

        kfree(ioat_chan->ring);
        ioat_chan->ring = NULL;
        ioat_chan->alloc_order = 0;
        dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
                      ioat_chan->completion_dma);
        spin_unlock_bh(&ioat_chan->prep_lock);
        spin_unlock_bh(&ioat_chan->cleanup_lock);

        ioat_chan->last_completion = 0;
        ioat_chan->completion_dma = 0;
        ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @c: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioat_ring_ent **ring;
        u64 status;
        int order;
        int i = 0;
        u32 chanerr;

        /* have we already been set up? */
        if (ioat_chan->ring)
                return 1 << ioat_chan->alloc_order;

        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        /* allocate a completion writeback area */
        /* doing 2 32-bit writes to MMIO since 1 64-bit write doesn't work */
        ioat_chan->completion =
                dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
                                GFP_NOWAIT, &ioat_chan->completion_dma);
        if (!ioat_chan->completion)
                return -ENOMEM;

        writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64)ioat_chan->completion_dma) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        order = IOAT_MAX_ORDER;
        ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
        if (!ring)
                return -ENOMEM;

        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->prep_lock);
        ioat_chan->ring = ring;
        ioat_chan->head = 0;
        ioat_chan->issued = 0;
        ioat_chan->tail = 0;
        ioat_chan->alloc_order = order;
        set_bit(IOAT_RUN, &ioat_chan->state);
        spin_unlock_bh(&ioat_chan->prep_lock);
        spin_unlock_bh(&ioat_chan->cleanup_lock);

        /* Setting up LTR values for 3.4 or later */
        if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
                u32 lat_val;

                lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
                        IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
                        IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
                writel(lat_val, ioat_chan->reg_base +
                                IOAT_CHAN_LTR_ACTIVE_OFFSET);

                lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
                          IOAT_CHAN_LTR_IDLE_SNLATSCALE |
                          IOAT_CHAN_LTR_IDLE_SNREQMNT;
                writel(lat_val, ioat_chan->reg_base +
                                IOAT_CHAN_LTR_IDLE_OFFSET);

                /* Select to active */
                writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
                       ioat_chan->reg_base +
                       IOAT_CHAN_LTR_SWSEL_OFFSET);
        }

        ioat_start_null_desc(ioat_chan);

        /* check that we got off the ground */
        do {
                udelay(1);
                status = ioat_chansts(ioat_chan);
        } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

        if (is_ioat_active(status) || is_ioat_idle(status))
                return 1 << ioat_chan->alloc_order;

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

        dev_WARN(to_dev(ioat_chan),
                 "failed to start channel chanerr: %#x\n", chanerr);
        ioat_free_chan_resources(c);
        return -EFAULT;
}
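
/*
 * The ring is always allocated at IOAT_MAX_ORDER (defined in dma.h),
 * i.e. 1 << IOAT_MAX_ORDER descriptors per channel.  The brief
 * (roughly 20us) polling loop after ioat_start_null_desc() is the only
 * synchronous wait in the setup path: the null descriptor must move
 * the channel into the ACTIVE or IDLE state before the channel is
 * reported usable to the dmaengine core.
 */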

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
                  struct ioatdma_chan *ioat_chan, int idx)
{
        struct dma_device *dma = &ioat_dma->dma_dev;
        struct dma_chan *c = &ioat_chan->dma_chan;
        unsigned long data = (unsigned long) c;

        ioat_chan->ioat_dma = ioat_dma;
        ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
        spin_lock_init(&ioat_chan->cleanup_lock);
        ioat_chan->dma_chan.device = dma;
        dma_cookie_init(&ioat_chan->dma_chan);
        list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
        ioat_dma->idx[idx] = ioat_chan;
        timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
        tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
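
/*
 * Channel register blocks sit at fixed 0x80-byte strides behind the
 * common device registers, so channel idx maps to
 * reg_base + 0x80 * (idx + 1); channel 0 therefore begins at offset
 * 0x80 from the device's MMIO base.
 */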

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[IOAT_NUM_SRC_TEST];
        struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        u32 xor_val_result;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        struct device *dev = &ioat_dma->pdev->dev;
        struct dma_device *dma = &ioat_dma->dma_dev;
        u8 op = 0;

        dev_dbg(dev, "%s\n", __func__);

        if (!dma_has_cap(DMA_XOR, dma->cap_mask))
                return 0;

        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);

                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                        (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        op = IOAT_OP_XOR;

        dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dest_dma)) {
                err = -ENOMEM;
                goto free_resources;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
                dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_srcs[i])) {
                        err = -ENOMEM;
                        goto dma_unmap;
                }
        }
        tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                      IOAT_NUM_SRC_TEST, PAGE_SIZE,
                                      DMA_PREP_INTERRUPT);

        if (!tx) {
                dev_err(dev, "Self-test xor prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test xor setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);

                if (ptr[i] != cmp_word) {
                        dev_err(dev, "Self-test xor failed compare\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }
        dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

        dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

        /* skip validate if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
                goto free_resources;

        op = IOAT_OP_XOR_VAL;

        /* validate the sources with the destination page */
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                xor_val_srcs[i] = xor_srcs[i];
        xor_val_srcs[i] = dest;

        xor_val_result = 1;

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_srcs[i])) {
                        err = -ENOMEM;
                        goto dma_unmap;
                }
        }
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        if (xor_val_result != 0) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto free_resources;
        }

        memset(page_address(dest), 0, PAGE_SIZE);

        /* test for non-zero parity sum */
        op = IOAT_OP_XOR_VAL;

        xor_val_result = 0;
        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_srcs[i])) {
                        err = -ENOMEM;
                        goto dma_unmap;
                }
        }
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test 2nd zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test 2nd zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        if (xor_val_result != SUM_CHECK_P_RESULT) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        goto free_resources;
dma_unmap:
        if (op == IOAT_OP_XOR) {
                while (--i >= 0)
                        dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                       DMA_TO_DEVICE);
                dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        } else if (op == IOAT_OP_XOR_VAL) {
                while (--i >= 0)
                        dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                       DMA_TO_DEVICE);
        }
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        src_idx = IOAT_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}
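
/*
 * ioat_xor_val_self_test() runs three sub-tests against the first
 * channel: a 6-source XOR checked against a known bit pattern, an XOR
 * validate over the sources plus the freshly written destination
 * (which must report a zero parity sum), and a second validate after
 * zeroing the destination (which must report SUM_CHECK_P_RESULT, the
 * non-zero case).
 */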

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
        int rc;

        rc = ioat_dma_self_test(ioat_dma);
        if (rc)
                return rc;

        rc = ioat_xor_val_self_test(ioat_dma);

        return rc;
}

static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioatdma_chan *ioat_chan;
        u32 errmask;

        dma = &ioat_dma->dma_dev;

        /*
         * if we have descriptor write back error status, we mask the
         * error interrupts
         */
        if (ioat_dma->cap & IOAT_CAP_DWBES) {
                list_for_each_entry(c, &dma->channels, device_node) {
                        ioat_chan = to_ioat_chan(c);
                        errmask = readl(ioat_chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                        errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
                                   IOAT_CHANERR_XOR_Q_ERR;
                        writel(errmask, ioat_chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                }
        }
}

static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
        struct pci_dev *pdev = ioat_dma->pdev;
        int dca_en = system_has_dca_enabled(pdev);
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioatdma_chan *ioat_chan;
        int err;
        u16 val16;

        dma = &ioat_dma->dma_dev;
        dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat_issue_pending;
        dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
        dma->device_free_chan_resources = ioat_free_chan_resources;

        dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
        dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

        ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

        if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
                ioat_dma->cap &=
                        ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

        /* dca is incompatible with raid operations */
        if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
                ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

        if (ioat_dma->cap & IOAT_CAP_XOR) {
                dma->max_xor = 8;

                dma_cap_set(DMA_XOR, dma->cap_mask);
                dma->device_prep_dma_xor = ioat_prep_xor;

                dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
                dma->device_prep_dma_xor_val = ioat_prep_xor_val;
        }

        if (ioat_dma->cap & IOAT_CAP_PQ) {
                dma->device_prep_dma_pq = ioat_prep_pq;
                dma->device_prep_dma_pq_val = ioat_prep_pq_val;
                dma_cap_set(DMA_PQ, dma->cap_mask);
                dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

                if (ioat_dma->cap & IOAT_CAP_RAID16SS)
                        dma_set_maxpq(dma, 16, 0);
                else
                        dma_set_maxpq(dma, 8, 0);

                if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
                        dma->device_prep_dma_xor = ioat_prep_pqxor;
                        dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
                        dma_cap_set(DMA_XOR, dma->cap_mask);
                        dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

                        if (ioat_dma->cap & IOAT_CAP_RAID16SS)
                                dma->max_xor = 16;
                        else
                                dma->max_xor = 8;
                }
        }

        dma->device_tx_status = ioat_tx_status;

        /* starting with CB3.3 super extended descriptors are supported */
        if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
                char pool_name[14];
                int i;

                for (i = 0; i < MAX_SED_POOLS; i++) {
                        snprintf(pool_name, sizeof(pool_name),
                                 "ioat_hw%d_sed", i);

                        /* allocate SED DMA pool */
                        ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
                                        &pdev->dev,
                                        SED_SIZE * (i + 1), 64, 0);
                        if (!ioat_dma->sed_hw_pool[i])
                                return -ENOMEM;
                }
        }

        if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
                dma_cap_set(DMA_PRIVATE, dma->cap_mask);

        err = ioat_probe(ioat_dma);
        if (err)
                return err;

        list_for_each_entry(c, &dma->channels, device_node) {
                ioat_chan = to_ioat_chan(c);
                writel(IOAT_DMA_DCA_ANY_CPU,
                       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(ioat_dma);
        if (err)
                return err;

        ioat_kobject_add(ioat_dma, &ioat_ktype);

        if (dca)
                ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

        /* disable relaxed ordering */
        err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
        if (err)
                return pcibios_err_to_errno(err);

        /* clear relaxed ordering enable */
        val16 &= ~IOAT_DEVCTRL_ROE;
        err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
        if (err)
                return pcibios_err_to_errno(err);

        if (ioat_dma->cap & IOAT_CAP_DPS)
                writeb(ioat_pending_level + 1,
                       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);

        return 0;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
        struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
        struct ioatdma_chan *ioat_chan;
        int i;

        if (!ioat_dma)
                return;

        for (i = 0; i < IOAT_MAX_CHANS; i++) {
                ioat_chan = ioat_dma->idx[i];
                if (!ioat_chan)
                        continue;

                spin_lock_bh(&ioat_chan->prep_lock);
                set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                spin_unlock_bh(&ioat_chan->prep_lock);
                /*
                 * Synchronization rule for del_timer_sync():
                 *  - The caller must not hold locks which would prevent
                 *    completion of the timer's handler.
                 * So prep_lock cannot be held before calling it.
                 */
                del_timer_sync(&ioat_chan->timer);

                /* this should quiesce then reset */
                ioat_reset_hw(ioat_chan);
        }

        ioat_disable_interrupts(ioat_dma);
}

static void ioat_resume(struct ioatdma_device *ioat_dma)
{
        struct ioatdma_chan *ioat_chan;
        u32 chanerr;
        int i;

        for (i = 0; i < IOAT_MAX_CHANS; i++) {
                ioat_chan = ioat_dma->idx[i];
                if (!ioat_chan)
                        continue;

                spin_lock_bh(&ioat_chan->prep_lock);
                clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                spin_unlock_bh(&ioat_chan->prep_lock);

                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

                /* no need to reset as shutdown already did that */
        }
}

#define DRV_NAME "ioatdma"

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
                                                 pci_channel_state_t error)
{
        dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

        /* quiesce and block I/O */
        ioat_shutdown(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
        pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;

        dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

        if (pci_enable_device_mem(pdev) < 0) {
                dev_err(&pdev->dev,
                        "Failed to enable PCIe device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);
                pci_wake_from_d3(pdev, false);
        }

        return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
        struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

        /* initialize and bring everything back */
        ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
        .error_detected = ioat_pcie_error_detected,
        .slot_reset = ioat_pcie_error_slot_reset,
        .resume = ioat_pcie_error_resume,
};
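
/*
 * AER recovery maps onto the driver's normal lifecycle: error_detected
 * quiesces the channels via ioat_shutdown(), slot_reset re-enables the
 * PCI device and restores config space, and resume clears CHANERR and
 * reopens the channels through ioat_resume().
 */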

static struct pci_driver ioat_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = ioat_pci_tbl,
        .probe          = ioat_pci_probe,
        .remove         = ioat_remove,
        .shutdown       = ioat_shutdown,
        .err_handler    = &ioat_err_handler,
};

static void release_ioatdma(struct dma_device *device)
{
        struct ioatdma_device *d = to_ioatdma_device(device);
        int i;

        for (i = 0; i < IOAT_MAX_CHANS; i++)
                kfree(d->idx[i]);

        dma_pool_destroy(d->completion_pool);
        kfree(d);
}

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
        struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return NULL;
        d->pdev = pdev;
        d->reg_base = iobase;
        d->dma_dev.device_release = release_ioatdma;
        return d;
}
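
/*
 * alloc_ioatdma() uses a plain kzalloc() rather than a devm allocation:
 * the dma_device's ->device_release callback (release_ioatdma) frees
 * the channel structures and completion pool, so they must stay valid
 * until the dmaengine core drops its last reference to the device.
 */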

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem * const *iomap;
        struct device *dev = &pdev->dev;
        struct ioatdma_device *device;
        int err;

        err = pcim_enable_device(pdev);
        if (err)
                return err;

        err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
        if (err)
                return err;
        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return -ENOMEM;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err)
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                return err;

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err)
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                return err;

        device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
        if (!device)
                return -ENOMEM;
        pci_set_master(pdev);
        pci_set_drvdata(pdev, device);

        device->version = readb(device->reg_base + IOAT_VER_OFFSET);
        if (device->version >= IOAT_VER_3_4)
                ioat_dca_enabled = 0;
        if (device->version >= IOAT_VER_3_0) {
                if (is_skx_ioat(pdev))
                        device->version = IOAT_VER_3_2;
                err = ioat3_dma_probe(device, ioat_dca_enabled);

                if (device->version >= IOAT_VER_3_3)
                        pci_enable_pcie_error_reporting(pdev);
        } else {
                return -ENODEV;
        }

        if (err) {
                dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
                pci_disable_pcie_error_reporting(pdev);
                return -ENODEV;
        }

        return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
        struct ioatdma_device *device = pci_get_drvdata(pdev);

        if (!device)
                return;

        ioat_shutdown(pdev);

        dev_err(&pdev->dev, "Removing dma and dca services\n");
        if (device->dca) {
                unregister_dca_provider(device->dca, &pdev->dev);
                free_dca_provider(device->dca);
                device->dca = NULL;
        }

        pci_disable_pcie_error_reporting(pdev);
        ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
        int err = -ENOMEM;

        pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
                DRV_NAME, IOAT_DMA_VERSION);

        ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!ioat_cache)
                return -ENOMEM;

        ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
        if (!ioat_sed_cache)
                goto err_ioat_cache;

        err = pci_register_driver(&ioat_pci_driver);
        if (err)
                goto err_ioat3_cache;

        return 0;

 err_ioat3_cache:
        kmem_cache_destroy(ioat_sed_cache);

 err_ioat_cache:
        kmem_cache_destroy(ioat_cache);

        return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
        pci_unregister_driver(&ioat_pci_driver);
        /* destroy both caches created in ioat_init_module() */
        kmem_cache_destroy(ioat_sed_cache);
        kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);