linux/drivers/dma/ioat/dma.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");

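/*
 * Descriptor submission is batched: ioat1_tx_submit() only links new
 * descriptors into the software chain and bumps ->pending; the hardware is
 * not told about them (via the APPEND command register) until ->pending
 * crosses ioat_pending_level or the client calls ->device_issue_pending().
 * This keeps MMIO writes off the per-descriptor fast path.
 */
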
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioat_chan_common *chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
                chan = ioat_chan_by_index(instance, bit);
                if (test_bit(IOAT_RUN, &chan->state))
                        tasklet_schedule(&chan->cleanup_task);
        }

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioat_chan_common *chan = data;

        if (test_bit(IOAT_RUN, &chan->state))
                tasklet_schedule(&chan->cleanup_task);

        return IRQ_HANDLED;
}

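/*
 * Interrupt handling is two-stage: the hard irq handlers above do little more
 * than acknowledge the interrupt and schedule the per-channel cleanup tasklet
 * (and only while IOAT_RUN is set); descriptor completion processing itself
 * runs later in that tasklet via device->cleanup_fn.
 */
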
/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
        struct dma_device *dma = &device->common;
        struct dma_chan *c = &chan->common;
        unsigned long data = (unsigned long) c;

        chan->device = device;
        chan->reg_base = device->reg_base + (0x80 * (idx + 1));
        spin_lock_init(&chan->cleanup_lock);
        chan->common.device = dma;
        dma_cookie_init(&chan->common);
        list_add_tail(&chan->common.device_node, &dma->channels);
        device->idx[idx] = chan;
        init_timer(&chan->timer);
        chan->timer.function = device->timer_fn;
        chan->timer.data = data;
        tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(device->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(device->idx));
                dma->chancnt = ARRAY_SIZE(device->idx);
        }
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_scale &= 0x1f; /* bits [4:0] valid */
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
#endif
        for (i = 0; i < dma->chancnt; i++) {
                ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
                if (!ioat)
                        break;

                ioat_init_channel(device, &ioat->base, i);
                ioat->xfercap = xfercap;
                spin_lock_init(&ioat->desc_lock);
                INIT_LIST_HEAD(&ioat->free_desc);
                INIT_LIST_HEAD(&ioat->used_desc);
        }
        dma->chancnt = i;
        return i;
}

/**
 * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                    descriptors to hw
 * @ioat: IOAT DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
        void __iomem *reg_base = ioat->base.reg_base;

        dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
                __func__, ioat->pending);
        ioat->pending = 0;
        writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(chan);

        if (ioat->pending > 0) {
                spin_lock_bh(&ioat->desc_lock);
                __ioat1_dma_memcpy_issue_pending(ioat);
                spin_unlock_bh(&ioat->desc_lock);
        }
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        void __iomem *reg_base = chan->reg_base;
        u32 chansts, chanerr;

        dev_warn(to_dev(chan), "reset\n");
        chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
        chansts = *chan->completion & IOAT_CHANSTS_STATUS;
        if (chanerr) {
                dev_err(to_dev(chan),
                        "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
                        chan_num(chan), chansts, chanerr);
                writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
        }

        /*
         * whack it upside the head with a reset
         * and wait for things to settle out.
         * force the pending count to a really big negative
         * to make sure no one forces an issue_pending
         * while we're waiting.
         */

        ioat->pending = INT_MIN;
        writeb(IOAT_CHANCMD_RESET,
               reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
        set_bit(IOAT_RESET_PENDING, &chan->state);
        mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

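/*
 * ioat1_tx_submit() links a prepared descriptor chain into the channel's
 * active list under desc_lock: it assigns the cookie, points the current
 * chain tail's hardware NextDescriptor at the first new descriptor (after a
 * wmb() so the descriptor contents are visible first), splices the new
 * descriptors onto used_desc, and rings the APPEND command only once
 * ->pending reaches ioat_pending_level.
 */
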
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *first;
        struct ioat_desc_sw *chain_tail;
        dma_cookie_t cookie;

        spin_lock_bh(&ioat->desc_lock);
        /* cookie incr and addition to used_list must be atomic */
        cookie = dma_cookie_assign(tx);
        dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

        /* write address into NextDescriptor field of last desc in chain */
        first = to_ioat_desc(desc->tx_list.next);
        chain_tail = to_ioat_desc(ioat->used_desc.prev);
        /* make descriptor updates globally visible before chaining */
        wmb();
        chain_tail->hw->next = first->txd.phys;
        list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
        dump_desc_dbg(ioat, chain_tail);
        dump_desc_dbg(ioat, first);

        if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        ioat->active += desc->hw->tx_cnt;
        ioat->pending += desc->hw->tx_cnt;
        if (ioat->pending >= ioat_pending_level)
                __ioat1_dma_memcpy_issue_pending(ioat);
        spin_unlock_bh(&ioat->desc_lock);

        return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = ioat->base.device;
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));

        INIT_LIST_HEAD(&desc_sw->tx_list);
        dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
        desc_sw->txd.tx_submit = ioat1_tx_submit;
        desc_sw->hw = desc;
        desc_sw->txd.phys = phys;
        set_desc_id(desc_sw, -1);

        return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
                 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *desc;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat->free_desc))
                return ioat->desccount;

        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
                if (!desc) {
                        dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
                        break;
                }
                set_desc_id(desc, i);
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat->desc_lock);
        ioat->desccount = i;
        list_splice(&tmp_list, &ioat->free_desc);
        spin_unlock_bh(&ioat->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        chan->completion = pci_pool_alloc(chan->device->completion_pool,
                                          GFP_KERNEL, &chan->completion_dma);
        memset(chan->completion, 0, sizeof(*chan->completion));
        writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
               chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) chan->completion_dma) >> 32,
               chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        set_bit(IOAT_RUN, &chan->state);
        ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
        dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
                __func__, ioat->desccount);
        return ioat->desccount;
}

void ioat_stop(struct ioat_chan_common *chan)
{
        struct ioatdma_device *device = chan->device;
        struct pci_dev *pdev = device->pdev;
        int chan_id = chan_num(chan);
        struct msix_entry *msix;

        /* 1/ stop irq from firing tasklets
         * 2/ stop the tasklet from re-arming irqs
         */
        clear_bit(IOAT_RUN, &chan->state);

        /* flush inflight interrupts */
        switch (device->irq_mode) {
        case IOAT_MSIX:
                msix = &device->msix_entries[chan_id];
                synchronize_irq(msix->vector);
                break;
        case IOAT_MSI:
        case IOAT_INTX:
                synchronize_irq(pdev->irq);
                break;
        default:
                break;
        }

        /* flush inflight timers */
        del_timer_sync(&chan->timer);

        /* flush inflight tasklet runs */
        tasklet_kill(&chan->cleanup_task);

        /* final cleanup now that everything is quiesced and can't re-arm */
        device->cleanup_fn((unsigned long) &chan->common);
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *ioatdma_device = chan->device;
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */
        if (ioat->desccount == 0)
                return;

        ioat_stop(chan);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
        mdelay(100);

        spin_lock_bh(&ioat->desc_lock);
        list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
                dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
                        __func__, desc_id(desc));
                dump_desc_dbg(ioat, desc);
                in_use_descs++;
                list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->txd.phys);
                kfree(desc);
        }
        list_for_each_entry_safe(desc, _desc,
                                 &ioat->free_desc, node) {
                list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->txd.phys);
                kfree(desc);
        }
        spin_unlock_bh(&ioat->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      chan->completion,
                      chan->completion_dma);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        chan->last_completion = 0;
        chan->completion_dma = 0;
        ioat->pending = 0;
        ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
        struct ioat_desc_sw *new;

        if (!list_empty(&ioat->free_desc)) {
                new = to_ioat_desc(ioat->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
                new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
                if (!new) {
                        dev_err(to_dev(&ioat->base), "alloc failed\n");
                        return NULL;
                }
        }
        dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
                __func__, desc_id(new));
        prefetch(new->hw);
        return new;
}

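/*
 * A memcpy request larger than the channel's xfercap is split into a chain
 * of hardware descriptors, one per xfercap-sized piece; only the final
 * descriptor gets the interrupt/completion-write bits set below.  Purely as
 * an illustration: with a 2 MB xfercap, a 5 MB copy becomes three
 * descriptors (2 MB + 2 MB + 1 MB) that complete as a single transaction.
 */
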
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                      dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_desc_sw *desc;
        size_t copy;
        LIST_HEAD(chain);
        dma_addr_t src = dma_src;
        dma_addr_t dest = dma_dest;
        size_t total_len = len;
        struct ioat_dma_descriptor *hw = NULL;
        int tx_cnt = 0;

        spin_lock_bh(&ioat->desc_lock);
        desc = ioat1_dma_get_next_descriptor(ioat);
        do {
                if (!desc)
                        break;

                tx_cnt++;
                copy = min_t(size_t, len, ioat->xfercap);

                hw = desc->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dest;

                list_add_tail(&desc->node, &chain);

                len -= copy;
                dest += copy;
                src += copy;
                if (len) {
                        struct ioat_desc_sw *next;

                        async_tx_ack(&desc->txd);
                        next = ioat1_dma_get_next_descriptor(ioat);
                        hw->next = next ? next->txd.phys : 0;
                        dump_desc_dbg(ioat, desc);
                        desc = next;
                } else
                        hw->next = 0;
        } while (len);

        if (!desc) {
                struct ioat_chan_common *chan = &ioat->base;

                dev_err(to_dev(chan),
                        "chan%d - get_next_desc failed\n", chan_num(chan));
                list_splice(&chain, &ioat->free_desc);
                spin_unlock_bh(&ioat->desc_lock);
                return NULL;
        }
        spin_unlock_bh(&ioat->desc_lock);

        desc->txd.flags = flags;
        desc->len = total_len;
        list_splice(&chain, &desc->tx_list);
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.compl_write = 1;
        hw->tx_cnt = tx_cnt;
        dump_desc_dbg(ioat, desc);

        return &desc->txd;
}

static void ioat1_cleanup_event(unsigned long data)
{
        struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;

        ioat1_cleanup(ioat);
        if (!test_bit(IOAT_RUN, &chan->state))
                return;
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
                    size_t len, struct ioat_dma_descriptor *hw)
{
        struct pci_dev *pdev = chan->device->pdev;
        size_t offset = len - hw->size;

        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
                ioat_unmap(pdev, hw->dst_addr - offset, len,
                           PCI_DMA_FROMDEVICE, flags, 1);

        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
                ioat_unmap(pdev, hw->src_addr - offset, len,
                           PCI_DMA_TODEVICE, flags, 0);
}

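/*
 * Completion reporting: the hardware updates the 8-byte completion area
 * allocated in ioat1_dma_alloc_chan_resources() with a CHANSTS image that
 * encodes the bus address of the last completed hardware descriptor.  The
 * cleanup path reads that address from memory and walks used_desc up to it,
 * so no MMIO read is needed on the completion fast path.
 */
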
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
        dma_addr_t phys_complete;
        u64 completion;

        completion = *chan->completion;
        phys_complete = ioat_chansts_to_addr(completion);

        dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
                (unsigned long long) phys_complete);

        if (is_ioat_halted(completion)) {
                u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
                        chanerr);

                /* TODO do something to salvage the situation */
        }

        return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
                           dma_addr_t *phys_complete)
{
        *phys_complete = ioat_get_current_completion(chan);
        if (*phys_complete == chan->last_completion)
                return false;
        clear_bit(IOAT_COMPLETION_ACK, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        return true;
}

static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct list_head *_desc, *n;
        struct dma_async_tx_descriptor *tx;

        dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
                 __func__, (unsigned long long) phys_complete);
        list_for_each_safe(_desc, n, &ioat->used_desc) {
                struct ioat_desc_sw *desc;

                prefetch(n);
                desc = list_entry(_desc, typeof(*desc), node);
                tx = &desc->txd;
                /*
                 * Incoming DMA requests may use multiple descriptors,
                 * due to exceeding xfercap, perhaps. If so, only the
                 * last one will have a cookie, and require unmapping.
                 */
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
                        dma_cookie_complete(tx);
                        dma_descriptor_unmap(tx);
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        ioat->active -= desc->hw->tx_cnt;
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }

                if (tx->phys != phys_complete) {
                        /*
                         * a completed entry, but not the last, so clean
                         * up if the client is done with the descriptor
                         */
                        if (async_tx_test_ack(tx))
                                list_move_tail(&desc->node, &ioat->free_desc);
                } else {
                        /*
                         * last used desc. Do not remove, so we can
                         * append from it.
                         */

                        /* if nothing else is pending, cancel the
                         * completion timeout
                         */
                        if (n == &ioat->used_desc) {
                                dev_dbg(to_dev(chan),
                                        "%s cancel completion timeout\n",
                                        __func__);
                                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                        }

                        /* TODO check status bits? */
                        break;
                }
        }

        chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;

        prefetch(chan->completion);

        if (!spin_trylock_bh(&chan->cleanup_lock))
                return;

        if (!ioat_cleanup_preamble(chan, &phys_complete)) {
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        }

        if (!spin_trylock_bh(&ioat->desc_lock)) {
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        }

        __cleanup(ioat, phys_complete);

        spin_unlock_bh(&ioat->desc_lock);
        spin_unlock_bh(&chan->cleanup_lock);
}

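/*
 * ioat1_timer_event() is the channel watchdog.  A pending reset restarts the
 * chain from the last submitted descriptor.  Otherwise, with completions
 * outstanding, the first stall only records the current status and re-arms
 * the timer (IOAT_COMPLETION_ACK); if the next pass still shows no progress,
 * the channel is forcibly reset via ioat1_reset_channel().
 */
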
static void ioat1_timer_event(unsigned long data)
{
        struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;

        dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

        spin_lock_bh(&chan->cleanup_lock);
        if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
                struct ioat_desc_sw *desc;

                spin_lock_bh(&ioat->desc_lock);

                /* restart active descriptors */
                desc = to_ioat_desc(ioat->used_desc.prev);
                ioat_set_chainaddr(ioat, desc->txd.phys);
                ioat_start(chan);

                ioat->pending = 0;
                set_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                spin_unlock_bh(&ioat->desc_lock);
        } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
                dma_addr_t phys_complete;

                spin_lock_bh(&ioat->desc_lock);
                /* if we haven't made progress and we have already
                 * acknowledged a pending completion once, then be more
                 * forceful with a restart
                 */
                if (ioat_cleanup_preamble(chan, &phys_complete))
                        __cleanup(ioat, phys_complete);
                else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
                        ioat1_reset_channel(ioat);
                else {
                        u64 status = ioat_chansts(chan);

                        /* manually update the last completion address */
                        if (ioat_chansts_to_addr(status) != 0)
                                *chan->completion = status;

                        set_bit(IOAT_COMPLETION_ACK, &chan->state);
                        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                }
                spin_unlock_bh(&ioat->desc_lock);
        }
        spin_unlock_bh(&chan->cleanup_lock);
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                   struct dma_tx_state *txstate)
{
        struct ioat_chan_common *chan = to_chan_common(c);
        struct ioatdma_device *device = chan->device;
        enum dma_status ret;

        ret = dma_cookie_status(c, cookie, txstate);
        if (ret == DMA_SUCCESS)
                return ret;

        device->cleanup_fn((unsigned long) c);

        return dma_cookie_status(c, cookie, txstate);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *desc;
        struct ioat_dma_descriptor *hw;

        spin_lock_bh(&ioat->desc_lock);

        desc = ioat1_dma_get_next_descriptor(ioat);

        if (!desc) {
                dev_err(to_dev(chan),
                        "Unable to start null desc - get next desc failed\n");
                spin_unlock_bh(&ioat->desc_lock);
                return;
        }

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
        hw->next = 0;
        list_add_tail(&desc->node, &ioat->used_desc);
        dump_desc_dbg(ioat, desc);

        ioat_set_chainaddr(ioat, desc->txd.phys);
        ioat_start(chan);
        spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
        struct completion *cmp = dma_async_param;

        complete(cmp);
}

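/*
 * ioat_dma_self_test() exercises the full dmaengine client flow against the
 * first channel: allocate channel resources, DMA-map a source and destination
 * buffer, prep a memcpy descriptor, submit it, issue_pending(), then wait for
 * the completion callback and compare the buffers.  Probe fails the device if
 * any step does not complete.
 */
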
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_device *dma = &device->common;
        struct device *dev = &device->pdev->dev;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        unsigned long flags;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(dev, "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_src)) {
                dev_err(dev, "mapping src buffer failed\n");
                err = -ENOMEM;
                goto free_resources;
        }
        dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma_dest)) {
                dev_err(dev, "mapping dest buffer failed\n");
                err = -ENOMEM;
                goto unmap_src;
        }
        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
                DMA_PREP_INTERRUPT;
        tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                   IOAT_TEST_SIZE, flags);
        if (!tx) {
                dev_err(dev, "Self-test prep failed, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test setup failed, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL)
                                        != DMA_SUCCESS) {
                dev_err(dev, "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }

unmap_dma:
        dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
        dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
                    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
                 "set ioat interrupt style: msix (default), "
                 "msix-single-vector, msi, intx");

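/*
 * Interrupt setup tries the requested style first and then degrades
 * gracefully: per-channel MSI-X, then a single shared MSI-X vector, then
 * MSI, then legacy INTx.  Whichever mode sticks is recorded in
 * device->irq_mode so teardown (ioat_stop) can synchronize the right vector.
 */
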
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
        struct ioat_chan_common *chan;
        struct pci_dev *pdev = device->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
        int i, j, msixcnt;
        int err = -EINVAL;
        u8 intrctrl = 0;

        if (!strcmp(ioat_interrupt_style, "msix"))
                goto msix;
        if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
                goto msix_single_vector;
        if (!strcmp(ioat_interrupt_style, "msi"))
                goto msi;
        if (!strcmp(ioat_interrupt_style, "intx"))
                goto intx;
        dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
        goto err_no_irq;

msix:
        /* The number of MSI-X vectors should equal the number of channels */
        msixcnt = device->common.chancnt;
        for (i = 0; i < msixcnt; i++)
                device->msix_entries[i].entry = i;

        err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
        if (err < 0)
                goto msi;
        if (err > 0)
                goto msix_single_vector;

        for (i = 0; i < msixcnt; i++) {
                msix = &device->msix_entries[i];
                chan = ioat_chan_by_index(device, i);
                err = devm_request_irq(dev, msix->vector,
                                       ioat_dma_do_interrupt_msix, 0,
                                       "ioat-msix", chan);
                if (err) {
                        for (j = 0; j < i; j++) {
                                msix = &device->msix_entries[j];
                                chan = ioat_chan_by_index(device, j);
                                devm_free_irq(dev, msix->vector, chan);
                        }
                        goto msix_single_vector;
                }
        }
        intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
        device->irq_mode = IOAT_MSIX;
        goto done;

msix_single_vector:
        msix = &device->msix_entries[0];
        msix->entry = 0;
        err = pci_enable_msix(pdev, device->msix_entries, 1);
        if (err)
                goto msi;

        err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
                               "ioat-msix", device);
        if (err) {
                pci_disable_msix(pdev);
                goto msi;
        }
        device->irq_mode = IOAT_MSIX_SINGLE;
        goto done;

msi:
        err = pci_enable_msi(pdev);
        if (err)
                goto intx;

        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
                               "ioat-msi", device);
        if (err) {
                pci_disable_msi(pdev);
                goto intx;
        }
        device->irq_mode = IOAT_MSI;
        goto done;

intx:
        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
                               IRQF_SHARED, "ioat-intx", device);
        if (err)
                goto err_no_irq;

        device->irq_mode = IOAT_INTX;
done:
        if (device->intr_quirk)
                device->intr_quirk(device);
        intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
        writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
        return 0;

err_no_irq:
        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
        device->irq_mode = IOAT_NOIRQ;
        dev_err(dev, "no usable interrupts\n");
        return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int ioat_probe(struct ioatdma_device *device)
{
        int err = -ENODEV;
        struct dma_device *dma = &device->common;
        struct pci_dev *pdev = device->pdev;
        struct device *dev = &pdev->dev;

        /* DMA coherent memory pool for DMA descriptor allocations */
        device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                                           sizeof(struct ioat_dma_descriptor),
                                           64, 0);
        if (!device->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        device->completion_pool = pci_pool_create("completion_pool", pdev,
                                                  sizeof(u64), SMP_CACHE_BYTES,
                                                  SMP_CACHE_BYTES);

        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        device->enumerate_channels(device);

        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma->dev = &pdev->dev;

        if (!dma->chancnt) {
                dev_err(dev, "channel enumeration error\n");
                goto err_setup_interrupts;
        }

        err = ioat_dma_setup_interrupts(device);
        if (err)
                goto err_setup_interrupts;

        err = device->self_test(device);
        if (err)
                goto err_self_test;

        return 0;

err_self_test:
        ioat_disable_interrupts(device);
err_setup_interrupts:
        pci_pool_destroy(device->completion_pool);
err_completion_pool:
        pci_pool_destroy(device->dma_pool);
err_dma_pool:
        return err;
}

int ioat_register(struct ioatdma_device *device)
{
        int err = dma_async_device_register(&device->common);

        if (err) {
                ioat_disable_interrupts(device);
                pci_pool_destroy(device->completion_pool);
                pci_pool_destroy(device->dma_pool);
        }

        return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
        struct pci_dev *pdev = device->pdev;
        u32 dmactrl;

        pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
        if (pdev->msi_enabled)
                dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
        else
                dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
        pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);

        return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);

        return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
        struct dma_device *dma = c->device;

        return sprintf(page, "copy%s%s%s%s%s%s\n",
                       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
                       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
                       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
                       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
                       dma_has_cap(DMA_MEMSET, dma->cap_mask)  ? " fill" : "",
                       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");

}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
        struct dma_device *dma = c->device;
        struct ioatdma_device *device = to_ioatdma_device(dma);

        return sprintf(page, "%d.%d\n",
                       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
        &ring_size_attr.attr,
        &ring_active_attr.attr,
        &ioat_cap_attr.attr,
        &ioat_version_attr.attr,
        NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct ioat_sysfs_entry *entry;
        struct ioat_chan_common *chan;

        entry = container_of(attr, struct ioat_sysfs_entry, attr);
        chan = container_of(kobj, struct ioat_chan_common, kobj);

        if (!entry->show)
                return -EIO;
        return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
        .show   = ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
        .sysfs_ops = &ioat_sysfs_ops,
        .default_attrs = ioat1_attrs,
};

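/*
 * Each channel's attributes are published under a "quickdata" kobject hung
 * off the dmaengine channel device, which typically shows up as
 * /sys/class/dma/dma<N>chan<M>/quickdata/{ring_size,ring_active,cap,version}.
 */
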
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
        struct dma_device *dma = &device->common;
        struct dma_chan *c;

        list_for_each_entry(c, &dma->channels, device_node) {
                struct ioat_chan_common *chan = to_chan_common(c);
                struct kobject *parent = &c->dev->device.kobj;
                int err;

                err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
                if (err) {
                        dev_warn(to_dev(chan),
                                 "sysfs init error (%d), continuing...\n", err);
                        kobject_put(&chan->kobj);
                        set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
                }
        }
}

void ioat_kobject_del(struct ioatdma_device *device)
{
        struct dma_device *dma = &device->common;
        struct dma_chan *c;

        list_for_each_entry(c, &dma->channels, device_node) {
                struct ioat_chan_common *chan = to_chan_common(c);

                if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
                        kobject_del(&chan->kobj);
                        kobject_put(&chan->kobj);
                }
        }
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
        struct pci_dev *pdev = device->pdev;
        struct dma_device *dma;
        int err;

        device->intr_quirk = ioat1_intr_quirk;
        device->enumerate_channels = ioat1_enumerate_channels;
        device->self_test = ioat_dma_self_test;
        device->timer_fn = ioat1_timer_event;
        device->cleanup_fn = ioat1_cleanup_event;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
        dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
        dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
        dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
        dma->device_tx_status = ioat_dma_tx_status;

        err = ioat_probe(device);
        if (err)
                return err;
        err = ioat_register(device);
        if (err)
                return err;
        ioat_kobject_add(device, &ioat1_ktype);

        if (dca)
                device->dca = ioat_dca_init(pdev, device->reg_base);

        return err;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
        struct dma_device *dma = &device->common;

        ioat_disable_interrupts(device);

        ioat_kobject_del(device);

        dma_async_device_unregister(dma);

        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);

        INIT_LIST_HEAD(&dma->channels);
1289}
1290