linux/drivers/dma/ioat/dma_v2.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
                 "ioat2+: allocate 2^n descriptors per channel"
                 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
                 "ioat2+: upper limit for ring size (default: 16)");

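/* __ioat2_issue_pending - kick the hardware: account for all descriptors
 * prepped since the last issue, mark them issued, and write the updated
 * count to the channel's DMACOUNT register.  Called with prep_lock held.
 */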
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        ioat->dmacount += ioat2_ring_pending(ioat);
        ioat->issued = ioat->head;
        writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
        dev_dbg(to_dev(chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        if (ioat2_ring_pending(ioat)) {
                spin_lock_bh(&ioat->prep_lock);
                __ioat2_issue_pending(ioat);
                spin_unlock_bh(&ioat->prep_lock);
        }
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
        if (ioat2_ring_pending(ioat) > ioat_pending_level)
                __ioat2_issue_pending(ioat);
}

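/* __ioat2_start_null_desc - program a NULL descriptor at the current head
 * and point the channel's chain address at it so the engine (re)starts from
 * a known descriptor.  Called with prep_lock held.
 */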
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat2_ring_space(ioat) < 1) {
                dev_err(to_dev(&ioat->base),
                        "Unable to start null desc - ring full\n");
                return;
        }

        dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);
        desc = ioat2_get_ring_ent(ioat, ioat->head);

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
        ioat2_set_chainaddr(ioat, desc->txd.phys);
        dump_desc_dbg(ioat, desc);
        wmb();
        ioat->head += 1;
        __ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
        spin_lock_bh(&ioat->prep_lock);
        __ioat2_start_null_desc(ioat);
        spin_unlock_bh(&ioat->prep_lock);
}

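/* __cleanup - reclaim completed descriptors
 * @ioat: channel (ring) to clean
 * @phys_complete: physical address of the last completed descriptor, as
 *                 reported in the completion writeback area
 *
 * Walks the ring from ->tail towards ->head, completing cookies and invoking
 * callbacks until the descriptor matching @phys_complete has been processed.
 * Called with cleanup_lock held.
 */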
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct dma_async_tx_descriptor *tx;
        struct ioat_ring_ent *desc;
        bool seen_current = false;
        u16 active;
        int idx = ioat->tail, i;

        dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);

        active = ioat2_ring_active(ioat);
        for (i = 0; i < active && !seen_current; i++) {
                smp_read_barrier_depends();
                prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
                desc = ioat2_get_ring_ent(ioat, idx + i);
                tx = &desc->txd;
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
                        dma_descriptor_unmap(tx);
                        dma_cookie_complete(tx);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }

                if (tx->phys == phys_complete)
                        seen_current = true;
        }
        smp_mb(); /* finish all descriptor reads before incrementing tail */
        ioat->tail = idx + i;
        BUG_ON(active && !seen_current); /* no active descs have written a completion? */

        chan->last_completion = phys_complete;
        if (active - i == 0) {
                dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
                        __func__);
                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat2+ channel (ring) to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;

        spin_lock_bh(&chan->cleanup_lock);
        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);
        spin_unlock_bh(&chan->cleanup_lock);
}

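/* ioat2_cleanup_event - deferred cleanup callback (registered as
 * device->cleanup_fn); reaps completed descriptors and, if the channel is
 * still marked IOAT_RUN, rewrites IOAT_CHANCTRL_RUN to the channel control
 * register.
 */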
void ioat2_cleanup_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;

        ioat2_cleanup(ioat);
        if (!test_bit(IOAT_RUN, &chan->state))
                return;
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

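/* __ioat2_restart_chan - resume a quiesced channel: re-arm the completion
 * timer, reprogram the chain address to the descriptor at ->tail and reissue
 * any pending work, or fall back to a NULL descriptor if nothing is pending.
 */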
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        /* set the tail to be re-issued */
        ioat->issued = ioat->tail;
        ioat->dmacount = 0;
        set_bit(IOAT_COMPLETION_PENDING, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        dev_dbg(to_dev(chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

        if (ioat2_ring_pending(ioat)) {
                struct ioat_ring_ent *desc;

                desc = ioat2_get_ring_ent(ioat, ioat->tail);
                ioat2_set_chainaddr(ioat, desc->txd.phys);
                __ioat2_issue_pending(ioat);
        } else
                __ioat2_start_null_desc(ioat);
}

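/**
 * ioat2_quiesce - suspend the channel and wait for it to stop
 * @chan: channel to quiesce
 * @tmo: timeout in jiffies (0 means wait indefinitely)
 *
 * Returns 0 once the channel is no longer active or idle, or -ETIMEDOUT.
 */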
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;
        int err = 0;
        u32 status;

        status = ioat_chansts(chan);
        if (is_ioat_active(status) || is_ioat_idle(status))
                ioat_suspend(chan);
        while (is_ioat_active(status) || is_ioat_idle(status)) {
                if (tmo && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
                status = ioat_chansts(chan);
                cpu_relax();
        }

        return err;
}

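/**
 * ioat2_reset_sync - request a channel reset and spin until it completes
 * @chan: channel to reset
 * @tmo: timeout in jiffies
 */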
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;
        int err = 0;

        ioat_reset(chan);
        while (ioat_reset_pending(chan)) {
                if (end && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
                cpu_relax();
        }

        return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;

        ioat2_quiesce(chan, 0);
        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        __ioat2_restart_chan(ioat);
}

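/* check_active - with the ring apparently drained, either re-arm the
 * completion timeout (work is still active), drop to the idle timeout, or
 * try to shrink an oversized ring back toward the default allocation order.
 * Called with prep_lock held from the timer event.
 */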
static void check_active(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        if (ioat2_ring_active(ioat)) {
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                return;
        }

        if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        else if (ioat->alloc_order > ioat_get_alloc_order()) {
                /* if the ring is idle, empty, and oversized try to step
                 * down the size
                 */
                reshape_ring(ioat, ioat->alloc_order - 1);

                /* keep shrinking until we get back to our minimum
                 * default size
                 */
                if (ioat->alloc_order > ioat_get_alloc_order())
                        mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }

}

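/**
 * ioat2_timer_event - per-channel watchdog
 * @data: struct ioat2_dma_chan pointer cast to an unsigned long
 *
 * Handles three situations: a halted channel (fatal programming errors are
 * treated as bugs), a channel that has stopped making progress (restarted
 * after one acknowledged grace period), and an idle ring (see check_active()).
 */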
void ioat2_timer_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;
        u64 status;

        status = ioat_chansts(chan);

        /* when halted due to errors, check for channel programming
         * errors before advancing the completion state
         */
        if (is_ioat_halted(status)) {
                u32 chanerr;

                chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
                        __func__, chanerr);
                if (test_bit(IOAT_RUN, &chan->state))
                        BUG_ON(is_ioat_bug(chanerr));
                else /* we never got off the ground */
                        return;
        }

        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
        spin_lock_bh(&chan->cleanup_lock);
        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);
        else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
                spin_lock_bh(&ioat->prep_lock);
                ioat2_restart_channel(ioat);
                spin_unlock_bh(&ioat->prep_lock);
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        } else {
                set_bit(IOAT_COMPLETION_ACK, &chan->state);
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        }

        if (ioat2_ring_active(ioat))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        else {
                spin_lock_bh(&ioat->prep_lock);
                check_active(ioat);
                spin_unlock_bh(&ioat->prep_lock);
        }
        spin_unlock_bh(&chan->cleanup_lock);
}

static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
        /* throw away whatever the channel was doing and get it initialized */
        u32 chanerr;

        ioat2_quiesce(chan, msecs_to_jiffies(100));

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

        return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
        struct ioat2_dma_chan *ioat;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;
        u8 xfercap_log;
        int i;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(device->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(device->idx));
                dma->chancnt = ARRAY_SIZE(device->idx);
        }
        xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_log &= 0x1f; /* bits [4:0] valid */
        if (xfercap_log == 0)
                return 0;
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

        /* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
#endif
        for (i = 0; i < dma->chancnt; i++) {
                ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
                if (!ioat)
                        break;

                ioat_init_channel(device, &ioat->base, i);
                ioat->xfercap_log = xfercap_log;
                spin_lock_init(&ioat->prep_lock);
                if (device->reset_hw(&ioat->base)) {
                        i = 0;
                        break;
                }
        }
        dma->chancnt = i;
        return i;
}

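/* ioat2_tx_submit_unlock - ->tx_submit hook for descriptors on this ring.
 * Assigns the dmaengine cookie, publishes the prepped descriptors by
 * advancing ->head, and drops the prep_lock acquired earlier by
 * ioat2_check_space_lock().
 */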
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        dma_cookie_t cookie;

        cookie = dma_cookie_assign(tx);
        dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

        if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        /* make descriptor updates visible before advancing ioat->head,
         * this is purposefully not smp_wmb() since we are also
         * publishing the descriptor updates to a dma device
         */
        wmb();

        ioat->head += ioat->produce;

        ioat2_update_pending(ioat);
        spin_unlock_bh(&ioat->prep_lock);

        return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        struct ioatdma_device *dma;
        dma_addr_t phys;

        dma = to_ioatdma_device(chan->device);
        hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
        if (!hw)
                return NULL;
        memset(hw, 0, sizeof(*hw));

        desc = kmem_cache_zalloc(ioat2_cache, flags);
        if (!desc) {
                pci_pool_free(dma->dma_pool, hw, phys);
                return NULL;
        }

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.tx_submit = ioat2_tx_submit_unlock;
        desc->hw = hw;
        desc->txd.phys = phys;
        return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
        struct ioatdma_device *dma;

        dma = to_ioatdma_device(chan->device);
        pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
        kmem_cache_free(ioat2_cache, desc);
}

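/* ioat2_alloc_ring - allocate a software ring of 2^order entries and link
 * the backing hardware descriptors into a circular chain.
 */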
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
        struct ioat_ring_ent **ring;
        int descs = 1 << order;
        int i;

        if (order > ioat_get_max_alloc_order())
                return NULL;

        /* allocate the array to hold the software ring */
        ring = kcalloc(descs, sizeof(*ring), flags);
        if (!ring)
                return NULL;
        for (i = 0; i < descs; i++) {
                ring[i] = ioat2_alloc_ring_ent(c, flags);
                if (!ring[i]) {
                        while (i--)
                                ioat2_free_ring_ent(ring[i], c);
                        kfree(ring);
                        return NULL;
                }
                set_desc_id(ring[i], i);
        }

        /* link descs */
        for (i = 0; i < descs-1; i++) {
                struct ioat_ring_ent *next = ring[i+1];
                struct ioat_dma_descriptor *hw = ring[i]->hw;

                hw->next = next->txd.phys;
        }
        ring[i]->hw->next = ring[0]->txd.phys;

        return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/**
 * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_ring_ent **ring;
        u64 status;
        int order;
        int i = 0;

        /* have we already been set up? */
        if (ioat->ring)
                return 1 << ioat->alloc_order;

        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        chan->completion = pci_pool_alloc(chan->device->completion_pool,
                                          GFP_KERNEL, &chan->completion_dma);
        if (!chan->completion)
                return -ENOMEM;

        memset(chan->completion, 0, sizeof(*chan->completion));
        writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
               chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) chan->completion_dma) >> 32,
               chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        order = ioat_get_alloc_order();
        ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        spin_lock_bh(&chan->cleanup_lock);
        spin_lock_bh(&ioat->prep_lock);
        ioat->ring = ring;
        ioat->head = 0;
        ioat->issued = 0;
        ioat->tail = 0;
        ioat->alloc_order = order;
        set_bit(IOAT_RUN, &chan->state);
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);

        ioat2_start_null_desc(ioat);

        /* check that we got off the ground */
        do {
                udelay(1);
                status = ioat_chansts(chan);
        } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

        if (is_ioat_active(status) || is_ioat_idle(status)) {
                return 1 << ioat->alloc_order;
        } else {
                u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

                dev_WARN(to_dev(chan),
                        "failed to start channel chanerr: %#x\n", chanerr);
                ioat2_free_chan_resources(c);
                return -EFAULT;
        }
}

bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
        /* reshape differs from normal ring allocation in that we want
         * to allocate a new software ring while only
         * extending/truncating the hardware ring
         */
        struct ioat_chan_common *chan = &ioat->base;
        struct dma_chan *c = &chan->common;
        const u32 curr_size = ioat2_ring_size(ioat);
        const u16 active = ioat2_ring_active(ioat);
        const u32 new_size = 1 << order;
        struct ioat_ring_ent **ring;
        u16 i;

        if (order > ioat_get_max_alloc_order())
                return false;

        /* double check that we have at least 1 free descriptor */
        if (active == curr_size)
                return false;

        /* when shrinking, verify that we can hold the current active
         * set in the new ring
         */
        if (active >= new_size)
                return false;

        /* allocate the array to hold the software ring */
        ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
        if (!ring)
                return false;

        /* allocate/trim descriptors as needed */
        if (new_size > curr_size) {
                /* copy current descriptors to the new ring */
                for (i = 0; i < curr_size; i++) {
                        u16 curr_idx = (ioat->tail+i) & (curr_size-1);
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat->ring[curr_idx];
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* add new descriptors to the ring */
                for (i = curr_size; i < new_size; i++) {
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
                        if (!ring[new_idx]) {
                                while (i--) {
                                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                                        ioat2_free_ring_ent(ring[new_idx], c);
                                }
                                kfree(ring);
                                return false;
                        }
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* hw link new descriptors */
                for (i = curr_size-1; i < new_size; i++) {
                        u16 new_idx = (ioat->tail+i) & (new_size-1);
                        struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
                        struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

                        hw->next = next->txd.phys;
                }
        } else {
                struct ioat_dma_descriptor *hw;
                struct ioat_ring_ent *next;

                /* copy current descriptors to the new ring, dropping the
                 * removed descriptors
                 */
                for (i = 0; i < new_size; i++) {
                        u16 curr_idx = (ioat->tail+i) & (curr_size-1);
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat->ring[curr_idx];
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* free deleted descriptors */
                for (i = new_size; i < curr_size; i++) {
                        struct ioat_ring_ent *ent;

                        ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
                        ioat2_free_ring_ent(ent, c);
                }

                /* fix up hardware ring */
                hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
                next = ring[(ioat->tail+new_size) & (new_size-1)];
                hw->next = next->txd.phys;
        }

        dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
                __func__, new_size);

        kfree(ioat->ring);
        ioat->ring = ring;
        ioat->alloc_order = order;

        return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
        struct ioat_chan_common *chan = &ioat->base;
        bool retry;

 retry:
        spin_lock_bh(&ioat->prep_lock);
        /* never allow the last descriptor to be consumed; we need at
         * least one free at all times to allow for on-the-fly ring
         * resizing.
         */
        if (likely(ioat2_ring_space(ioat) > num_descs)) {
                dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
                        __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
                ioat->produce = num_descs;
                return 0;  /* with ioat->prep_lock held */
        }
        retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
        spin_unlock_bh(&ioat->prep_lock);

        /* is another cpu already trying to expand the ring? */
        if (retry)
                goto retry;

        spin_lock_bh(&chan->cleanup_lock);
        spin_lock_bh(&ioat->prep_lock);
        retry = reshape_ring(ioat, ioat->alloc_order + 1);
        clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);

        /* if we were able to expand the ring retry the allocation */
        if (retry)
                goto retry;

        if (printk_ratelimit())
                dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
                        __func__, num_descs, ioat->head, ioat->tail, ioat->issued);

        /* progress reclaim in the allocation failure case: we may be
         * called with bottom halves disabled, so we need to trigger
         * the timer event directly
         */
        if (time_is_before_jiffies(chan->timer.expires)
            && timer_pending(&chan->timer)) {
                struct ioatdma_device *device = chan->device;

                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                device->timer_fn((unsigned long) &chan->common);
        }

        return -ENOMEM;
}

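/* ioat2_dma_prep_memcpy_lock - ->device_prep_dma_memcpy hook.  Splits @len
 * into descriptors of at most 1 << xfercap_log bytes and returns with the
 * channel's prep_lock held so descriptors are submitted in ring order
 * (ioat2_tx_submit_unlock releases it).
 *
 * Illustrative client-side sketch (not part of this driver): a dmaengine
 * consumer reaches this through the generic API roughly as below, assuming
 * dst and src are DMA addresses the caller has already mapped:
 *
 *      tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *                                                 DMA_PREP_INTERRUPT);
 *      if (tx) {
 *              cookie = dmaengine_submit(tx);     - calls ioat2_tx_submit_unlock()
 *              dma_async_issue_pending(chan);     - calls ioat2_issue_pending()
 *      }
 */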
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
                           dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        dma_addr_t dst = dma_dest;
        dma_addr_t src = dma_src;
        size_t total_len = len;
        int num_descs, idx, i;

        num_descs = ioat2_xferlen_to_descs(ioat, len);
        if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
                idx = ioat->head;
        else
                return NULL;
        i = 0;
        do {
                size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);
                hw = desc->hw;

                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;

                len -= copy;
                dst += copy;
                src += copy;
                dump_desc_dbg(ioat, desc);
        } while (++i < num_descs);

        desc->txd.flags = flags;
        desc->len = total_len;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        dump_desc_dbg(ioat, desc);
        /* we leave the channel locked to ensure in order submission */

        return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        const u16 total_descs = 1 << ioat->alloc_order;
        int descs;
        int i;

        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */
        if (!ioat->ring)
                return;

        ioat_stop(chan);
        device->reset_hw(chan);

        spin_lock_bh(&chan->cleanup_lock);
        spin_lock_bh(&ioat->prep_lock);
        descs = ioat2_ring_space(ioat);
        dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
        for (i = 0; i < descs; i++) {
                desc = ioat2_get_ring_ent(ioat, ioat->head + i);
                ioat2_free_ring_ent(desc, c);
        }

        if (descs < total_descs)
                dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
                        total_descs - descs);

        for (i = 0; i < total_descs - descs; i++) {
                desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
                dump_desc_dbg(ioat, desc);
                ioat2_free_ring_ent(desc, c);
        }

        kfree(ioat->ring);
        ioat->ring = NULL;
        ioat->alloc_order = 0;
        pci_pool_free(device->completion_pool, chan->completion,
                      chan->completion_dma);
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);

        chan->last_completion = 0;
        chan->completion_dma = 0;
        ioat->dmacount = 0;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        /* ...taken outside the lock, no need to be precise */
        return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
        &ring_size_attr.attr,
        &ring_active_attr.attr,
        &ioat_cap_attr.attr,
        &ioat_version_attr.attr,
        NULL,
};

struct kobj_type ioat2_ktype = {
        .sysfs_ops = &ioat_sysfs_ops,
        .default_attrs = ioat2_attrs,
};

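/**
 * ioat2_dma_probe - wire up the version-2 operations and register the device
 * @device: ioatdma device to initialize
 * @dca: non-zero to also attempt Direct Cache Access (DCA) initialization
 */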
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
        struct pci_dev *pdev = device->pdev;
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
        int err;

        device->enumerate_channels = ioat2_enumerate_channels;
        device->reset_hw = ioat2_reset_hw;
        device->cleanup_fn = ioat2_cleanup_event;
        device->timer_fn = ioat2_timer_event;
        device->self_test = ioat_dma_self_test;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;
        dma->device_tx_status = ioat_dma_tx_status;

        err = ioat_probe(device);
        if (err)
                return err;

        list_for_each_entry(c, &dma->channels, device_node) {
                chan = to_chan_common(c);
                writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
                       chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(device);
        if (err)
                return err;

        ioat_kobject_add(device, &ioat2_ktype);

        if (dca)
                device->dca = ioat2_dca_init(pdev, device->reg_base);

        return err;
}