linux/drivers/dma/iop-adma.c
<<
>>
Prefs
   1/*
   2 * offload engine driver for the Intel Xscale series of i/o processors
   3 * Copyright © 2006, Intel Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17 *
  18 */
  19
  20/*
 * This driver supports the asynchronous DMA copy and RAID engines available
  22 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
  23 */
  24
  25#include <linux/init.h>
  26#include <linux/module.h>
  27#include <linux/delay.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/spinlock.h>
  30#include <linux/interrupt.h>
  31#include <linux/platform_device.h>
  32#include <linux/memory.h>
  33#include <linux/ioport.h>
  34#include <linux/raid/pq.h>
  35
  36#include <mach/adma.h>
  37
/* Convert generic dmaengine handles to their iop-adma container types */
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)
  43
  44/**
  45 * iop_adma_free_slots - flags descriptor slots for reuse
  46 * @slot: Slot to free
  47 * Caller must hold &iop_chan->lock while calling this function
  48 */
  49static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
  50{
  51        int stride = slot->slots_per_op;
  52
  53        while (stride--) {
  54                slot->slots_per_op = 0;
  55                slot = list_entry(slot->slot_node.next,
  56                                struct iop_adma_desc_slot,
  57                                slot_node);
  58        }
  59}
  60
/*
 * iop_desc_unmap - dma_unmap the source and destination pages of a
 * completed (non-pq) descriptor, honoring the DMA_COMPL_SKIP_*_UNMAP
 * flags recorded on the transaction.  Clears group_head afterwards so
 * the unmap only runs once per operation.
 */
static void
iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;
	struct iop_adma_desc_slot *unmap = desc->group_head;
	struct device *dev = &iop_chan->device->pdev->dev;
	u32 len = unmap->unmap_len;
	enum dma_ctrl_flags flags = tx->flags;
	u32 src_cnt;
	dma_addr_t addr;
	dma_addr_t dest;

	src_cnt = unmap->unmap_src_cnt;
	dest = iop_desc_get_dest_addr(unmap, iop_chan);
	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		enum dma_data_direction dir;

		/* a multi-source (xor) destination may also have been
		 * read by the engine, so it was mapped bidirectionally
		 */
		if (src_cnt > 1) /* is xor? */
			dir = DMA_BIDIRECTIONAL;
		else
			dir = DMA_FROM_DEVICE;

		dma_unmap_page(dev, dest, len, dir);
	}

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		while (src_cnt--) {
			addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
			/* skip a source aliasing the destination; it was
			 * already unmapped above */
			if (addr == dest)
				continue;
			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
		}
	}
	desc->group_head = NULL;
}
  96
/*
 * iop_desc_unmap_pq - dma_unmap the sources and P/Q destinations of a
 * completed pq descriptor, honoring the DMA_COMPL_SKIP_*_UNMAP flags.
 * For a pq-zero-sum operation (pq_check_result set) P and Q were only
 * read by the engine, so they are unmapped as sources instead of
 * destinations.
 */
static void
iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;
	struct iop_adma_desc_slot *unmap = desc->group_head;
	struct device *dev = &iop_chan->device->pdev->dev;
	u32 len = unmap->unmap_len;
	enum dma_ctrl_flags flags = tx->flags;
	u32 src_cnt = unmap->unmap_src_cnt;
	dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
	dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
	int i;

	/* NOTE(review): a DMA_PREP_CONTINUE operation carries three
	 * extra trailing sources that are presumably not client page
	 * mappings, hence excluded from unmap — confirm against the
	 * iop_adma_prep_dma_pq continuation setup */
	if (tx->flags & DMA_PREP_CONTINUE)
		src_cnt -= 3;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
		dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
		dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
	}

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		dma_addr_t addr;

		for (i = 0; i < src_cnt; i++) {
			addr = iop_desc_get_src_addr(unmap, iop_chan, i);
			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
		}
		/* zero-sum: P/Q were inputs, unmap them as such */
		if (desc->pq_check_result) {
			dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
			dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
		}
	}

	desc->group_head = NULL;
}
 133
 134
/*
 * iop_adma_run_tx_complete_actions - finish a completed descriptor:
 * record its cookie, invoke the client callback, unmap the transfer's
 * pages, and kick any dependent operations.  Returns the most recent
 * completed cookie (or the @cookie passed in when this descriptor had
 * none assigned).
 */
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	BUG_ON(tx->cookie < 0);
	if (tx->cookie > 0) {
		/* zero the cookie so this descriptor is only completed once */
		cookie = tx->cookie;
		tx->cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (tx->callback)
			tx->callback(tx->callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			if (iop_desc_is_pq(desc))
				iop_desc_unmap_pq(iop_chan, desc);
			else
				iop_desc_unmap(iop_chan, desc);
		}
	}

	/* run dependent operations */
	dma_run_dependencies(tx);

	return cookie;
}
 168
 169static int
 170iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
 171        struct iop_adma_chan *iop_chan)
 172{
 173        /* the client is allowed to attach dependent operations
 174         * until 'ack' is set
 175         */
 176        if (!async_tx_test_ack(&desc->async_tx))
 177                return 0;
 178
 179        /* leave the last descriptor in the chain
 180         * so we can append to it
 181         */
 182        if (desc->chain_node.next == &iop_chan->chain)
 183                return 1;
 184
 185        dev_dbg(iop_chan->device->common.dev,
 186                "\tfree slot: %d slots_per_op: %d\n",
 187                desc->idx, desc->slots_per_op);
 188
 189        list_del(&desc->chain_node);
 190        iop_adma_free_slots(desc);
 191
 192        return 0;
 193}
 194
/*
 * __iop_adma_slot_cleanup - walk the software chain from the oldest
 * descriptor, run completion actions for everything the hardware has
 * finished with, and recycle the freed slots.  Multi-slot operations
 * ("groups") are completed all-or-nothing.  The walk stops at the
 * descriptor currently loaded in the hardware channel.
 * Caller must hold iop_chan->lock.
 */
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			/* single-descriptor operations are not groups */
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				/* OR together the zero-sum result of every
				 * member descriptor: non-zero means the
				 * check failed somewhere in the group */
				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					    pr_debug("\titer%d result: %d\n",
					    grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
					iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	/* publish the newest completed cookie for status queries */
	if (cookie > 0) {
		iop_chan->completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}
 323
/*
 * iop_adma_slot_cleanup - process-context entry point for descriptor
 * reclaim; takes the channel lock (bottom-half safe) around the real
 * work in __iop_adma_slot_cleanup().
 */
static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}
 331
/* tasklet (softirq) entry point for completion processing; @data is the
 * iop_adma_chan pointer packed into an unsigned long at tasklet setup
 */
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	/* lockdep will flag dependency submissions as potentially
	 * recursive locking, this is not the case as a dependency
	 * submission will never recurse a channels submit routine.
	 * There are checks in async_tx.c to prevent this.
	 */
	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}
 345
/*
 * iop_adma_alloc_slots - find @num_slots contiguous, aligned, free
 * descriptor slots and reserve them as one operation group.  Returns
 * the tail slot of the group (its cookie is set to -EBUSY until
 * submit), or NULL when no contiguous run is available even after a
 * second pass and a direct-reclaim attempt.
 * Caller must hold iop_chan->lock.
 */
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
			struct iop_adma_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		/* non-zero slots_per_op marks a slot as in use */
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			/* claim the run: one iteration per operation of
			 * slots_per_op slots each */
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				/* stamp a descending slots_per_op count so
				 * iop_adma_free_slots can find the run */
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}
 439
 440static dma_cookie_t
 441iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
 442        struct iop_adma_desc_slot *desc)
 443{
 444        dma_cookie_t cookie = iop_chan->common.cookie;
 445        cookie++;
 446        if (cookie < 0)
 447                cookie = 1;
 448        iop_chan->common.cookie = desc->async_tx.cookie = cookie;
 449        return cookie;
 450}
 451
 452static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
 453{
 454        dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
 455                iop_chan->pending);
 456
 457        if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
 458                iop_chan->pending = 0;
 459                iop_chan_append(iop_chan);
 460        }
 461}
 462
/*
 * iop_adma_tx_submit - append a prepared descriptor group to the
 * channel's software chain, link it into the hardware descriptor list,
 * and possibly start the engine if enough work is pending.  Returns
 * the newly assigned cookie.
 */
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

	/* splice the group onto the end of the software chain */
	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}
 508
 509static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 510static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 511
 512/**
 513 * iop_adma_alloc_chan_resources -  returns the number of allocated descriptors
 514 * @chan - allocate descriptor resources for this channel
 515 * @client - current client requesting the channel be ready for requests
 516 *
 517 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 518 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 519 * greater than 2x the number slots needed to satisfy a device->max_xor
 520 * request.
 521 * */
 522static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 523{
 524        char *hw_desc;
 525        int idx;
 526        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 527        struct iop_adma_desc_slot *slot = NULL;
 528        int init = iop_chan->slots_allocated ? 0 : 1;
 529        struct iop_adma_platform_data *plat_data =
 530                iop_chan->device->pdev->dev.platform_data;
 531        int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
 532
 533        /* Allocate descriptor slots */
 534        do {
 535                idx = iop_chan->slots_allocated;
 536                if (idx == num_descs_in_pool)
 537                        break;
 538
 539                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 540                if (!slot) {
 541                        printk(KERN_INFO "IOP ADMA Channel only initialized"
 542                                " %d descriptor slots", idx);
 543                        break;
 544                }
 545                hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
 546                slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 547
 548                dma_async_tx_descriptor_init(&slot->async_tx, chan);
 549                slot->async_tx.tx_submit = iop_adma_tx_submit;
 550                INIT_LIST_HEAD(&slot->tx_list);
 551                INIT_LIST_HEAD(&slot->chain_node);
 552                INIT_LIST_HEAD(&slot->slot_node);
 553                hw_desc = (char *) iop_chan->device->dma_desc_pool;
 554                slot->async_tx.phys =
 555                        (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 556                slot->idx = idx;
 557
 558                spin_lock_bh(&iop_chan->lock);
 559                iop_chan->slots_allocated++;
 560                list_add_tail(&slot->slot_node, &iop_chan->all_slots);
 561                spin_unlock_bh(&iop_chan->lock);
 562        } while (iop_chan->slots_allocated < num_descs_in_pool);
 563
 564        if (idx && !iop_chan->last_used)
 565                iop_chan->last_used = list_entry(iop_chan->all_slots.next,
 566                                        struct iop_adma_desc_slot,
 567                                        slot_node);
 568
 569        dev_dbg(iop_chan->device->common.dev,
 570                "allocated %d descriptor slots last_used: %p\n",
 571                iop_chan->slots_allocated, iop_chan->last_used);
 572
 573        /* initialize the channel and the chain with a null operation */
 574        if (init) {
 575                if (dma_has_cap(DMA_MEMCPY,
 576                        iop_chan->device->common.cap_mask))
 577                        iop_chan_start_null_memcpy(iop_chan);
 578                else if (dma_has_cap(DMA_XOR,
 579                        iop_chan->device->common.cap_mask))
 580                        iop_chan_start_null_xor(iop_chan);
 581                else
 582                        BUG();
 583        }
 584
 585        return (idx > 0) ? idx : -ENOMEM;
 586}
 587
 588static struct dma_async_tx_descriptor *
 589iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 590{
 591        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 592        struct iop_adma_desc_slot *sw_desc, *grp_start;
 593        int slot_cnt, slots_per_op;
 594
 595        dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 596
 597        spin_lock_bh(&iop_chan->lock);
 598        slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
 599        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 600        if (sw_desc) {
 601                grp_start = sw_desc->group_head;
 602                iop_desc_init_interrupt(grp_start, iop_chan);
 603                grp_start->unmap_len = 0;
 604                sw_desc->async_tx.flags = flags;
 605        }
 606        spin_unlock_bh(&iop_chan->lock);
 607
 608        return sw_desc ? &sw_desc->async_tx : NULL;
 609}
 610
 611static struct dma_async_tx_descriptor *
 612iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 613                         dma_addr_t dma_src, size_t len, unsigned long flags)
 614{
 615        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 616        struct iop_adma_desc_slot *sw_desc, *grp_start;
 617        int slot_cnt, slots_per_op;
 618
 619        if (unlikely(!len))
 620                return NULL;
 621        BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 622
 623        dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 624                __func__, len);
 625
 626        spin_lock_bh(&iop_chan->lock);
 627        slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
 628        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 629        if (sw_desc) {
 630                grp_start = sw_desc->group_head;
 631                iop_desc_init_memcpy(grp_start, flags);
 632                iop_desc_set_byte_count(grp_start, iop_chan, len);
 633                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 634                iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 635                sw_desc->unmap_src_cnt = 1;
 636                sw_desc->unmap_len = len;
 637                sw_desc->async_tx.flags = flags;
 638        }
 639        spin_unlock_bh(&iop_chan->lock);
 640
 641        return sw_desc ? &sw_desc->async_tx : NULL;
 642}
 643
 644static struct dma_async_tx_descriptor *
 645iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 646                         int value, size_t len, unsigned long flags)
 647{
 648        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 649        struct iop_adma_desc_slot *sw_desc, *grp_start;
 650        int slot_cnt, slots_per_op;
 651
 652        if (unlikely(!len))
 653                return NULL;
 654        BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 655
 656        dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 657                __func__, len);
 658
 659        spin_lock_bh(&iop_chan->lock);
 660        slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
 661        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 662        if (sw_desc) {
 663                grp_start = sw_desc->group_head;
 664                iop_desc_init_memset(grp_start, flags);
 665                iop_desc_set_byte_count(grp_start, iop_chan, len);
 666                iop_desc_set_block_fill_val(grp_start, value);
 667                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 668                sw_desc->unmap_src_cnt = 1;
 669                sw_desc->unmap_len = len;
 670                sw_desc->async_tx.flags = flags;
 671        }
 672        spin_unlock_bh(&iop_chan->lock);
 673
 674        return sw_desc ? &sw_desc->async_tx : NULL;
 675}
 676
 677static struct dma_async_tx_descriptor *
 678iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 679                      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 680                      unsigned long flags)
 681{
 682        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 683        struct iop_adma_desc_slot *sw_desc, *grp_start;
 684        int slot_cnt, slots_per_op;
 685
 686        if (unlikely(!len))
 687                return NULL;
 688        BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 689
 690        dev_dbg(iop_chan->device->common.dev,
 691                "%s src_cnt: %d len: %u flags: %lx\n",
 692                __func__, src_cnt, len, flags);
 693
 694        spin_lock_bh(&iop_chan->lock);
 695        slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 696        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 697        if (sw_desc) {
 698                grp_start = sw_desc->group_head;
 699                iop_desc_init_xor(grp_start, src_cnt, flags);
 700                iop_desc_set_byte_count(grp_start, iop_chan, len);
 701                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 702                sw_desc->unmap_src_cnt = src_cnt;
 703                sw_desc->unmap_len = len;
 704                sw_desc->async_tx.flags = flags;
 705                while (src_cnt--)
 706                        iop_desc_set_xor_src_addr(grp_start, src_cnt,
 707                                                  dma_src[src_cnt]);
 708        }
 709        spin_unlock_bh(&iop_chan->lock);
 710
 711        return sw_desc ? &sw_desc->async_tx : NULL;
 712}
 713
 714static struct dma_async_tx_descriptor *
 715iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 716                          unsigned int src_cnt, size_t len, u32 *result,
 717                          unsigned long flags)
 718{
 719        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 720        struct iop_adma_desc_slot *sw_desc, *grp_start;
 721        int slot_cnt, slots_per_op;
 722
 723        if (unlikely(!len))
 724                return NULL;
 725
 726        dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
 727                __func__, src_cnt, len);
 728
 729        spin_lock_bh(&iop_chan->lock);
 730        slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
 731        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 732        if (sw_desc) {
 733                grp_start = sw_desc->group_head;
 734                iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 735                iop_desc_set_zero_sum_byte_count(grp_start, len);
 736                grp_start->xor_check_result = result;
 737                pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 738                        __func__, grp_start->xor_check_result);
 739                sw_desc->unmap_src_cnt = src_cnt;
 740                sw_desc->unmap_len = len;
 741                sw_desc->async_tx.flags = flags;
 742                while (src_cnt--)
 743                        iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 744                                                       dma_src[src_cnt]);
 745        }
 746        spin_unlock_bh(&iop_chan->lock);
 747
 748        return sw_desc ? &sw_desc->async_tx : NULL;
 749}
 750
 751static struct dma_async_tx_descriptor *
 752iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 753                     unsigned int src_cnt, const unsigned char *scf, size_t len,
 754                     unsigned long flags)
 755{
 756        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 757        struct iop_adma_desc_slot *sw_desc, *g;
 758        int slot_cnt, slots_per_op;
 759        int continue_srcs;
 760
 761        if (unlikely(!len))
 762                return NULL;
 763        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 764
 765        dev_dbg(iop_chan->device->common.dev,
 766                "%s src_cnt: %d len: %u flags: %lx\n",
 767                __func__, src_cnt, len, flags);
 768
 769        if (dmaf_p_disabled_continue(flags))
 770                continue_srcs = 1+src_cnt;
 771        else if (dmaf_continue(flags))
 772                continue_srcs = 3+src_cnt;
 773        else
 774                continue_srcs = 0+src_cnt;
 775
 776        spin_lock_bh(&iop_chan->lock);
 777        slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
 778        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 779        if (sw_desc) {
 780                int i;
 781
 782                g = sw_desc->group_head;
 783                iop_desc_set_byte_count(g, iop_chan, len);
 784
 785                /* even if P is disabled its destination address (bits
 786                 * [3:0]) must match Q.  It is ok if P points to an
 787                 * invalid address, it won't be written.
 788                 */
 789                if (flags & DMA_PREP_PQ_DISABLE_P)
 790                        dst[0] = dst[1] & 0x7;
 791
 792                iop_desc_set_pq_addr(g, dst);
 793                sw_desc->unmap_src_cnt = src_cnt;
 794                sw_desc->unmap_len = len;
 795                sw_desc->async_tx.flags = flags;
 796                for (i = 0; i < src_cnt; i++)
 797                        iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
 798
 799                /* if we are continuing a previous operation factor in
 800                 * the old p and q values, see the comment for dma_maxpq
 801                 * in include/linux/dmaengine.h
 802                 */
 803                if (dmaf_p_disabled_continue(flags))
 804                        iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 805                else if (dmaf_continue(flags)) {
 806                        iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
 807                        iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 808                        iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
 809                }
 810                iop_desc_init_pq(g, i, flags);
 811        }
 812        spin_unlock_bh(&iop_chan->lock);
 813
 814        return sw_desc ? &sw_desc->async_tx : NULL;
 815}
 816
 817static struct dma_async_tx_descriptor *
 818iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 819                         unsigned int src_cnt, const unsigned char *scf,
 820                         size_t len, enum sum_check_flags *pqres,
 821                         unsigned long flags)
 822{
 823        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 824        struct iop_adma_desc_slot *sw_desc, *g;
 825        int slot_cnt, slots_per_op;
 826
 827        if (unlikely(!len))
 828                return NULL;
 829        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 830
 831        dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
 832                __func__, src_cnt, len);
 833
 834        spin_lock_bh(&iop_chan->lock);
 835        slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
 836        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 837        if (sw_desc) {
 838                /* for validate operations p and q are tagged onto the
 839                 * end of the source list
 840                 */
 841                int pq_idx = src_cnt;
 842
 843                g = sw_desc->group_head;
 844                iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
 845                iop_desc_set_pq_zero_sum_byte_count(g, len);
 846                g->pq_check_result = pqres;
 847                pr_debug("\t%s: g->pq_check_result: %p\n",
 848                        __func__, g->pq_check_result);
 849                sw_desc->unmap_src_cnt = src_cnt+2;
 850                sw_desc->unmap_len = len;
 851                sw_desc->async_tx.flags = flags;
 852                while (src_cnt--)
 853                        iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
 854                                                          src[src_cnt],
 855                                                          scf[src_cnt]);
 856                iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
 857        }
 858        spin_unlock_bh(&iop_chan->lock);
 859
 860        return sw_desc ? &sw_desc->async_tx : NULL;
 861}
 862
/**
 * iop_adma_free_chan_resources - release a channel's descriptor slots
 * @chan: ADMA channel to tear down
 *
 * Reaps completed descriptors, unlinks anything still on the hardware
 * chain, then frees every allocated slot.  One descriptor left on the
 * chain is expected (see the comment near the bottom); more than one
 * indicates descriptors were freed while still in use.
 */
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	/* run a final cleanup pass before taking the list apart */
	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	/* count and unlink descriptors still on the hardware chain */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	/* walk the slot list in reverse, freeing each slot */
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}
 894
 895/**
 896 * iop_adma_is_complete - poll the status of an ADMA transaction
 897 * @chan: ADMA channel handle
 898 * @cookie: ADMA transaction identifier
 899 */
 900static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
 901                                        dma_cookie_t cookie,
 902                                        dma_cookie_t *done,
 903                                        dma_cookie_t *used)
 904{
 905        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 906        dma_cookie_t last_used;
 907        dma_cookie_t last_complete;
 908        enum dma_status ret;
 909
 910        last_used = chan->cookie;
 911        last_complete = iop_chan->completed_cookie;
 912
 913        if (done)
 914                *done = last_complete;
 915        if (used)
 916                *used = last_used;
 917
 918        ret = dma_async_is_complete(cookie, last_complete, last_used);
 919        if (ret == DMA_SUCCESS)
 920                return ret;
 921
 922        iop_adma_slot_cleanup(iop_chan);
 923
 924        last_used = chan->cookie;
 925        last_complete = iop_chan->completed_cookie;
 926
 927        if (done)
 928                *done = last_complete;
 929        if (used)
 930                *used = last_used;
 931
 932        return dma_async_is_complete(cookie, last_complete, last_used);
 933}
 934
/* end-of-transfer interrupt: defer descriptor cleanup to the channel's
 * tasklet, then acknowledge the EOT status in hardware.
 */
static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	/* cleanup runs in softirq context, not here */
	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}
 947
/* end-of-chain interrupt: defer descriptor cleanup to the channel's
 * tasklet, then acknowledge the EOC status in hardware.
 */
static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	/* cleanup runs in softirq context, not here */
	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}
 960
 961static irqreturn_t iop_adma_err_handler(int irq, void *data)
 962{
 963        struct iop_adma_chan *chan = data;
 964        unsigned long status = iop_chan_get_status(chan);
 965
 966        dev_printk(KERN_ERR, chan->device->common.dev,
 967                "error ( %s%s%s%s%s%s%s)\n",
 968                iop_is_err_int_parity(status, chan) ? "int_parity " : "",
 969                iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
 970                iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
 971                iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
 972                iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
 973                iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
 974                iop_is_err_split_tx(status, chan) ? "split_tx " : "");
 975
 976        iop_adma_device_clear_err_status(chan);
 977
 978        BUG();
 979
 980        return IRQ_HANDLED;
 981}
 982
 983static void iop_adma_issue_pending(struct dma_chan *chan)
 984{
 985        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 986
 987        if (iop_chan->pending) {
 988                iop_chan->pending = 0;
 989                iop_chan_append(iop_chan);
 990        }
 991}
 992
 993/*
 994 * Perform a transaction to verify the HW works.
 995 */
 996#define IOP_ADMA_TEST_SIZE 2000
 997
 998static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 999{
1000        int i;
1001        void *src, *dest;
1002        dma_addr_t src_dma, dest_dma;
1003        struct dma_chan *dma_chan;
1004        dma_cookie_t cookie;
1005        struct dma_async_tx_descriptor *tx;
1006        int err = 0;
1007        struct iop_adma_chan *iop_chan;
1008
1009        dev_dbg(device->common.dev, "%s\n", __func__);
1010
1011        src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
1012        if (!src)
1013                return -ENOMEM;
1014        dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
1015        if (!dest) {
1016                kfree(src);
1017                return -ENOMEM;
1018        }
1019
1020        /* Fill in src buffer */
1021        for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
1022                ((u8 *) src)[i] = (u8)i;
1023
1024        /* Start copy, using first DMA channel */
1025        dma_chan = container_of(device->common.channels.next,
1026                                struct dma_chan,
1027                                device_node);
1028        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1029                err = -ENODEV;
1030                goto out;
1031        }
1032
1033        dest_dma = dma_map_single(dma_chan->device->dev, dest,
1034                                IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
1035        src_dma = dma_map_single(dma_chan->device->dev, src,
1036                                IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
1037        tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
1038                                      IOP_ADMA_TEST_SIZE,
1039                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1040
1041        cookie = iop_adma_tx_submit(tx);
1042        iop_adma_issue_pending(dma_chan);
1043        msleep(1);
1044
1045        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
1046                        DMA_SUCCESS) {
1047                dev_printk(KERN_ERR, dma_chan->device->dev,
1048                        "Self-test copy timed out, disabling\n");
1049                err = -ENODEV;
1050                goto free_resources;
1051        }
1052
1053        iop_chan = to_iop_adma_chan(dma_chan);
1054        dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1055                IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
1056        if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
1057                dev_printk(KERN_ERR, dma_chan->device->dev,
1058                        "Self-test copy failed compare, disabling\n");
1059                err = -ENODEV;
1060                goto free_resources;
1061        }
1062
1063free_resources:
1064        iop_adma_free_chan_resources(dma_chan);
1065out:
1066        kfree(src);
1067        kfree(dest);
1068        return err;
1069}
1070
#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */

/* Probe-time self-test for the xor, xor-validate (zero sum) and memset
 * capabilities.  Each source page is filled with a distinct bit pattern
 * so the expected xor result is computable; zero-sum is then checked in
 * both the passing and failing direction.  Returns 0 on success,
 * -ENOMEM on allocation failure, -ENODEV on timeout/mismatch.
 */
static int __devinit
iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			/* unwind pages allocated so far */
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers: page N gets byte pattern (1 << N) */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	/* expected xor of all the source patterns */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	/* every word of the destination must equal the expected pattern */
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	/* NOTE(review): dest_dma was mapped DMA_FROM_DEVICE but is synced
	 * for device with DMA_TO_DEVICE here -- confirm intent
	 */
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	/* preset to a failing value; hardware must clear it on success */
	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset */
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum: dest is now zeroed, so the
	 * validate must report a mismatch (result set to 1)
	 */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
1271
#ifdef CONFIG_MD_RAID6_PQ
/* Probe-time self-test for the P+Q generate and validate capabilities.
 * Generates a syndrome in hardware, checks it against the software
 * raid6 implementation, then runs the validator in both the passing
 * and (after corrupting P/Q) failing direction.  Returns 0 on success,
 * -ENOMEM on allocation failure, -ENODEV on timeout/mismatch.
 */
static int __devinit
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
	/* combined sources, software pq results, and extra hw pq results */
	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
	/* ptr to the extra hw pq buffers defined above */
	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
	/* address conversion buffers (dma_map / page_address) */
	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
	dma_addr_t pq_dest[2];

	int i;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u32 zero_sum_result;
	int err = 0;
	struct device *dev;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (i = 0; i < ARRAY_SIZE(pq); i++) {
		pq[i] = alloc_page(GFP_KERNEL);
		if (!pq[i]) {
			/* unwind pages allocated so far */
			while (i--)
				__free_page(pq[i]);
			return -ENOMEM;
		}
	}

	/* Fill in src buffers.  memset uses only the low byte of its
	 * value argument, so page i is filled with 0x11 << i.
	 */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
		pq_sw[i] = page_address(pq[i]);
		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
	}
	/* the two pages after the sources hold the software P and Q */
	pq_sw[i] = page_address(pq[i]);
	pq_sw[i+1] = page_address(pq[i+1]);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dev = dma_chan->device->dev;

	/* initialize the dests */
	memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
	memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);

	/* test pq */
	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
				  PAGE_SIZE,
				  DMA_PREP_INTERRUPT |
				  DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_err(dev, "Self-test pq timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* compute the reference syndrome in software for comparison */
	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);

	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test p failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test q failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test correct zero sum using the software generated pq values */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	/* preset to a failing value; hardware must clear it on success */
	zero_sum_result = ~0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

	/* test incorrect zero sum: corrupt the stored P and Q so the
	 * validator must flag both
	 */
	i = IOP_ADMA_NUM_SRC_TEST;
	memset(pq_sw[i] + 100, 0, 100);
	memset(pq_sw[i+1] + 200, 0, 200);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	zero_sum_result = 0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	i = ARRAY_SIZE(pq);
	while (i--)
		__free_page(pq[i]);
	return err;
}
#endif
1435
1436static int __devexit iop_adma_remove(struct platform_device *dev)
1437{
1438        struct iop_adma_device *device = platform_get_drvdata(dev);
1439        struct dma_chan *chan, *_chan;
1440        struct iop_adma_chan *iop_chan;
1441        struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1442
1443        dma_async_device_unregister(&device->common);
1444
1445        dma_free_coherent(&dev->dev, plat_data->pool_size,
1446                        device->dma_desc_pool_virt, device->dma_desc_pool);
1447
1448        list_for_each_entry_safe(chan, _chan, &device->common.channels,
1449                                device_node) {
1450                iop_chan = to_iop_adma_chan(chan);
1451                list_del(&chan->device_node);
1452                kfree(iop_chan);
1453        }
1454        kfree(device);
1455
1456        return 0;
1457}
1458
1459static int __devinit iop_adma_probe(struct platform_device *pdev)
1460{
1461        struct resource *res;
1462        int ret = 0, i;
1463        struct iop_adma_device *adev;
1464        struct iop_adma_chan *iop_chan;
1465        struct dma_device *dma_dev;
1466        struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
1467
1468        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1469        if (!res)
1470                return -ENODEV;
1471
1472        if (!devm_request_mem_region(&pdev->dev, res->start,
1473                                res->end - res->start, pdev->name))
1474                return -EBUSY;
1475
1476        adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1477        if (!adev)
1478                return -ENOMEM;
1479        dma_dev = &adev->common;
1480
1481        /* allocate coherent memory for hardware descriptors
1482         * note: writecombine gives slightly better performance, but
1483         * requires that we explicitly flush the writes
1484         */
1485        if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1486                                        plat_data->pool_size,
1487                                        &adev->dma_desc_pool,
1488                                        GFP_KERNEL)) == NULL) {
1489                ret = -ENOMEM;
1490                goto err_free_adev;
1491        }
1492
1493        dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
1494                __func__, adev->dma_desc_pool_virt,
1495                (void *) adev->dma_desc_pool);
1496
1497        adev->id = plat_data->hw_id;
1498
1499        /* discover transaction capabilites from the platform data */
1500        dma_dev->cap_mask = plat_data->cap_mask;
1501
1502        adev->pdev = pdev;
1503        platform_set_drvdata(pdev, adev);
1504
1505        INIT_LIST_HEAD(&dma_dev->channels);
1506
1507        /* set base routines */
1508        dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1509        dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1510        dma_dev->device_is_tx_complete = iop_adma_is_complete;
1511        dma_dev->device_issue_pending = iop_adma_issue_pending;
1512        dma_dev->dev = &pdev->dev;
1513
1514        /* set prep routines based on capability */
1515        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1516                dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1517        if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1518                dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
1519        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1520                dma_dev->max_xor = iop_adma_get_max_xor();
1521                dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1522        }
1523        if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1524                dma_dev->device_prep_dma_xor_val =
1525                        iop_adma_prep_dma_xor_val;
1526        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1527                dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1528                dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1529        }
1530        if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1531                dma_dev->device_prep_dma_pq_val =
1532                        iop_adma_prep_dma_pq_val;
1533        if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1534                dma_dev->device_prep_dma_interrupt =
1535                        iop_adma_prep_dma_interrupt;
1536
1537        iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1538        if (!iop_chan) {
1539                ret = -ENOMEM;
1540                goto err_free_dma;
1541        }
1542        iop_chan->device = adev;
1543
1544        iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1545                                        res->end - res->start);
1546        if (!iop_chan->mmr_base) {
1547                ret = -ENOMEM;
1548                goto err_free_iop_chan;
1549        }
1550        tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1551                iop_chan);
1552
1553        /* clear errors before enabling interrupts */
1554        iop_adma_device_clear_err_status(iop_chan);
1555
1556        for (i = 0; i < 3; i++) {
1557                irq_handler_t handler[] = { iop_adma_eot_handler,
1558                                        iop_adma_eoc_handler,
1559                                        iop_adma_err_handler };
1560                int irq = platform_get_irq(pdev, i);
1561                if (irq < 0) {
1562                        ret = -ENXIO;
1563                        goto err_free_iop_chan;
1564                } else {
1565                        ret = devm_request_irq(&pdev->dev, irq,
1566                                        handler[i], 0, pdev->name, iop_chan);
1567                        if (ret)
1568                                goto err_free_iop_chan;
1569                }
1570        }
1571
1572        spin_lock_init(&iop_chan->lock);
1573        INIT_LIST_HEAD(&iop_chan->chain);
1574        INIT_LIST_HEAD(&iop_chan->all_slots);
1575        iop_chan->common.device = dma_dev;
1576        list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1577
1578        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1579                ret = iop_adma_memcpy_self_test(adev);
1580                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1581                if (ret)
1582                        goto err_free_iop_chan;
1583        }
1584
1585        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
1586            dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1587                ret = iop_adma_xor_val_self_test(adev);
1588                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1589                if (ret)
1590                        goto err_free_iop_chan;
1591        }
1592
1593        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1594            dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1595                #ifdef CONFIG_MD_RAID6_PQ
1596                ret = iop_adma_pq_zero_sum_self_test(adev);
1597                dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1598                #else
1599                /* can not test raid6, so do not publish capability */
1600                dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1601                dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1602                ret = 0;
1603                #endif
1604                if (ret)
1605                        goto err_free_iop_chan;
1606        }
1607
1608        dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
1609          "( %s%s%s%s%s%s%s)\n",
1610          dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1611          dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1612          dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1613          dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1614          dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
1615          dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1616          dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1617
1618        dma_async_device_register(dma_dev);
1619        goto out;
1620
1621 err_free_iop_chan:
1622        kfree(iop_chan);
1623 err_free_dma:
1624        dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1625                        adev->dma_desc_pool_virt, adev->dma_desc_pool);
1626 err_free_adev:
1627        kfree(adev);
1628 out:
1629        return ret;
1630}
1631
/**
 * iop_chan_start_null_memcpy - prime an idle channel with a zero-length copy
 * @iop_chan: channel to start
 *
 * Builds and launches a dummy (0-byte) memcpy descriptor so the engine has
 * a valid "last" descriptor to chain subsequent real operations onto.  The
 * descriptor is acked immediately since no client will ever wait on it.
 * Takes &iop_chan->lock internally; callers must not hold it.
 */
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	/* a length of 0 still needs the minimum number of slots */
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
		/* no client waits on this descriptor, so ack it up front */
		async_tx_ack(&sw_desc->async_tx);
		/* zero-length copy with null source/destination addresses */
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		/* cookie values <= 1 are reserved (0/error sentinels), skip them */
		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			 "failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}
1688
/**
 * iop_chan_start_null_xor - prime an idle channel with a zero-length xor
 * @iop_chan: channel to start
 *
 * XOR-capable variant of the null-descriptor bring-up: builds and launches
 * a dummy 2-source, 0-byte xor descriptor so the engine has a valid "last"
 * descriptor to chain subsequent real operations onto.  The descriptor is
 * acked immediately since no client will ever wait on it.  Takes
 * &iop_chan->lock internally; callers must not hold it.
 */
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	/* minimum slot count for a 2-source xor of length 0 */
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
		/* no client waits on this descriptor, so ack it up front */
		async_tx_ack(&sw_desc->async_tx);
		/* zero-length xor: two null sources, null destination */
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		/* cookie values <= 1 are reserved (0/error sentinels), skip them */
		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}
1745
/* let udev/modprobe match this module against the "iop-adma" platform device */
MODULE_ALIAS("platform:iop-adma");

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	/* __devexit_p() compiles remove() out when hotplug support is disabled */
	.remove		= __devexit_p(iop_adma_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",	/* must match the platform device name */
	},
};
1756
1757static int __init iop_adma_init (void)
1758{
1759        return platform_driver_register(&iop_adma_driver);
1760}
1761
1762static void __exit iop_adma_exit (void)
1763{
1764        platform_driver_unregister(&iop_adma_driver);
1765        return;
1766}
1767module_exit(iop_adma_exit);
1768module_init(iop_adma_init);
1769
1770MODULE_AUTHOR("Intel Corporation");
1771MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1772MODULE_LICENSE("GPL");
1773