linux/drivers/dma/dmaengine.c
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc., 59
  16 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  17 *
  18 * The full GNU General Public License is included in this distribution in the
  19 * file called COPYING.
  20 */
  21
  22/*
  23 * This code implements the DMA subsystem. It provides a HW-neutral interface
  24 * for other kernel code to use asynchronous memory copy capabilities,
  25 * if present, and allows different HW DMA drivers to register as providing
  26 * this capability.
  27 *
   28 * Because we are accelerating what is already a relatively fast
   29 * operation, the code goes to great lengths to avoid additional overhead,
  30 * such as locking.
  31 *
  32 * LOCKING:
  33 *
   34 * The subsystem keeps a global list of dma_device structs; it is protected
   35 * by a mutex, dma_list_mutex.
  36 *
  37 * A subsystem can get access to a channel by calling dmaengine_get() followed
   38 * by dma_find_channel(), or if it needs an exclusive channel it can call
  39 * dma_request_channel().  Once a channel is allocated a reference is taken
  40 * against its corresponding driver to disable removal.
  41 *
   42 * Each device has a channels list, which runs unlocked but is never modified
   43 * once the device is registered; it is simply set up by the driver.
  44 *
   45 * See Documentation/dmaengine.txt for more details.
  46 */
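
/*
 * Editor's sketch (not part of the original file): the opportunistic client
 * usage described above, in outline.  Buffer handling and error paths are
 * omitted; see Documentation/dmaengine.txt for the full client requirements.
 *
 *	dmaengine_get();			// register interest in channels
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);	// fast, per-cpu lookup
 *	if (chan) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *		dma_async_issue_pending(chan);
 *		// later: poll dma_async_is_tx_complete() or dma_sync_wait()
 *	} else {
 *		memcpy(dest, src, len);		// no channel, cpu fallback
 *	}
 *	...
 *	dmaengine_put();			// done with dma channels
 */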
  47
  48#include <linux/init.h>
  49#include <linux/module.h>
  50#include <linux/mm.h>
  51#include <linux/device.h>
  52#include <linux/dmaengine.h>
  53#include <linux/hardirq.h>
  54#include <linux/spinlock.h>
  55#include <linux/percpu.h>
  56#include <linux/rcupdate.h>
  57#include <linux/mutex.h>
  58#include <linux/jiffies.h>
  59#include <linux/rculist.h>
  60#include <linux/idr.h>
  61
  62static DEFINE_MUTEX(dma_list_mutex);
  63static LIST_HEAD(dma_device_list);
  64static long dmaengine_ref_count;
  65static struct idr dma_idr;
  66
  67/* --- sysfs implementation --- */
  68
  69/**
   70 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  71 * @dev - device node
  72 *
  73 * Must be called under dma_list_mutex
  74 */
  75static struct dma_chan *dev_to_dma_chan(struct device *dev)
  76{
  77        struct dma_chan_dev *chan_dev;
  78
  79        chan_dev = container_of(dev, typeof(*chan_dev), device);
  80        return chan_dev->chan;
  81}
  82
  83static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
  84{
  85        struct dma_chan *chan;
  86        unsigned long count = 0;
  87        int i;
  88        int err;
  89
  90        mutex_lock(&dma_list_mutex);
  91        chan = dev_to_dma_chan(dev);
  92        if (chan) {
  93                for_each_possible_cpu(i)
  94                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
  95                err = sprintf(buf, "%lu\n", count);
  96        } else
  97                err = -ENODEV;
  98        mutex_unlock(&dma_list_mutex);
  99
 100        return err;
 101}
 102
 103static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 104                                      char *buf)
 105{
 106        struct dma_chan *chan;
 107        unsigned long count = 0;
 108        int i;
 109        int err;
 110
 111        mutex_lock(&dma_list_mutex);
 112        chan = dev_to_dma_chan(dev);
 113        if (chan) {
 114                for_each_possible_cpu(i)
 115                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 116                err = sprintf(buf, "%lu\n", count);
 117        } else
 118                err = -ENODEV;
 119        mutex_unlock(&dma_list_mutex);
 120
 121        return err;
 122}
 123
 124static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 125{
 126        struct dma_chan *chan;
 127        int err;
 128
 129        mutex_lock(&dma_list_mutex);
 130        chan = dev_to_dma_chan(dev);
 131        if (chan)
 132                err = sprintf(buf, "%d\n", chan->client_count);
 133        else
 134                err = -ENODEV;
 135        mutex_unlock(&dma_list_mutex);
 136
 137        return err;
 138}
 139
 140static struct device_attribute dma_attrs[] = {
 141        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
 142        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
 143        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
 144        __ATTR_NULL
 145};
 146
 147static void chan_dev_release(struct device *dev)
 148{
 149        struct dma_chan_dev *chan_dev;
 150
 151        chan_dev = container_of(dev, typeof(*chan_dev), device);
 152        if (atomic_dec_and_test(chan_dev->idr_ref)) {
 153                mutex_lock(&dma_list_mutex);
 154                idr_remove(&dma_idr, chan_dev->dev_id);
 155                mutex_unlock(&dma_list_mutex);
 156                kfree(chan_dev->idr_ref);
 157        }
 158        kfree(chan_dev);
 159}
 160
 161static struct class dma_devclass = {
 162        .name           = "dma",
 163        .dev_attrs      = dma_attrs,
 164        .dev_release    = chan_dev_release,
 165};
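
/*
 * Editor's note: with the class and attributes above, every registered
 * channel appears in sysfs under /sys/class/dma/ with the name set by
 * dev_set_name() in dma_async_device_register() below ("dma%dchan%d").
 * Illustrative paths for the first channel of the first device:
 *
 *	/sys/class/dma/dma0chan0/memcpy_count
 *	/sys/class/dma/dma0chan0/bytes_transferred
 *	/sys/class/dma/dma0chan0/in_use
 */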
 166
 167/* --- client and device registration --- */
 168
 169#define dma_device_satisfies_mask(device, mask) \
 170        __dma_device_satisfies_mask((device), &(mask))
 171static int
 172__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
 173{
 174        dma_cap_mask_t has;
 175
 176        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 177                DMA_TX_TYPE_END);
 178        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 179}
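
/*
 * Editor's sketch (not in the original file): how a client typically builds
 * the capability mask that __dma_device_satisfies_mask() checks against a
 * device's cap_mask.  A device advertising only DMA_MEMCPY satisfies the
 * mask after the first dma_cap_set() but not after the second.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);	// requires MEMCPY only
 *	dma_cap_set(DMA_XOR, mask);	// now requires MEMCPY and XOR
 */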
 180
 181static struct module *dma_chan_to_owner(struct dma_chan *chan)
 182{
 183        return chan->device->dev->driver->owner;
 184}
 185
 186/**
 187 * balance_ref_count - catch up the channel reference count
 188 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 189 *
 190 * balance_ref_count must be called under dma_list_mutex
 191 */
 192static void balance_ref_count(struct dma_chan *chan)
 193{
 194        struct module *owner = dma_chan_to_owner(chan);
 195
 196        while (chan->client_count < dmaengine_ref_count) {
 197                __module_get(owner);
 198                chan->client_count++;
 199        }
 200}
 201
 202/**
 203 * dma_chan_get - try to grab a dma channel's parent driver module
 204 * @chan - channel to grab
 205 *
 206 * Must be called under dma_list_mutex
 207 */
 208static int dma_chan_get(struct dma_chan *chan)
 209{
 210        int err = -ENODEV;
 211        struct module *owner = dma_chan_to_owner(chan);
 212
 213        if (chan->client_count) {
 214                __module_get(owner);
 215                err = 0;
 216        } else if (try_module_get(owner))
 217                err = 0;
 218
 219        if (err == 0)
 220                chan->client_count++;
 221
 222        /* allocate upon first client reference */
 223        if (chan->client_count == 1 && err == 0) {
 224                int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 225
 226                if (desc_cnt < 0) {
 227                        err = desc_cnt;
 228                        chan->client_count = 0;
 229                        module_put(owner);
 230                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 231                        balance_ref_count(chan);
 232        }
 233
 234        return err;
 235}
 236
 237/**
 238 * dma_chan_put - drop a reference to a dma channel's parent driver module
 239 * @chan - channel to release
 240 *
 241 * Must be called under dma_list_mutex
 242 */
 243static void dma_chan_put(struct dma_chan *chan)
 244{
 245        if (!chan->client_count)
 246                return; /* this channel failed alloc_chan_resources */
 247        chan->client_count--;
 248        module_put(dma_chan_to_owner(chan));
 249        if (chan->client_count == 0)
 250                chan->device->device_free_chan_resources(chan);
 251}
 252
 253enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 254{
 255        enum dma_status status;
 256        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 257
 258        dma_async_issue_pending(chan);
 259        do {
 260                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 261                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 262                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
 263                        return DMA_ERROR;
 264                }
 265        } while (status == DMA_IN_PROGRESS);
 266
 267        return status;
 268}
 269EXPORT_SYMBOL(dma_sync_wait);
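
/*
 * Editor's sketch (not in the original file): dma_sync_wait() bundles the
 * issue/poll loop a client would otherwise write by hand.  'chan' and
 * 'cookie' are assumed to come from an earlier submission such as
 * dma_async_memcpy_buf_to_buf().
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		pr_err("offloaded copy failed or timed out\n");
 *
 * ...which is roughly equivalent to (minus the 5 second timeout above):
 *
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */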
 270
 271/**
 272 * dma_cap_mask_all - enable iteration over all operation types
 273 */
 274static dma_cap_mask_t dma_cap_mask_all;
 275
 276/**
 277 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 278 * @chan - associated channel for this entry
 279 */
 280struct dma_chan_tbl_ent {
 281        struct dma_chan *chan;
 282};
 283
 284/**
 285 * channel_table - percpu lookup table for memory-to-memory offload providers
 286 */
 287static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
 288
 289static int __init dma_channel_table_init(void)
 290{
 291        enum dma_transaction_type cap;
 292        int err = 0;
 293
 294        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 295
 296        /* 'interrupt', 'private', and 'slave' are channel capabilities,
 297         * but are not associated with an operation so they do not need
 298         * an entry in the channel_table
 299         */
 300        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 301        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 302        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 303
 304        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 305                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 306                if (!channel_table[cap]) {
 307                        err = -ENOMEM;
 308                        break;
 309                }
 310        }
 311
 312        if (err) {
 313                pr_err("dmaengine: initialization failure\n");
 314                for_each_dma_cap_mask(cap, dma_cap_mask_all)
 315                        if (channel_table[cap])
 316                                free_percpu(channel_table[cap]);
 317        }
 318
 319        return err;
 320}
 321arch_initcall(dma_channel_table_init);
 322
 323/**
 324 * dma_find_channel - find a channel to carry out the operation
 325 * @tx_type: transaction type
 326 */
 327struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 328{
 329        struct dma_chan *chan;
 330        int cpu;
 331
 332        cpu = get_cpu();
 333        chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
 334        put_cpu();
 335
 336        return chan;
 337}
 338EXPORT_SYMBOL(dma_find_channel);
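
/*
 * Editor's sketch (not in the original file): dma_find_channel() is the
 * fast path for clients that have already called dmaengine_get().  It may
 * return NULL (no public channel offers the capability), so callers need a
 * synchronous fallback:
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *	if (!chan) {
 *		memcpy(dest, src, len);		// cpu fallback
 *		return;
 *	}
 */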
 339
 340/**
 341 * dma_issue_pending_all - flush all pending operations across all channels
 342 */
 343void dma_issue_pending_all(void)
 344{
 345        struct dma_device *device;
 346        struct dma_chan *chan;
 347
 348        rcu_read_lock();
 349        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 350                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 351                        continue;
 352                list_for_each_entry(chan, &device->channels, device_node)
 353                        if (chan->client_count)
 354                                device->device_issue_pending(chan);
 355        }
 356        rcu_read_unlock();
 357}
 358EXPORT_SYMBOL(dma_issue_pending_all);
 359
 360/**
 361 * nth_chan - returns the nth channel of the given capability
 362 * @cap: capability to match
 363 * @n: nth channel desired
 364 *
 365 * Defaults to returning the channel with the desired capability and the
 366 * lowest reference count when 'n' cannot be satisfied.  Must be called
 367 * under dma_list_mutex.
 368 */
 369static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 370{
 371        struct dma_device *device;
 372        struct dma_chan *chan;
 373        struct dma_chan *ret = NULL;
 374        struct dma_chan *min = NULL;
 375
 376        list_for_each_entry(device, &dma_device_list, global_node) {
 377                if (!dma_has_cap(cap, device->cap_mask) ||
 378                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 379                        continue;
 380                list_for_each_entry(chan, &device->channels, device_node) {
 381                        if (!chan->client_count)
 382                                continue;
 383                        if (!min)
 384                                min = chan;
 385                        else if (chan->table_count < min->table_count)
 386                                min = chan;
 387
 388                        if (n-- == 0) {
 389                                ret = chan;
 390                                break; /* done */
 391                        }
 392                }
 393                if (ret)
 394                        break; /* done */
 395        }
 396
 397        if (!ret)
 398                ret = min;
 399
 400        if (ret)
 401                ret->table_count++;
 402
 403        return ret;
 404}
 405
 406/**
 407 * dma_channel_rebalance - redistribute the available channels
 408 *
 409 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
  410 * operation type) in the SMP case, and operation isolation (avoid
 411 * multi-tasking channels) in the non-SMP case.  Must be called under
 412 * dma_list_mutex.
 413 */
 414static void dma_channel_rebalance(void)
 415{
 416        struct dma_chan *chan;
 417        struct dma_device *device;
 418        int cpu;
 419        int cap;
 420        int n;
 421
 422        /* undo the last distribution */
 423        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 424                for_each_possible_cpu(cpu)
 425                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 426
 427        list_for_each_entry(device, &dma_device_list, global_node) {
 428                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 429                        continue;
 430                list_for_each_entry(chan, &device->channels, device_node)
 431                        chan->table_count = 0;
 432        }
 433
 434        /* don't populate the channel_table if no clients are available */
 435        if (!dmaengine_ref_count)
 436                return;
 437
 438        /* redistribute available channels */
 439        n = 0;
 440        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 441                for_each_online_cpu(cpu) {
 442                        if (num_possible_cpus() > 1)
 443                                chan = nth_chan(cap, n++);
 444                        else
 445                                chan = nth_chan(cap, -1);
 446
 447                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 448                }
 449}
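
/*
 * Editor's note, a worked example with hypothetical numbers: one public
 * device offering two DMA_MEMCPY channels and four online cpus.  Assuming
 * DMA_MEMCPY is the first capability iterated, nth_chan() is asked for
 * n = 0, 1, 2, 3: the first two requests return chan0 and chan1 directly,
 * the last two cannot be satisfied and fall back to the channel with the
 * lowest table_count, so each channel ends up backing two cpus in
 * channel_table[DMA_MEMCPY].  (Note that 'n' is not reset per capability
 * type, which is why the iteration order matters.)
 */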
 450
 451static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
 452                                          dma_filter_fn fn, void *fn_param)
 453{
 454        struct dma_chan *chan;
 455
 456        if (!__dma_device_satisfies_mask(dev, mask)) {
 457                pr_debug("%s: wrong capabilities\n", __func__);
 458                return NULL;
 459        }
 460        /* devices with multiple channels need special handling as we need to
 461         * ensure that all channels are either private or public.
 462         */
 463        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 464                list_for_each_entry(chan, &dev->channels, device_node) {
 465                        /* some channels are already publicly allocated */
 466                        if (chan->client_count)
 467                                return NULL;
 468                }
 469
 470        list_for_each_entry(chan, &dev->channels, device_node) {
 471                if (chan->client_count) {
 472                        pr_debug("%s: %s busy\n",
 473                                 __func__, dma_chan_name(chan));
 474                        continue;
 475                }
 476                if (fn && !fn(chan, fn_param)) {
 477                        pr_debug("%s: %s filter said false\n",
 478                                 __func__, dma_chan_name(chan));
 479                        continue;
 480                }
 481                return chan;
 482        }
 483
 484        return NULL;
 485}
 486
 487/**
 488 * dma_request_channel - try to allocate an exclusive channel
 489 * @mask: capabilities that the channel must satisfy
  490 * @fn: optional callback used to select from the available channels
 491 * @fn_param: opaque parameter to pass to dma_filter_fn
 492 */
 493struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
 494{
 495        struct dma_device *device, *_d;
 496        struct dma_chan *chan = NULL;
 497        int err;
 498
 499        /* Find a channel */
 500        mutex_lock(&dma_list_mutex);
 501        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 502                chan = private_candidate(mask, device, fn, fn_param);
 503                if (chan) {
 504                        /* Found a suitable channel, try to grab, prep, and
 505                         * return it.  We first set DMA_PRIVATE to disable
 506                         * balance_ref_count as this channel will not be
 507                         * published in the general-purpose allocator
 508                         */
 509                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
 510                        device->privatecnt++;
 511                        err = dma_chan_get(chan);
 512
 513                        if (err == -ENODEV) {
 514                                pr_debug("%s: %s module removed\n", __func__,
 515                                         dma_chan_name(chan));
 516                                list_del_rcu(&device->global_node);
 517                        } else if (err)
 518                                pr_err("dmaengine: failed to get %s: (%d)\n",
 519                                       dma_chan_name(chan), err);
 520                        else
 521                                break;
 522                        if (--device->privatecnt == 0)
 523                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 524                        chan->private = NULL;
 525                        chan = NULL;
 526                }
 527        }
 528        mutex_unlock(&dma_list_mutex);
 529
 530        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
 531                 chan ? dma_chan_name(chan) : NULL);
 532
 533        return chan;
 534}
 535EXPORT_SYMBOL_GPL(__dma_request_channel);
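
/*
 * Editor's sketch (not in the original file): exclusive allocation with a
 * filter callback.  'my_filter' and 'my_device' are hypothetical; the
 * filter is called once per candidate channel and returns true to accept.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// only my provider
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_device);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */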
 536
 537void dma_release_channel(struct dma_chan *chan)
 538{
 539        mutex_lock(&dma_list_mutex);
 540        WARN_ONCE(chan->client_count != 1,
 541                  "chan reference count %d != 1\n", chan->client_count);
 542        dma_chan_put(chan);
 543        /* drop PRIVATE cap enabled by __dma_request_channel() */
 544        if (--chan->device->privatecnt == 0)
 545                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 546        chan->private = NULL;
 547        mutex_unlock(&dma_list_mutex);
 548}
 549EXPORT_SYMBOL_GPL(dma_release_channel);
 550
 551/**
  552 * dmaengine_get - register interest in dma channels
 553 */
 554void dmaengine_get(void)
 555{
 556        struct dma_device *device, *_d;
 557        struct dma_chan *chan;
 558        int err;
 559
 560        mutex_lock(&dma_list_mutex);
 561        dmaengine_ref_count++;
 562
 563        /* try to grab channels */
 564        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 565                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 566                        continue;
 567                list_for_each_entry(chan, &device->channels, device_node) {
 568                        err = dma_chan_get(chan);
 569                        if (err == -ENODEV) {
 570                                /* module removed before we could use it */
 571                                list_del_rcu(&device->global_node);
 572                                break;
 573                        } else if (err)
 574                                pr_err("dmaengine: failed to get %s: (%d)\n",
 575                                       dma_chan_name(chan), err);
 576                }
 577        }
 578
 579        /* if this is the first reference and there were channels
 580         * waiting we need to rebalance to get those channels
 581         * incorporated into the channel table
 582         */
 583        if (dmaengine_ref_count == 1)
 584                dma_channel_rebalance();
 585        mutex_unlock(&dma_list_mutex);
 586}
 587EXPORT_SYMBOL(dmaengine_get);
 588
 589/**
 590 * dmaengine_put - let dma drivers be removed when ref_count == 0
 591 */
 592void dmaengine_put(void)
 593{
 594        struct dma_device *device;
 595        struct dma_chan *chan;
 596
 597        mutex_lock(&dma_list_mutex);
 598        dmaengine_ref_count--;
 599        BUG_ON(dmaengine_ref_count < 0);
 600        /* drop channel references */
 601        list_for_each_entry(device, &dma_device_list, global_node) {
 602                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 603                        continue;
 604                list_for_each_entry(chan, &device->channels, device_node)
 605                        dma_chan_put(chan);
 606        }
 607        mutex_unlock(&dma_list_mutex);
 608}
 609EXPORT_SYMBOL(dmaengine_put);
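
/*
 * Editor's sketch (not in the original file): dmaengine_get() and
 * dmaengine_put() are reference counted and are typically paired in a
 * client's module init/exit (the 'my_client_*' functions are hypothetical).
 *
 *	static int __init my_client_init(void)
 *	{
 *		dmaengine_get();	// take references on public channels
 *		return 0;
 *	}
 *	module_init(my_client_init);
 *
 *	static void __exit my_client_exit(void)
 *	{
 *		dmaengine_put();	// allow providers to be unloaded again
 *	}
 *	module_exit(my_client_exit);
 */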
 610
 611static bool device_has_all_tx_types(struct dma_device *device)
 612{
 613        /* A device that satisfies this test has channels that will never cause
 614         * an async_tx channel switch event as all possible operation types can
 615         * be handled.
 616         */
 617        #ifdef CONFIG_ASYNC_TX_DMA
 618        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 619                return false;
 620        #endif
 621
 622        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
 623        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 624                return false;
 625        #endif
 626
 627        #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
 628        if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
 629                return false;
 630        #endif
 631
 632        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 633        if (!dma_has_cap(DMA_XOR, device->cap_mask))
 634                return false;
 635
 636        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 637        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 638                return false;
 639        #endif
 640        #endif
 641
 642        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 643        if (!dma_has_cap(DMA_PQ, device->cap_mask))
 644                return false;
 645
 646        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 647        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 648                return false;
 649        #endif
 650        #endif
 651
 652        return true;
 653}
 654
 655static int get_dma_id(struct dma_device *device)
 656{
 657        int rc;
 658
 659 idr_retry:
 660        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
 661                return -ENOMEM;
 662        mutex_lock(&dma_list_mutex);
 663        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
 664        mutex_unlock(&dma_list_mutex);
 665        if (rc == -EAGAIN)
 666                goto idr_retry;
 667        else if (rc != 0)
 668                return rc;
 669
 670        return 0;
 671}
 672
 673/**
  674 * dma_async_device_register - register a DMA device and its channels
 675 * @device: &dma_device
 676 */
 677int dma_async_device_register(struct dma_device *device)
 678{
 679        int chancnt = 0, rc;
 680        struct dma_chan* chan;
 681        atomic_t *idr_ref;
 682
 683        if (!device)
 684                return -ENODEV;
 685
 686        /* validate device routines */
 687        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 688                !device->device_prep_dma_memcpy);
 689        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 690                !device->device_prep_dma_xor);
 691        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 692                !device->device_prep_dma_xor_val);
 693        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 694                !device->device_prep_dma_pq);
 695        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 696                !device->device_prep_dma_pq_val);
 697        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 698                !device->device_prep_dma_memset);
 699        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 700                !device->device_prep_dma_interrupt);
 701        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 702                !device->device_prep_slave_sg);
 703        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 704                !device->device_terminate_all);
 705
 706        BUG_ON(!device->device_alloc_chan_resources);
 707        BUG_ON(!device->device_free_chan_resources);
 708        BUG_ON(!device->device_is_tx_complete);
 709        BUG_ON(!device->device_issue_pending);
 710        BUG_ON(!device->dev);
 711
 712        /* note: this only matters in the
 713         * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
 714         */
 715        if (device_has_all_tx_types(device))
 716                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 717
 718        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 719        if (!idr_ref)
 720                return -ENOMEM;
 721        rc = get_dma_id(device);
 722        if (rc != 0) {
 723                kfree(idr_ref);
 724                return rc;
 725        }
 726
 727        atomic_set(idr_ref, 0);
 728
 729        /* represent channels in sysfs. Probably want devs too */
 730        list_for_each_entry(chan, &device->channels, device_node) {
 731                rc = -ENOMEM;
 732                chan->local = alloc_percpu(typeof(*chan->local));
 733                if (chan->local == NULL)
 734                        goto err_out;
 735                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 736                if (chan->dev == NULL) {
 737                        free_percpu(chan->local);
 738                        chan->local = NULL;
 739                        goto err_out;
 740                }
 741
 742                chan->chan_id = chancnt++;
 743                chan->dev->device.class = &dma_devclass;
 744                chan->dev->device.parent = device->dev;
 745                chan->dev->chan = chan;
 746                chan->dev->idr_ref = idr_ref;
 747                chan->dev->dev_id = device->dev_id;
 748                atomic_inc(idr_ref);
 749                dev_set_name(&chan->dev->device, "dma%dchan%d",
 750                             device->dev_id, chan->chan_id);
 751
 752                rc = device_register(&chan->dev->device);
 753                if (rc) {
 754                        free_percpu(chan->local);
 755                        chan->local = NULL;
 756                        kfree(chan->dev);
 757                        atomic_dec(idr_ref);
 758                        goto err_out;
 759                }
 760                chan->client_count = 0;
 761        }
 762        device->chancnt = chancnt;
 763
 764        mutex_lock(&dma_list_mutex);
 765        /* take references on public channels */
 766        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 767                list_for_each_entry(chan, &device->channels, device_node) {
 768                        /* if clients are already waiting for channels we need
 769                         * to take references on their behalf
 770                         */
 771                        if (dma_chan_get(chan) == -ENODEV) {
 772                                /* note we can only get here for the first
 773                                 * channel as the remaining channels are
 774                                 * guaranteed to get a reference
 775                                 */
 776                                rc = -ENODEV;
 777                                mutex_unlock(&dma_list_mutex);
 778                                goto err_out;
 779                        }
 780                }
 781        list_add_tail_rcu(&device->global_node, &dma_device_list);
 782        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 783                device->privatecnt++;   /* Always private */
 784        dma_channel_rebalance();
 785        mutex_unlock(&dma_list_mutex);
 786
 787        return 0;
 788
 789err_out:
 790        /* if we never registered a channel just release the idr */
 791        if (atomic_read(idr_ref) == 0) {
 792                mutex_lock(&dma_list_mutex);
 793                idr_remove(&dma_idr, device->dev_id);
 794                mutex_unlock(&dma_list_mutex);
 795                kfree(idr_ref);
 796                return rc;
 797        }
 798
 799        list_for_each_entry(chan, &device->channels, device_node) {
 800                if (chan->local == NULL)
 801                        continue;
 802                mutex_lock(&dma_list_mutex);
 803                chan->dev->chan = NULL;
 804                mutex_unlock(&dma_list_mutex);
 805                device_unregister(&chan->dev->device);
 806                free_percpu(chan->local);
 807        }
 808        return rc;
 809}
 810EXPORT_SYMBOL(dma_async_device_register);
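
/*
 * Editor's sketch (not in the original file): the provider-side setup that
 * the BUG_ON()s above expect for a DMA_MEMCPY-capable device.  All 'my_*'
 * names and the platform device are hypothetical driver code.
 *
 *	struct dma_device *dd = &my_dev->common;
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dd->device_free_chan_resources = my_free_chan_resources;
 *	dd->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	dd->device_is_tx_complete = my_is_tx_complete;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&dd->channels);
 *	my_chan->device = dd;
 *	list_add_tail(&my_chan->device_node, &dd->channels);
 *
 *	err = dma_async_device_register(dd);
 */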
 811
 812/**
 813 * dma_async_device_unregister - unregister a DMA device
 814 * @device: &dma_device
 815 *
  816 * This routine is called by dma driver exit routines; dmaengine holds module
  817 * references to prevent it being called while channels are in use.
 818 */
 819void dma_async_device_unregister(struct dma_device *device)
 820{
 821        struct dma_chan *chan;
 822
 823        mutex_lock(&dma_list_mutex);
 824        list_del_rcu(&device->global_node);
 825        dma_channel_rebalance();
 826        mutex_unlock(&dma_list_mutex);
 827
 828        list_for_each_entry(chan, &device->channels, device_node) {
 829                WARN_ONCE(chan->client_count,
 830                          "%s called while %d clients hold a reference\n",
 831                          __func__, chan->client_count);
 832                mutex_lock(&dma_list_mutex);
 833                chan->dev->chan = NULL;
 834                mutex_unlock(&dma_list_mutex);
 835                device_unregister(&chan->dev->device);
 836        }
 837}
 838EXPORT_SYMBOL(dma_async_device_unregister);
 839
 840/**
 841 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 842 * @chan: DMA channel to offload copy to
 843 * @dest: destination address (virtual)
 844 * @src: source address (virtual)
 845 * @len: length
 846 *
 847 * Both @dest and @src must be mappable to a bus address according to the
 848 * DMA mapping API rules for streaming mappings.
 849 * Both @dest and @src must stay memory resident (kernel memory or locked
 850 * user space pages).
 851 */
 852dma_cookie_t
 853dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 854                        void *src, size_t len)
 855{
 856        struct dma_device *dev = chan->device;
 857        struct dma_async_tx_descriptor *tx;
 858        dma_addr_t dma_dest, dma_src;
 859        dma_cookie_t cookie;
 860        int cpu;
 861        unsigned long flags;
 862
 863        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 864        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
 865        flags = DMA_CTRL_ACK |
 866                DMA_COMPL_SRC_UNMAP_SINGLE |
 867                DMA_COMPL_DEST_UNMAP_SINGLE;
 868        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 869
 870        if (!tx) {
 871                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
 872                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 873                return -ENOMEM;
 874        }
 875
 876        tx->callback = NULL;
 877        cookie = tx->tx_submit(tx);
 878
 879        cpu = get_cpu();
 880        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
 881        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
 882        put_cpu();
 883
 884        return cookie;
 885}
 886EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
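
/*
 * Editor's sketch (not in the original file): a synchronous client of the
 * helper above.  'chan' is assumed to come from dma_find_channel() or
 * dma_request_channel(); a negative cookie means submission failed.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0) {
 *		memcpy(dest, src, len);		// cpu fallback
 *	} else {
 *		dma_async_issue_pending(chan);
 *		if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *			pr_err("offloaded copy failed\n");
 *	}
 */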
 887
 888/**
 889 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 890 * @chan: DMA channel to offload copy to
 891 * @page: destination page
 892 * @offset: offset in page to copy to
 893 * @kdata: source address (virtual)
 894 * @len: length
 895 *
 896 * Both @page/@offset and @kdata must be mappable to a bus address according
 897 * to the DMA mapping API rules for streaming mappings.
 898 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 899 * locked user space pages)
 900 */
 901dma_cookie_t
 902dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 903                        unsigned int offset, void *kdata, size_t len)
 904{
 905        struct dma_device *dev = chan->device;
 906        struct dma_async_tx_descriptor *tx;
 907        dma_addr_t dma_dest, dma_src;
 908        dma_cookie_t cookie;
 909        int cpu;
 910        unsigned long flags;
 911
 912        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 913        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
 914        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
 915        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 916
 917        if (!tx) {
 918                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
 919                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 920                return -ENOMEM;
 921        }
 922
 923        tx->callback = NULL;
 924        cookie = tx->tx_submit(tx);
 925
 926        cpu = get_cpu();
 927        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
 928        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
 929        put_cpu();
 930
 931        return cookie;
 932}
 933EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 934
 935/**
 936 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 937 * @chan: DMA channel to offload copy to
 938 * @dest_pg: destination page
 939 * @dest_off: offset in page to copy to
 940 * @src_pg: source page
 941 * @src_off: offset in page to copy from
 942 * @len: length
 943 *
  944 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
  945 * address according to the DMA mapping API rules for streaming mappings.
  946 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
  947 * (kernel memory or locked user space pages).
 948 */
 949dma_cookie_t
 950dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 951        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
 952        size_t len)
 953{
 954        struct dma_device *dev = chan->device;
 955        struct dma_async_tx_descriptor *tx;
 956        dma_addr_t dma_dest, dma_src;
 957        dma_cookie_t cookie;
 958        int cpu;
 959        unsigned long flags;
 960
 961        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 962        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 963                                DMA_FROM_DEVICE);
 964        flags = DMA_CTRL_ACK;
 965        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 966
 967        if (!tx) {
 968                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
 969                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 970                return -ENOMEM;
 971        }
 972
 973        tx->callback = NULL;
 974        cookie = tx->tx_submit(tx);
 975
 976        cpu = get_cpu();
 977        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
 978        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
 979        put_cpu();
 980
 981        return cookie;
 982}
 983EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
 984
 985void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 986        struct dma_chan *chan)
 987{
 988        tx->chan = chan;
 989        spin_lock_init(&tx->lock);
 990}
 991EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 992
 993/* dma_wait_for_async_tx - spin wait for a transaction to complete
 994 * @tx: in-flight transaction to wait on
 995 */
 996enum dma_status
 997dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 998{
 999        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1000
1001        if (!tx)
1002                return DMA_SUCCESS;
1003
1004        while (tx->cookie == -EBUSY) {
1005                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1006                        pr_err("%s timeout waiting for descriptor submission\n",
1007                                __func__);
1008                        return DMA_ERROR;
1009                }
1010                cpu_relax();
1011        }
1012        return dma_sync_wait(tx->chan, tx->cookie);
1013}
1014EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1015
1016/* dma_run_dependencies - helper routine for dma drivers to process
1017 *      (start) dependent operations on their target channel
1018 * @tx: transaction with dependencies
1019 */
1020void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1021{
1022        struct dma_async_tx_descriptor *dep = tx->next;
1023        struct dma_async_tx_descriptor *dep_next;
1024        struct dma_chan *chan;
1025
1026        if (!dep)
1027                return;
1028
1029        /* we'll submit tx->next now, so clear the link */
1030        tx->next = NULL;
1031        chan = dep->chan;
1032
1033        /* keep submitting up until a channel switch is detected
1034         * in that case we will be called again as a result of
1035         * processing the interrupt from async_tx_channel_switch
1036         */
1037        for (; dep; dep = dep_next) {
1038                spin_lock_bh(&dep->lock);
1039                dep->parent = NULL;
1040                dep_next = dep->next;
1041                if (dep_next && dep_next->chan == chan)
1042                        dep->next = NULL; /* ->next will be submitted */
1043                else
1044                        dep_next = NULL; /* submit current dep and terminate */
1045                spin_unlock_bh(&dep->lock);
1046
1047                dep->tx_submit(dep);
1048        }
1049
1050        chan->device->device_issue_pending(chan);
1051}
1052EXPORT_SYMBOL_GPL(dma_run_dependencies);
1053
1054static int __init dma_bus_init(void)
1055{
1056        idr_init(&dma_idr);
1057        mutex_init(&dma_list_mutex);
1058        return class_register(&dma_devclass);
1059}
1060arch_initcall(dma_bus_init);
1061