linux/drivers/dma/dmaengine.c
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * The full GNU General Public License is included in this distribution in the
  15 * file called COPYING.
  16 */
  17
  18/*
  19 * This code implements the DMA subsystem. It provides a HW-neutral interface
  20 * for other kernel code to use asynchronous memory copy capabilities,
  21 * if present, and allows different HW DMA drivers to register as providing
  22 * this capability.
  23 *
   24 * Because we are accelerating what is already a relatively fast
  25 * operation, the code goes to great lengths to avoid additional overhead,
  26 * such as locking.
  27 *
  28 * LOCKING:
  29 *
   30 * The subsystem keeps a global list of dma_device structs; this list is
   31 * protected by a mutex, dma_list_mutex.
  32 *
  33 * A subsystem can get access to a channel by calling dmaengine_get() followed
  34 * by dma_find_channel(), or if it has need for an exclusive channel it can call
  35 * dma_request_channel().  Once a channel is allocated a reference is taken
  36 * against its corresponding driver to disable removal.
  37 *
   38 * Each device has a channels list, which runs unlocked but is never modified
   39 * once the device is registered; it is simply set up by the driver.
  40 *
  41 * See Documentation/dmaengine.txt for more details
  42 */
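/*
 * Illustrative sketch (not taken from this file): how a client might use the
 * two access paths described above. The filter callback and its parameter
 * (my_filter_fn, my_filter_param) are hypothetical placeholders.
 *
 *	Shared channels, via the per-cpu channel table:
 *
 *		struct dma_chan *chan;
 *
 *		dmaengine_get();
 *		chan = dma_find_channel(DMA_MEMCPY);
 *		if (chan) {
 *			...issue descriptors on chan...
 *		}
 *		dmaengine_put();
 *
 *	Exclusive use, via the private allocator:
 *
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_MEMCPY, mask);
 *		chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *		if (chan) {
 *			...
 *			dma_release_channel(chan);
 *		}
 */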
  43
  44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45
  46#include <linux/platform_device.h>
  47#include <linux/dma-mapping.h>
  48#include <linux/init.h>
  49#include <linux/module.h>
  50#include <linux/mm.h>
  51#include <linux/device.h>
  52#include <linux/dmaengine.h>
  53#include <linux/hardirq.h>
  54#include <linux/spinlock.h>
  55#include <linux/percpu.h>
  56#include <linux/rcupdate.h>
  57#include <linux/mutex.h>
  58#include <linux/jiffies.h>
  59#include <linux/rculist.h>
  60#include <linux/idr.h>
  61#include <linux/slab.h>
  62#include <linux/acpi.h>
  63#include <linux/acpi_dma.h>
  64#include <linux/of_dma.h>
  65#include <linux/mempool.h>
  66
  67static DEFINE_MUTEX(dma_list_mutex);
  68static DEFINE_IDR(dma_idr);
  69static LIST_HEAD(dma_device_list);
  70static long dmaengine_ref_count;
  71
  72/* --- sysfs implementation --- */
  73
  74/**
   75 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  76 * @dev - device node
  77 *
  78 * Must be called under dma_list_mutex
  79 */
  80static struct dma_chan *dev_to_dma_chan(struct device *dev)
  81{
  82        struct dma_chan_dev *chan_dev;
  83
  84        chan_dev = container_of(dev, typeof(*chan_dev), device);
  85        return chan_dev->chan;
  86}
  87
  88static ssize_t memcpy_count_show(struct device *dev,
  89                                 struct device_attribute *attr, char *buf)
  90{
  91        struct dma_chan *chan;
  92        unsigned long count = 0;
  93        int i;
  94        int err;
  95
  96        mutex_lock(&dma_list_mutex);
  97        chan = dev_to_dma_chan(dev);
  98        if (chan) {
  99                for_each_possible_cpu(i)
 100                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
 101                err = sprintf(buf, "%lu\n", count);
 102        } else
 103                err = -ENODEV;
 104        mutex_unlock(&dma_list_mutex);
 105
 106        return err;
 107}
 108static DEVICE_ATTR_RO(memcpy_count);
 109
 110static ssize_t bytes_transferred_show(struct device *dev,
 111                                      struct device_attribute *attr, char *buf)
 112{
 113        struct dma_chan *chan;
 114        unsigned long count = 0;
 115        int i;
 116        int err;
 117
 118        mutex_lock(&dma_list_mutex);
 119        chan = dev_to_dma_chan(dev);
 120        if (chan) {
 121                for_each_possible_cpu(i)
 122                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 123                err = sprintf(buf, "%lu\n", count);
 124        } else
 125                err = -ENODEV;
 126        mutex_unlock(&dma_list_mutex);
 127
 128        return err;
 129}
 130static DEVICE_ATTR_RO(bytes_transferred);
 131
 132static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 133                           char *buf)
 134{
 135        struct dma_chan *chan;
 136        int err;
 137
 138        mutex_lock(&dma_list_mutex);
 139        chan = dev_to_dma_chan(dev);
 140        if (chan)
 141                err = sprintf(buf, "%d\n", chan->client_count);
 142        else
 143                err = -ENODEV;
 144        mutex_unlock(&dma_list_mutex);
 145
 146        return err;
 147}
 148static DEVICE_ATTR_RO(in_use);
 149
 150static struct attribute *dma_dev_attrs[] = {
 151        &dev_attr_memcpy_count.attr,
 152        &dev_attr_bytes_transferred.attr,
 153        &dev_attr_in_use.attr,
 154        NULL,
 155};
 156ATTRIBUTE_GROUPS(dma_dev);
 157
 158static void chan_dev_release(struct device *dev)
 159{
 160        struct dma_chan_dev *chan_dev;
 161
 162        chan_dev = container_of(dev, typeof(*chan_dev), device);
 163        if (atomic_dec_and_test(chan_dev->idr_ref)) {
 164                mutex_lock(&dma_list_mutex);
 165                idr_remove(&dma_idr, chan_dev->dev_id);
 166                mutex_unlock(&dma_list_mutex);
 167                kfree(chan_dev->idr_ref);
 168        }
 169        kfree(chan_dev);
 170}
 171
 172static struct class dma_devclass = {
 173        .name           = "dma",
 174        .dev_groups     = dma_dev_groups,
 175        .dev_release    = chan_dev_release,
 176};
 177
 178/* --- client and device registration --- */
 179
 180#define dma_device_satisfies_mask(device, mask) \
 181        __dma_device_satisfies_mask((device), &(mask))
 182static int
 183__dma_device_satisfies_mask(struct dma_device *device,
 184                            const dma_cap_mask_t *want)
 185{
 186        dma_cap_mask_t has;
 187
 188        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 189                DMA_TX_TYPE_END);
 190        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 191}
 192
 193static struct module *dma_chan_to_owner(struct dma_chan *chan)
 194{
 195        return chan->device->dev->driver->owner;
 196}
 197
 198/**
 199 * balance_ref_count - catch up the channel reference count
 200 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 201 *
 202 * balance_ref_count must be called under dma_list_mutex
 203 */
 204static void balance_ref_count(struct dma_chan *chan)
 205{
 206        struct module *owner = dma_chan_to_owner(chan);
 207
 208        while (chan->client_count < dmaengine_ref_count) {
 209                __module_get(owner);
 210                chan->client_count++;
 211        }
 212}
 213
 214/**
 215 * dma_chan_get - try to grab a dma channel's parent driver module
 216 * @chan - channel to grab
 217 *
 218 * Must be called under dma_list_mutex
 219 */
 220static int dma_chan_get(struct dma_chan *chan)
 221{
 222        struct module *owner = dma_chan_to_owner(chan);
 223        int ret;
 224
 225        /* The channel is already in use, update client count */
 226        if (chan->client_count) {
 227                __module_get(owner);
 228                goto out;
 229        }
 230
 231        if (!try_module_get(owner))
 232                return -ENODEV;
 233
 234        /* allocate upon first client reference */
 235        if (chan->device->device_alloc_chan_resources) {
 236                ret = chan->device->device_alloc_chan_resources(chan);
 237                if (ret < 0)
 238                        goto err_out;
 239        }
 240
 241        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 242                balance_ref_count(chan);
 243
 244out:
 245        chan->client_count++;
 246        return 0;
 247
 248err_out:
 249        module_put(owner);
 250        return ret;
 251}
 252
 253/**
 254 * dma_chan_put - drop a reference to a dma channel's parent driver module
 255 * @chan - channel to release
 256 *
 257 * Must be called under dma_list_mutex
 258 */
 259static void dma_chan_put(struct dma_chan *chan)
 260{
 261        /* This channel is not in use, bail out */
 262        if (!chan->client_count)
 263                return;
 264
 265        chan->client_count--;
 266        module_put(dma_chan_to_owner(chan));
 267
 268        /* This channel is not in use anymore, free it */
 269        if (!chan->client_count && chan->device->device_free_chan_resources) {
 270                /* Make sure all operations have completed */
 271                dmaengine_synchronize(chan);
 272                chan->device->device_free_chan_resources(chan);
 273        }
 274
 275        /* If the channel is used via a DMA request router, free the mapping */
 276        if (chan->router && chan->router->route_free) {
 277                chan->router->route_free(chan->router->dev, chan->route_data);
 278                chan->router = NULL;
 279                chan->route_data = NULL;
 280        }
 281}
 282
 283enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 284{
 285        enum dma_status status;
 286        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 287
 288        dma_async_issue_pending(chan);
 289        do {
 290                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 291                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 292                        dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 293                        return DMA_ERROR;
 294                }
 295                if (status != DMA_IN_PROGRESS)
 296                        break;
 297                cpu_relax();
 298        } while (1);
 299
 300        return status;
 301}
 302EXPORT_SYMBOL(dma_sync_wait);
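/*
 * Illustrative sketch (not taken from this file): synchronously waiting on a
 * cookie with dma_sync_wait(). The descriptor "tx" is assumed to come from
 * one of the driver's device_prep_* callbacks; dma_sync_wait() issues the
 * pending work itself before polling.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */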
 303
 304/**
 305 * dma_cap_mask_all - enable iteration over all operation types
 306 */
 307static dma_cap_mask_t dma_cap_mask_all;
 308
 309/**
 310 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 311 * @chan - associated channel for this entry
 312 */
 313struct dma_chan_tbl_ent {
 314        struct dma_chan *chan;
 315};
 316
 317/**
 318 * channel_table - percpu lookup table for memory-to-memory offload providers
 319 */
 320static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 321
 322static int __init dma_channel_table_init(void)
 323{
 324        enum dma_transaction_type cap;
 325        int err = 0;
 326
 327        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 328
 329        /* 'interrupt', 'private', and 'slave' are channel capabilities,
 330         * but are not associated with an operation so they do not need
 331         * an entry in the channel_table
 332         */
 333        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 334        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 335        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 336
 337        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 338                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 339                if (!channel_table[cap]) {
 340                        err = -ENOMEM;
 341                        break;
 342                }
 343        }
 344
 345        if (err) {
 346                pr_err("initialization failure\n");
 347                for_each_dma_cap_mask(cap, dma_cap_mask_all)
 348                        free_percpu(channel_table[cap]);
 349        }
 350
 351        return err;
 352}
 353arch_initcall(dma_channel_table_init);
 354
 355/**
 356 * dma_find_channel - find a channel to carry out the operation
 357 * @tx_type: transaction type
 358 */
 359struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 360{
 361        return this_cpu_read(channel_table[tx_type]->chan);
 362}
 363EXPORT_SYMBOL(dma_find_channel);
 364
 365/**
 366 * dma_issue_pending_all - flush all pending operations across all channels
 367 */
 368void dma_issue_pending_all(void)
 369{
 370        struct dma_device *device;
 371        struct dma_chan *chan;
 372
 373        rcu_read_lock();
 374        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 375                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 376                        continue;
 377                list_for_each_entry(chan, &device->channels, device_node)
 378                        if (chan->client_count)
 379                                device->device_issue_pending(chan);
 380        }
 381        rcu_read_unlock();
 382}
 383EXPORT_SYMBOL(dma_issue_pending_all);
 384
 385/**
 386 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 387 */
 388static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 389{
 390        int node = dev_to_node(chan->device->dev);
 391        return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
 392}
 393
 394/**
 395 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 396 * @cap: capability to match
 397 * @cpu: cpu index which the channel should be close to
 398 *
 399 * If some channels are close to the given cpu, the one with the lowest
 400 * reference count is returned. Otherwise, cpu is ignored and only the
 401 * reference count is taken into account.
 402 * Must be called under dma_list_mutex.
 403 */
 404static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 405{
 406        struct dma_device *device;
 407        struct dma_chan *chan;
 408        struct dma_chan *min = NULL;
 409        struct dma_chan *localmin = NULL;
 410
 411        list_for_each_entry(device, &dma_device_list, global_node) {
 412                if (!dma_has_cap(cap, device->cap_mask) ||
 413                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 414                        continue;
 415                list_for_each_entry(chan, &device->channels, device_node) {
 416                        if (!chan->client_count)
 417                                continue;
 418                        if (!min || chan->table_count < min->table_count)
 419                                min = chan;
 420
 421                        if (dma_chan_is_local(chan, cpu))
 422                                if (!localmin ||
 423                                    chan->table_count < localmin->table_count)
 424                                        localmin = chan;
 425                }
 426        }
 427
 428        chan = localmin ? localmin : min;
 429
 430        if (chan)
 431                chan->table_count++;
 432
 433        return chan;
 434}
 435
 436/**
 437 * dma_channel_rebalance - redistribute the available channels
 438 *
 439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 440 * operation type) in the SMP case,  and operation isolation (avoid
 441 * multi-tasking channels) in the non-SMP case.  Must be called under
 442 * dma_list_mutex.
 443 */
 444static void dma_channel_rebalance(void)
 445{
 446        struct dma_chan *chan;
 447        struct dma_device *device;
 448        int cpu;
 449        int cap;
 450
 451        /* undo the last distribution */
 452        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 453                for_each_possible_cpu(cpu)
 454                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 455
 456        list_for_each_entry(device, &dma_device_list, global_node) {
 457                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 458                        continue;
 459                list_for_each_entry(chan, &device->channels, device_node)
 460                        chan->table_count = 0;
 461        }
 462
 463        /* don't populate the channel_table if no clients are available */
 464        if (!dmaengine_ref_count)
 465                return;
 466
 467        /* redistribute available channels */
 468        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 469                for_each_online_cpu(cpu) {
 470                        chan = min_chan(cap, cpu);
 471                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 472                }
 473}
 474
 475int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 476{
 477        struct dma_device *device;
 478
 479        if (!chan || !caps)
 480                return -EINVAL;
 481
 482        device = chan->device;
 483
 484        /* check if the channel supports slave transactions */
 485        if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
 486              test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 487                return -ENXIO;
 488
 489        /*
  490         * Check whether the device reports the generic slave
  491         * capabilities. If not, it does not support any kind of
  492         * slave capability reporting.
 493         */
 494        if (!device->directions)
 495                return -ENXIO;
 496
 497        caps->src_addr_widths = device->src_addr_widths;
 498        caps->dst_addr_widths = device->dst_addr_widths;
 499        caps->directions = device->directions;
 500        caps->max_burst = device->max_burst;
 501        caps->residue_granularity = device->residue_granularity;
 502        caps->descriptor_reuse = device->descriptor_reuse;
 503
 504        /*
  505         * Some devices implement only pause (e.g. to read the residue) but no
  506         * resume. However, cmd_pause is advertised as pause AND resume.
 507         */
 508        caps->cmd_pause = !!(device->device_pause && device->device_resume);
 509        caps->cmd_terminate = !!device->device_terminate_all;
 510
 511        return 0;
 512}
 513EXPORT_SYMBOL_GPL(dma_get_slave_caps);
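/*
 * Illustrative sketch (not taken from this file): a slave client checking the
 * reported capabilities before configuring a transfer. The required direction
 * and bus width used here are arbitrary examples.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -EINVAL;
 *	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
 *		return -EINVAL;
 *	if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		return -EINVAL;
 */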
 514
 515static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 516                                          struct dma_device *dev,
 517                                          dma_filter_fn fn, void *fn_param)
 518{
 519        struct dma_chan *chan;
 520
 521        if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 522                dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 523                return NULL;
 524        }
 525        /* devices with multiple channels need special handling as we need to
 526         * ensure that all channels are either private or public.
 527         */
 528        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 529                list_for_each_entry(chan, &dev->channels, device_node) {
 530                        /* some channels are already publicly allocated */
 531                        if (chan->client_count)
 532                                return NULL;
 533                }
 534
 535        list_for_each_entry(chan, &dev->channels, device_node) {
 536                if (chan->client_count) {
 537                        dev_dbg(dev->dev, "%s: %s busy\n",
 538                                 __func__, dma_chan_name(chan));
 539                        continue;
 540                }
 541                if (fn && !fn(chan, fn_param)) {
 542                        dev_dbg(dev->dev, "%s: %s filter said false\n",
 543                                 __func__, dma_chan_name(chan));
 544                        continue;
 545                }
 546                return chan;
 547        }
 548
 549        return NULL;
 550}
 551
 552static struct dma_chan *find_candidate(struct dma_device *device,
 553                                       const dma_cap_mask_t *mask,
 554                                       dma_filter_fn fn, void *fn_param)
 555{
 556        struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
 557        int err;
 558
 559        if (chan) {
 560                /* Found a suitable channel, try to grab, prep, and return it.
 561                 * We first set DMA_PRIVATE to disable balance_ref_count as this
 562                 * channel will not be published in the general-purpose
 563                 * allocator
 564                 */
 565                dma_cap_set(DMA_PRIVATE, device->cap_mask);
 566                device->privatecnt++;
 567                err = dma_chan_get(chan);
 568
 569                if (err) {
 570                        if (err == -ENODEV) {
 571                                dev_dbg(device->dev, "%s: %s module removed\n",
 572                                        __func__, dma_chan_name(chan));
 573                                list_del_rcu(&device->global_node);
 574                        } else
 575                                dev_dbg(device->dev,
 576                                        "%s: failed to get %s: (%d)\n",
 577                                         __func__, dma_chan_name(chan), err);
 578
 579                        if (--device->privatecnt == 0)
 580                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 581
 582                        chan = ERR_PTR(err);
 583                }
 584        }
 585
 586        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 587}
 588
 589/**
 590 * dma_get_slave_channel - try to get specific channel exclusively
 591 * @chan: target channel
 592 */
 593struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 594{
 595        int err = -EBUSY;
 596
 597        /* lock against __dma_request_channel */
 598        mutex_lock(&dma_list_mutex);
 599
 600        if (chan->client_count == 0) {
 601                struct dma_device *device = chan->device;
 602
 603                dma_cap_set(DMA_PRIVATE, device->cap_mask);
 604                device->privatecnt++;
 605                err = dma_chan_get(chan);
 606                if (err) {
 607                        dev_dbg(chan->device->dev,
 608                                "%s: failed to get %s: (%d)\n",
 609                                __func__, dma_chan_name(chan), err);
 610                        chan = NULL;
 611                        if (--device->privatecnt == 0)
 612                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 613                }
 614        } else
 615                chan = NULL;
 616
 617        mutex_unlock(&dma_list_mutex);
 618
 619
 620        return chan;
 621}
 622EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 623
 624struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 625{
 626        dma_cap_mask_t mask;
 627        struct dma_chan *chan;
 628
 629        dma_cap_zero(mask);
 630        dma_cap_set(DMA_SLAVE, mask);
 631
 632        /* lock against __dma_request_channel */
 633        mutex_lock(&dma_list_mutex);
 634
 635        chan = find_candidate(device, &mask, NULL, NULL);
 636
 637        mutex_unlock(&dma_list_mutex);
 638
 639        return IS_ERR(chan) ? NULL : chan;
 640}
 641EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 642
 643/**
 644 * __dma_request_channel - try to allocate an exclusive channel
 645 * @mask: capabilities that the channel must satisfy
  646 * @fn: optional callback used to filter (accept or reject) candidate channels
 647 * @fn_param: opaque parameter to pass to dma_filter_fn
 648 *
 649 * Returns pointer to appropriate DMA channel on success or NULL.
 650 */
 651struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 652                                       dma_filter_fn fn, void *fn_param)
 653{
 654        struct dma_device *device, *_d;
 655        struct dma_chan *chan = NULL;
 656
 657        /* Find a channel */
 658        mutex_lock(&dma_list_mutex);
 659        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 660                chan = find_candidate(device, mask, fn, fn_param);
 661                if (!IS_ERR(chan))
 662                        break;
 663
 664                chan = NULL;
 665        }
 666        mutex_unlock(&dma_list_mutex);
 667
 668        pr_debug("%s: %s (%s)\n",
 669                 __func__,
 670                 chan ? "success" : "fail",
 671                 chan ? dma_chan_name(chan) : NULL);
 672
 673        return chan;
 674}
 675EXPORT_SYMBOL_GPL(__dma_request_channel);
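/*
 * Illustrative sketch (not taken from this file): a filter callback as passed
 * to dma_request_channel()/__dma_request_channel(). The matching criterion
 * (a specific provider device handed in through fn_param) is only an example;
 * real filters usually compare against driver-specific data.
 *
 *	static bool my_filter_fn(struct dma_chan *chan, void *fn_param)
 *	{
 *		struct device *wanted = fn_param;
 *
 *		return chan->device->dev == wanted;
 *	}
 */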
 676
 677static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
 678                                                    const char *name,
 679                                                    struct device *dev)
 680{
 681        int i;
 682
 683        if (!device->filter.mapcnt)
 684                return NULL;
 685
 686        for (i = 0; i < device->filter.mapcnt; i++) {
 687                const struct dma_slave_map *map = &device->filter.map[i];
 688
 689                if (!strcmp(map->devname, dev_name(dev)) &&
 690                    !strcmp(map->slave, name))
 691                        return map;
 692        }
 693
 694        return NULL;
 695}
 696
 697/**
 698 * dma_request_chan - try to allocate an exclusive slave channel
 699 * @dev:        pointer to client device structure
 700 * @name:       slave channel name
 701 *
 702 * Returns pointer to appropriate DMA channel on success or an error pointer.
 703 */
 704struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 705{
 706        struct dma_device *d, *_d;
 707        struct dma_chan *chan = NULL;
 708
  709        /* If device-tree is present, get the slave info from it */
 710        if (dev->of_node)
 711                chan = of_dma_request_slave_channel(dev->of_node, name);
 712
  713        /* If the device was enumerated by ACPI, get the slave info from it */
 714        if (has_acpi_companion(dev) && !chan)
 715                chan = acpi_dma_request_slave_chan_by_name(dev, name);
 716
 717        if (chan) {
  718                /* Valid channel found or the requester needs to be deferred */
 719                if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 720                        return chan;
 721        }
 722
 723        /* Try to find the channel via the DMA filter map(s) */
 724        mutex_lock(&dma_list_mutex);
 725        list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 726                dma_cap_mask_t mask;
 727                const struct dma_slave_map *map = dma_filter_match(d, name, dev);
 728
 729                if (!map)
 730                        continue;
 731
 732                dma_cap_zero(mask);
 733                dma_cap_set(DMA_SLAVE, mask);
 734
 735                chan = find_candidate(d, &mask, d->filter.fn, map->param);
 736                if (!IS_ERR(chan))
 737                        break;
 738        }
 739        mutex_unlock(&dma_list_mutex);
 740
 741        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 742}
 743EXPORT_SYMBOL_GPL(dma_request_chan);
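/*
 * Illustrative sketch (not taken from this file): requesting a named slave
 * channel from a client driver's probe routine. The channel name "rx" is a
 * hypothetical binding; the error pointer (possibly -EPROBE_DEFER) is
 * propagated so probe can be retried later.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */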
 744
 745/**
 746 * dma_request_slave_channel - try to allocate an exclusive slave channel
 747 * @dev:        pointer to client device structure
 748 * @name:       slave channel name
 749 *
 750 * Returns pointer to appropriate DMA channel on success or NULL.
 751 */
 752struct dma_chan *dma_request_slave_channel(struct device *dev,
 753                                           const char *name)
 754{
 755        struct dma_chan *ch = dma_request_chan(dev, name);
 756        if (IS_ERR(ch))
 757                return NULL;
 758
 759        return ch;
 760}
 761EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 762
 763/**
 764 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 765 * @mask: capabilities that the channel must satisfy
 766 *
 767 * Returns pointer to appropriate DMA channel on success or an error pointer.
 768 */
 769struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 770{
 771        struct dma_chan *chan;
 772
 773        if (!mask)
 774                return ERR_PTR(-ENODEV);
 775
 776        chan = __dma_request_channel(mask, NULL, NULL);
 777        if (!chan)
 778                chan = ERR_PTR(-ENODEV);
 779
 780        return chan;
 781}
 782EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
 783
 784void dma_release_channel(struct dma_chan *chan)
 785{
 786        mutex_lock(&dma_list_mutex);
 787        WARN_ONCE(chan->client_count != 1,
 788                  "chan reference count %d != 1\n", chan->client_count);
 789        dma_chan_put(chan);
 790        /* drop PRIVATE cap enabled by __dma_request_channel() */
 791        if (--chan->device->privatecnt == 0)
 792                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 793        mutex_unlock(&dma_list_mutex);
 794}
 795EXPORT_SYMBOL_GPL(dma_release_channel);
 796
 797/**
 798 * dmaengine_get - register interest in dma_channels
 799 */
 800void dmaengine_get(void)
 801{
 802        struct dma_device *device, *_d;
 803        struct dma_chan *chan;
 804        int err;
 805
 806        mutex_lock(&dma_list_mutex);
 807        dmaengine_ref_count++;
 808
 809        /* try to grab channels */
 810        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 811                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 812                        continue;
 813                list_for_each_entry(chan, &device->channels, device_node) {
 814                        err = dma_chan_get(chan);
 815                        if (err == -ENODEV) {
 816                                /* module removed before we could use it */
 817                                list_del_rcu(&device->global_node);
 818                                break;
 819                        } else if (err)
 820                                dev_dbg(chan->device->dev,
 821                                        "%s: failed to get %s: (%d)\n",
 822                                        __func__, dma_chan_name(chan), err);
 823                }
 824        }
 825
 826        /* if this is the first reference and there were channels
  827         * waiting, we need to rebalance to get those channels
 828         * incorporated into the channel table
 829         */
 830        if (dmaengine_ref_count == 1)
 831                dma_channel_rebalance();
 832        mutex_unlock(&dma_list_mutex);
 833}
 834EXPORT_SYMBOL(dmaengine_get);
 835
 836/**
 837 * dmaengine_put - let dma drivers be removed when ref_count == 0
 838 */
 839void dmaengine_put(void)
 840{
 841        struct dma_device *device;
 842        struct dma_chan *chan;
 843
 844        mutex_lock(&dma_list_mutex);
 845        dmaengine_ref_count--;
 846        BUG_ON(dmaengine_ref_count < 0);
 847        /* drop channel references */
 848        list_for_each_entry(device, &dma_device_list, global_node) {
 849                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 850                        continue;
 851                list_for_each_entry(chan, &device->channels, device_node)
 852                        dma_chan_put(chan);
 853        }
 854        mutex_unlock(&dma_list_mutex);
 855}
 856EXPORT_SYMBOL(dmaengine_put);
 857
 858static bool device_has_all_tx_types(struct dma_device *device)
 859{
 860        /* A device that satisfies this test has channels that will never cause
 861         * an async_tx channel switch event as all possible operation types can
 862         * be handled.
 863         */
 864        #ifdef CONFIG_ASYNC_TX_DMA
 865        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 866                return false;
 867        #endif
 868
 869        #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
 870        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 871                return false;
 872        #endif
 873
 874        #if IS_ENABLED(CONFIG_ASYNC_XOR)
 875        if (!dma_has_cap(DMA_XOR, device->cap_mask))
 876                return false;
 877
 878        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 879        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 880                return false;
 881        #endif
 882        #endif
 883
 884        #if IS_ENABLED(CONFIG_ASYNC_PQ)
 885        if (!dma_has_cap(DMA_PQ, device->cap_mask))
 886                return false;
 887
 888        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 889        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 890                return false;
 891        #endif
 892        #endif
 893
 894        return true;
 895}
 896
 897static int get_dma_id(struct dma_device *device)
 898{
 899        int rc;
 900
 901        mutex_lock(&dma_list_mutex);
 902
 903        rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
 904        if (rc >= 0)
 905                device->dev_id = rc;
 906
 907        mutex_unlock(&dma_list_mutex);
 908        return rc < 0 ? rc : 0;
 909}
 910
 911/**
  912 * dma_async_device_register - register a DMA device and its channels
 913 * @device: &dma_device
 914 */
 915int dma_async_device_register(struct dma_device *device)
 916{
 917        int chancnt = 0, rc;
 918        struct dma_chan* chan;
 919        atomic_t *idr_ref;
 920
 921        if (!device)
 922                return -ENODEV;
 923
 924        /* validate device routines */
 925        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 926                !device->device_prep_dma_memcpy);
 927        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 928                !device->device_prep_dma_xor);
 929        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 930                !device->device_prep_dma_xor_val);
 931        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 932                !device->device_prep_dma_pq);
 933        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 934                !device->device_prep_dma_pq_val);
 935        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 936                !device->device_prep_dma_memset);
 937        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 938                !device->device_prep_dma_interrupt);
 939        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 940                !device->device_prep_dma_sg);
 941        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 942                !device->device_prep_dma_cyclic);
 943        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
 944                !device->device_prep_interleaved_dma);
 945
 946        BUG_ON(!device->device_tx_status);
 947        BUG_ON(!device->device_issue_pending);
 948        BUG_ON(!device->dev);
 949
 950        /* note: this only matters in the
 951         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 952         */
 953        if (device_has_all_tx_types(device))
 954                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 955
 956        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 957        if (!idr_ref)
 958                return -ENOMEM;
 959        rc = get_dma_id(device);
 960        if (rc != 0) {
 961                kfree(idr_ref);
 962                return rc;
 963        }
 964
 965        atomic_set(idr_ref, 0);
 966
 967        /* represent channels in sysfs. Probably want devs too */
 968        list_for_each_entry(chan, &device->channels, device_node) {
 969                rc = -ENOMEM;
 970                chan->local = alloc_percpu(typeof(*chan->local));
 971                if (chan->local == NULL)
 972                        goto err_out;
 973                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 974                if (chan->dev == NULL) {
 975                        free_percpu(chan->local);
 976                        chan->local = NULL;
 977                        goto err_out;
 978                }
 979
 980                chan->chan_id = chancnt++;
 981                chan->dev->device.class = &dma_devclass;
 982                chan->dev->device.parent = device->dev;
 983                chan->dev->chan = chan;
 984                chan->dev->idr_ref = idr_ref;
 985                chan->dev->dev_id = device->dev_id;
 986                atomic_inc(idr_ref);
 987                dev_set_name(&chan->dev->device, "dma%dchan%d",
 988                             device->dev_id, chan->chan_id);
 989
 990                rc = device_register(&chan->dev->device);
 991                if (rc) {
 992                        free_percpu(chan->local);
 993                        chan->local = NULL;
 994                        kfree(chan->dev);
 995                        atomic_dec(idr_ref);
 996                        goto err_out;
 997                }
 998                chan->client_count = 0;
 999        }
1000        device->chancnt = chancnt;
1001
1002        mutex_lock(&dma_list_mutex);
1003        /* take references on public channels */
1004        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1005                list_for_each_entry(chan, &device->channels, device_node) {
1006                        /* if clients are already waiting for channels we need
1007                         * to take references on their behalf
1008                         */
1009                        if (dma_chan_get(chan) == -ENODEV) {
1010                                /* note we can only get here for the first
1011                                 * channel as the remaining channels are
1012                                 * guaranteed to get a reference
1013                                 */
1014                                rc = -ENODEV;
1015                                mutex_unlock(&dma_list_mutex);
1016                                goto err_out;
1017                        }
1018                }
1019        list_add_tail_rcu(&device->global_node, &dma_device_list);
1020        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1021                device->privatecnt++;   /* Always private */
1022        dma_channel_rebalance();
1023        mutex_unlock(&dma_list_mutex);
1024
1025        return 0;
1026
1027err_out:
1028        /* if we never registered a channel just release the idr */
1029        if (atomic_read(idr_ref) == 0) {
1030                mutex_lock(&dma_list_mutex);
1031                idr_remove(&dma_idr, device->dev_id);
1032                mutex_unlock(&dma_list_mutex);
1033                kfree(idr_ref);
1034                return rc;
1035        }
1036
1037        list_for_each_entry(chan, &device->channels, device_node) {
1038                if (chan->local == NULL)
1039                        continue;
1040                mutex_lock(&dma_list_mutex);
1041                chan->dev->chan = NULL;
1042                mutex_unlock(&dma_list_mutex);
1043                device_unregister(&chan->dev->device);
1044                free_percpu(chan->local);
1045        }
1046        return rc;
1047}
1048EXPORT_SYMBOL(dma_async_device_register);
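/*
 * Illustrative sketch (not taken from this file): the minimum a provider
 * driver typically sets up before calling dma_async_device_register(). The
 * my_* callbacks and the single channel "chan" are hypothetical; the fields
 * chosen correspond to the DMA_MEMCPY checks validated above.
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&dd->channels);
 *	chan->device = dd;
 *	list_add_tail(&chan->device_node, &dd->channels);
 *
 *	ret = dma_async_device_register(dd);
 */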
1049
1050/**
1051 * dma_async_device_unregister - unregister a DMA device
1052 * @device: &dma_device
1053 *
 1054 * This routine is called by dma driver exit routines; dmaengine holds module
 1055 * references to prevent it from being called while channels are in use.
1056 */
1057void dma_async_device_unregister(struct dma_device *device)
1058{
1059        struct dma_chan *chan;
1060
1061        mutex_lock(&dma_list_mutex);
1062        list_del_rcu(&device->global_node);
1063        dma_channel_rebalance();
1064        mutex_unlock(&dma_list_mutex);
1065
1066        list_for_each_entry(chan, &device->channels, device_node) {
1067                WARN_ONCE(chan->client_count,
1068                          "%s called while %d clients hold a reference\n",
1069                          __func__, chan->client_count);
1070                mutex_lock(&dma_list_mutex);
1071                chan->dev->chan = NULL;
1072                mutex_unlock(&dma_list_mutex);
1073                device_unregister(&chan->dev->device);
1074                free_percpu(chan->local);
1075        }
1076}
1077EXPORT_SYMBOL(dma_async_device_unregister);
1078
1079struct dmaengine_unmap_pool {
1080        struct kmem_cache *cache;
1081        const char *name;
1082        mempool_t *pool;
1083        size_t size;
1084};
1085
1086#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1087static struct dmaengine_unmap_pool unmap_pool[] = {
1088        __UNMAP_POOL(2),
1089        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1090        __UNMAP_POOL(16),
1091        __UNMAP_POOL(128),
1092        __UNMAP_POOL(256),
1093        #endif
1094};
1095
1096static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1097{
1098        int order = get_count_order(nr);
1099
1100        switch (order) {
1101        case 0 ... 1:
1102                return &unmap_pool[0];
1103        case 2 ... 4:
1104                return &unmap_pool[1];
1105        case 5 ... 7:
1106                return &unmap_pool[2];
1107        case 8:
1108                return &unmap_pool[3];
1109        default:
1110                BUG();
1111                return NULL;
1112        }
1113}
1114
1115static void dmaengine_unmap(struct kref *kref)
1116{
1117        struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1118        struct device *dev = unmap->dev;
1119        int cnt, i;
1120
1121        cnt = unmap->to_cnt;
1122        for (i = 0; i < cnt; i++)
1123                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1124                               DMA_TO_DEVICE);
1125        cnt += unmap->from_cnt;
1126        for (; i < cnt; i++)
1127                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1128                               DMA_FROM_DEVICE);
1129        cnt += unmap->bidi_cnt;
1130        for (; i < cnt; i++) {
1131                if (unmap->addr[i] == 0)
1132                        continue;
1133                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1134                               DMA_BIDIRECTIONAL);
1135        }
1136        cnt = unmap->map_cnt;
1137        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1138}
1139
1140void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1141{
1142        if (unmap)
1143                kref_put(&unmap->kref, dmaengine_unmap);
1144}
1145EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1146
1147static void dmaengine_destroy_unmap_pool(void)
1148{
1149        int i;
1150
1151        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1152                struct dmaengine_unmap_pool *p = &unmap_pool[i];
1153
1154                mempool_destroy(p->pool);
1155                p->pool = NULL;
1156                kmem_cache_destroy(p->cache);
1157                p->cache = NULL;
1158        }
1159}
1160
1161static int __init dmaengine_init_unmap_pool(void)
1162{
1163        int i;
1164
1165        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1166                struct dmaengine_unmap_pool *p = &unmap_pool[i];
1167                size_t size;
1168
1169                size = sizeof(struct dmaengine_unmap_data) +
1170                       sizeof(dma_addr_t) * p->size;
1171
1172                p->cache = kmem_cache_create(p->name, size, 0,
1173                                             SLAB_HWCACHE_ALIGN, NULL);
1174                if (!p->cache)
1175                        break;
1176                p->pool = mempool_create_slab_pool(1, p->cache);
1177                if (!p->pool)
1178                        break;
1179        }
1180
1181        if (i == ARRAY_SIZE(unmap_pool))
1182                return 0;
1183
1184        dmaengine_destroy_unmap_pool();
1185        return -ENOMEM;
1186}
1187
1188struct dmaengine_unmap_data *
1189dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1190{
1191        struct dmaengine_unmap_data *unmap;
1192
1193        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1194        if (!unmap)
1195                return NULL;
1196
1197        memset(unmap, 0, sizeof(*unmap));
1198        kref_init(&unmap->kref);
1199        unmap->dev = dev;
1200        unmap->map_cnt = nr;
1201
1202        return unmap;
1203}
1204EXPORT_SYMBOL(dmaengine_get_unmap_data);
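/*
 * Illustrative sketch (not taken from this file): pairing
 * dmaengine_get_unmap_data() with dma_set_unmap() so the pages backing a
 * memcpy are unmapped once the last reference is dropped. One source and one
 * destination page are assumed; dma_map_page() error handling is omitted.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *
 *	dma_set_unmap(tx, unmap);	(descriptor takes its own reference)
 *	dmaengine_unmap_put(unmap);	(drop ours; unmapping happens at final put)
 */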
1205
1206void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1207        struct dma_chan *chan)
1208{
1209        tx->chan = chan;
1210        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1211        spin_lock_init(&tx->lock);
1212        #endif
1213}
1214EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1215
1216/* dma_wait_for_async_tx - spin wait for a transaction to complete
1217 * @tx: in-flight transaction to wait on
1218 */
1219enum dma_status
1220dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1221{
1222        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1223
1224        if (!tx)
1225                return DMA_COMPLETE;
1226
1227        while (tx->cookie == -EBUSY) {
1228                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1229                        dev_err(tx->chan->device->dev,
1230                                "%s timeout waiting for descriptor submission\n",
1231                                __func__);
1232                        return DMA_ERROR;
1233                }
1234                cpu_relax();
1235        }
1236        return dma_sync_wait(tx->chan, tx->cookie);
1237}
1238EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1239
1240/* dma_run_dependencies - helper routine for dma drivers to process
1241 *      (start) dependent operations on their target channel
1242 * @tx: transaction with dependencies
1243 */
1244void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1245{
1246        struct dma_async_tx_descriptor *dep = txd_next(tx);
1247        struct dma_async_tx_descriptor *dep_next;
1248        struct dma_chan *chan;
1249
1250        if (!dep)
1251                return;
1252
1253        /* we'll submit tx->next now, so clear the link */
1254        txd_clear_next(tx);
1255        chan = dep->chan;
1256
 1257        /* keep submitting until a channel switch is detected;
 1258         * in that case we will be called again as a result of
 1259         * processing the interrupt from async_tx_channel_switch
1260         */
1261        for (; dep; dep = dep_next) {
1262                txd_lock(dep);
1263                txd_clear_parent(dep);
1264                dep_next = txd_next(dep);
1265                if (dep_next && dep_next->chan == chan)
1266                        txd_clear_next(dep); /* ->next will be submitted */
1267                else
1268                        dep_next = NULL; /* submit current dep and terminate */
1269                txd_unlock(dep);
1270
1271                dep->tx_submit(dep);
1272        }
1273
1274        chan->device->device_issue_pending(chan);
1275}
1276EXPORT_SYMBOL_GPL(dma_run_dependencies);
1277
1278static int __init dma_bus_init(void)
1279{
1280        int err = dmaengine_init_unmap_pool();
1281
1282        if (err)
1283                return err;
1284        return class_register(&dma_devclass);
1285}
1286arch_initcall(dma_bus_init);
1287
1288
1289