linux/drivers/dma/dmaengine.c
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * The full GNU General Public License is included in this distribution in the
  15 * file called COPYING.
  16 */
  17
  18/*
  19 * This code implements the DMA subsystem. It provides a HW-neutral interface
  20 * for other kernel code to use asynchronous memory copy capabilities,
  21 * if present, and allows different HW DMA drivers to register as providing
  22 * this capability.
  23 *
   24 * Because we are accelerating what is already a relatively fast
  25 * operation, the code goes to great lengths to avoid additional overhead,
  26 * such as locking.
  27 *
  28 * LOCKING:
  29 *
   30 * The subsystem keeps a global list of dma_device structs, which is protected by a
  31 * mutex, dma_list_mutex.
  32 *
  33 * A subsystem can get access to a channel by calling dmaengine_get() followed
  34 * by dma_find_channel(), or if it has need for an exclusive channel it can call
  35 * dma_request_channel().  Once a channel is allocated a reference is taken
  36 * against its corresponding driver to disable removal.
  37 *
  38 * Each device has a channels list, which runs unlocked but is never modified
   39 * once the device is registered; it is simply set up by the driver.
  40 *
  41 * See Documentation/dmaengine.txt for more details
  42 */
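
/*
 * Usage sketch (hypothetical client, illustrative only): opportunistic use
 * of the public channel pool described above.  foo_init(), foo_exit() and
 * foo_offload_copy() are invented names; only the dmaengine calls are the
 * real API.
 *
 *	static int __init foo_init(void)
 *	{
 *		dmaengine_get();		// opt in to the public channel pool
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		dmaengine_put();		// allow providers to be unloaded again
 *	}
 *
 *	static void foo_offload_copy(void)
 *	{
 *		struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *		if (!chan)
 *			return;			// no memcpy channel, use plain memcpy()
 *		// ...prepare and submit a descriptor on chan here...
 *		dma_async_issue_pending(chan);
 *	}
 *
 * Exclusive users would instead call dma_request_channel() (see
 * __dma_request_channel() below) and dma_release_channel() when done.
 */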
  43
  44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45
  46#include <linux/dma-mapping.h>
  47#include <linux/init.h>
  48#include <linux/module.h>
  49#include <linux/mm.h>
  50#include <linux/device.h>
  51#include <linux/dmaengine.h>
  52#include <linux/hardirq.h>
  53#include <linux/spinlock.h>
  54#include <linux/percpu.h>
  55#include <linux/rcupdate.h>
  56#include <linux/mutex.h>
  57#include <linux/jiffies.h>
  58#include <linux/rculist.h>
  59#include <linux/idr.h>
  60#include <linux/slab.h>
  61#include <linux/acpi.h>
  62#include <linux/acpi_dma.h>
  63#include <linux/of_dma.h>
  64#include <linux/mempool.h>
  65
  66static DEFINE_MUTEX(dma_list_mutex);
  67static DEFINE_IDR(dma_idr);
  68static LIST_HEAD(dma_device_list);
  69static long dmaengine_ref_count;
  70
  71/* --- sysfs implementation --- */
  72
  73/**
   74 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  75 * @dev - device node
  76 *
  77 * Must be called under dma_list_mutex
  78 */
  79static struct dma_chan *dev_to_dma_chan(struct device *dev)
  80{
  81        struct dma_chan_dev *chan_dev;
  82
  83        chan_dev = container_of(dev, typeof(*chan_dev), device);
  84        return chan_dev->chan;
  85}
  86
  87static ssize_t memcpy_count_show(struct device *dev,
  88                                 struct device_attribute *attr, char *buf)
  89{
  90        struct dma_chan *chan;
  91        unsigned long count = 0;
  92        int i;
  93        int err;
  94
  95        mutex_lock(&dma_list_mutex);
  96        chan = dev_to_dma_chan(dev);
  97        if (chan) {
  98                for_each_possible_cpu(i)
  99                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
 100                err = sprintf(buf, "%lu\n", count);
 101        } else
 102                err = -ENODEV;
 103        mutex_unlock(&dma_list_mutex);
 104
 105        return err;
 106}
 107static DEVICE_ATTR_RO(memcpy_count);
 108
 109static ssize_t bytes_transferred_show(struct device *dev,
 110                                      struct device_attribute *attr, char *buf)
 111{
 112        struct dma_chan *chan;
 113        unsigned long count = 0;
 114        int i;
 115        int err;
 116
 117        mutex_lock(&dma_list_mutex);
 118        chan = dev_to_dma_chan(dev);
 119        if (chan) {
 120                for_each_possible_cpu(i)
 121                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 122                err = sprintf(buf, "%lu\n", count);
 123        } else
 124                err = -ENODEV;
 125        mutex_unlock(&dma_list_mutex);
 126
 127        return err;
 128}
 129static DEVICE_ATTR_RO(bytes_transferred);
 130
 131static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 132                           char *buf)
 133{
 134        struct dma_chan *chan;
 135        int err;
 136
 137        mutex_lock(&dma_list_mutex);
 138        chan = dev_to_dma_chan(dev);
 139        if (chan)
 140                err = sprintf(buf, "%d\n", chan->client_count);
 141        else
 142                err = -ENODEV;
 143        mutex_unlock(&dma_list_mutex);
 144
 145        return err;
 146}
 147static DEVICE_ATTR_RO(in_use);
 148
 149static struct attribute *dma_dev_attrs[] = {
 150        &dev_attr_memcpy_count.attr,
 151        &dev_attr_bytes_transferred.attr,
 152        &dev_attr_in_use.attr,
 153        NULL,
 154};
 155ATTRIBUTE_GROUPS(dma_dev);
 156
 157static void chan_dev_release(struct device *dev)
 158{
 159        struct dma_chan_dev *chan_dev;
 160
 161        chan_dev = container_of(dev, typeof(*chan_dev), device);
 162        if (atomic_dec_and_test(chan_dev->idr_ref)) {
 163                mutex_lock(&dma_list_mutex);
 164                idr_remove(&dma_idr, chan_dev->dev_id);
 165                mutex_unlock(&dma_list_mutex);
 166                kfree(chan_dev->idr_ref);
 167        }
 168        kfree(chan_dev);
 169}
 170
 171static struct class dma_devclass = {
 172        .name           = "dma",
 173        .dev_groups     = dma_dev_groups,
 174        .dev_release    = chan_dev_release,
 175};
 176
 177/* --- client and device registration --- */
 178
 179#define dma_device_satisfies_mask(device, mask) \
 180        __dma_device_satisfies_mask((device), &(mask))
 181static int
 182__dma_device_satisfies_mask(struct dma_device *device,
 183                            const dma_cap_mask_t *want)
 184{
 185        dma_cap_mask_t has;
 186
 187        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 188                DMA_TX_TYPE_END);
 189        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 190}
 191
 192static struct module *dma_chan_to_owner(struct dma_chan *chan)
 193{
 194        return chan->device->dev->driver->owner;
 195}
 196
 197/**
 198 * balance_ref_count - catch up the channel reference count
 199 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 200 *
 201 * balance_ref_count must be called under dma_list_mutex
 202 */
 203static void balance_ref_count(struct dma_chan *chan)
 204{
 205        struct module *owner = dma_chan_to_owner(chan);
 206
 207        while (chan->client_count < dmaengine_ref_count) {
 208                __module_get(owner);
 209                chan->client_count++;
 210        }
 211}
 212
 213/**
 214 * dma_chan_get - try to grab a dma channel's parent driver module
 215 * @chan - channel to grab
 216 *
 217 * Must be called under dma_list_mutex
 218 */
 219static int dma_chan_get(struct dma_chan *chan)
 220{
 221        struct module *owner = dma_chan_to_owner(chan);
 222        int ret;
 223
 224        /* The channel is already in use, update client count */
 225        if (chan->client_count) {
 226                __module_get(owner);
 227                goto out;
 228        }
 229
 230        if (!try_module_get(owner))
 231                return -ENODEV;
 232
 233        /* allocate upon first client reference */
 234        if (chan->device->device_alloc_chan_resources) {
 235                ret = chan->device->device_alloc_chan_resources(chan);
 236                if (ret < 0)
 237                        goto err_out;
 238        }
 239
 240        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 241                balance_ref_count(chan);
 242
 243out:
 244        chan->client_count++;
 245        return 0;
 246
 247err_out:
 248        module_put(owner);
 249        return ret;
 250}
 251
 252/**
 253 * dma_chan_put - drop a reference to a dma channel's parent driver module
 254 * @chan - channel to release
 255 *
 256 * Must be called under dma_list_mutex
 257 */
 258static void dma_chan_put(struct dma_chan *chan)
 259{
 260        /* This channel is not in use, bail out */
 261        if (!chan->client_count)
 262                return;
 263
 264        chan->client_count--;
 265        module_put(dma_chan_to_owner(chan));
 266
 267        /* This channel is not in use anymore, free it */
 268        if (!chan->client_count && chan->device->device_free_chan_resources)
 269                chan->device->device_free_chan_resources(chan);
 270
 271        /* If the channel is used via a DMA request router, free the mapping */
 272        if (chan->router && chan->router->route_free) {
 273                chan->router->route_free(chan->router->dev, chan->route_data);
 274                chan->router = NULL;
 275                chan->route_data = NULL;
 276        }
 277}
 278
 279enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 280{
 281        enum dma_status status;
 282        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 283
 284        dma_async_issue_pending(chan);
 285        do {
 286                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 287                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 288                        pr_err("%s: timeout!\n", __func__);
 289                        return DMA_ERROR;
 290                }
 291                if (status != DMA_IN_PROGRESS)
 292                        break;
 293                cpu_relax();
 294        } while (1);
 295
 296        return status;
 297}
 298EXPORT_SYMBOL(dma_sync_wait);
 299
 300/**
 301 * dma_cap_mask_all - enable iteration over all operation types
 302 */
 303static dma_cap_mask_t dma_cap_mask_all;
 304
 305/**
 306 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 307 * @chan - associated channel for this entry
 308 */
 309struct dma_chan_tbl_ent {
 310        struct dma_chan *chan;
 311};
 312
 313/**
 314 * channel_table - percpu lookup table for memory-to-memory offload providers
 315 */
 316static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 317
 318static int __init dma_channel_table_init(void)
 319{
 320        enum dma_transaction_type cap;
 321        int err = 0;
 322
 323        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 324
 325        /* 'interrupt', 'private', and 'slave' are channel capabilities,
 326         * but are not associated with an operation so they do not need
 327         * an entry in the channel_table
 328         */
 329        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 330        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 331        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 332
 333        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 334                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 335                if (!channel_table[cap]) {
 336                        err = -ENOMEM;
 337                        break;
 338                }
 339        }
 340
 341        if (err) {
 342                pr_err("initialization failure\n");
 343                for_each_dma_cap_mask(cap, dma_cap_mask_all)
 344                        free_percpu(channel_table[cap]);
 345        }
 346
 347        return err;
 348}
 349arch_initcall(dma_channel_table_init);
 350
 351/**
 352 * dma_find_channel - find a channel to carry out the operation
 353 * @tx_type: transaction type
 354 */
 355struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 356{
 357        return this_cpu_read(channel_table[tx_type]->chan);
 358}
 359EXPORT_SYMBOL(dma_find_channel);
 360
 361/**
 362 * dma_issue_pending_all - flush all pending operations across all channels
 363 */
 364void dma_issue_pending_all(void)
 365{
 366        struct dma_device *device;
 367        struct dma_chan *chan;
 368
 369        rcu_read_lock();
 370        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 371                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 372                        continue;
 373                list_for_each_entry(chan, &device->channels, device_node)
 374                        if (chan->client_count)
 375                                device->device_issue_pending(chan);
 376        }
 377        rcu_read_unlock();
 378}
 379EXPORT_SYMBOL(dma_issue_pending_all);
 380
 381/**
 382 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 383 */
 384static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 385{
 386        int node = dev_to_node(chan->device->dev);
 387        return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
 388}
 389
 390/**
  391 * min_chan - returns the channel with the lowest table count in the same numa-node as the cpu
 392 * @cap: capability to match
 393 * @cpu: cpu index which the channel should be close to
 394 *
 395 * If some channels are close to the given cpu, the one with the lowest
  396 * table count is returned. Otherwise, cpu is ignored and only the
  397 * table count is taken into account.
 398 * Must be called under dma_list_mutex.
 399 */
 400static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 401{
 402        struct dma_device *device;
 403        struct dma_chan *chan;
 404        struct dma_chan *min = NULL;
 405        struct dma_chan *localmin = NULL;
 406
 407        list_for_each_entry(device, &dma_device_list, global_node) {
 408                if (!dma_has_cap(cap, device->cap_mask) ||
 409                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 410                        continue;
 411                list_for_each_entry(chan, &device->channels, device_node) {
 412                        if (!chan->client_count)
 413                                continue;
 414                        if (!min || chan->table_count < min->table_count)
 415                                min = chan;
 416
 417                        if (dma_chan_is_local(chan, cpu))
 418                                if (!localmin ||
 419                                    chan->table_count < localmin->table_count)
 420                                        localmin = chan;
 421                }
 422        }
 423
 424        chan = localmin ? localmin : min;
 425
 426        if (chan)
 427                chan->table_count++;
 428
 429        return chan;
 430}
 431
 432/**
 433 * dma_channel_rebalance - redistribute the available channels
 434 *
 435 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
  436 * operation type) in the SMP case, and operation isolation (avoid
 437 * multi-tasking channels) in the non-SMP case.  Must be called under
 438 * dma_list_mutex.
 439 */
 440static void dma_channel_rebalance(void)
 441{
 442        struct dma_chan *chan;
 443        struct dma_device *device;
 444        int cpu;
 445        int cap;
 446
 447        /* undo the last distribution */
 448        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 449                for_each_possible_cpu(cpu)
 450                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 451
 452        list_for_each_entry(device, &dma_device_list, global_node) {
 453                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 454                        continue;
 455                list_for_each_entry(chan, &device->channels, device_node)
 456                        chan->table_count = 0;
 457        }
 458
 459        /* don't populate the channel_table if no clients are available */
 460        if (!dmaengine_ref_count)
 461                return;
 462
 463        /* redistribute available channels */
 464        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 465                for_each_online_cpu(cpu) {
 466                        chan = min_chan(cap, cpu);
 467                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 468                }
 469}
 470
 471int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 472{
 473        struct dma_device *device;
 474
 475        if (!chan || !caps)
 476                return -EINVAL;
 477
 478        device = chan->device;
 479
 480        /* check if the channel supports slave transactions */
 481        if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
 482                return -ENXIO;
 483
 484        /*
  485         * Check whether the device reports the generic slave
  486         * capabilities; if not, it does not support any kind of
  487         * slave capability reporting.
 488         */
 489        if (!device->directions)
 490                return -ENXIO;
 491
 492        caps->src_addr_widths = device->src_addr_widths;
 493        caps->dst_addr_widths = device->dst_addr_widths;
 494        caps->directions = device->directions;
 495        caps->residue_granularity = device->residue_granularity;
 496
 497        /*
  498         * Some devices implement only pause (e.g. to get residue) but no
  499         * resume. However, cmd_pause is advertised as pause AND resume.
 500         */
 501        caps->cmd_pause = !!(device->device_pause && device->device_resume);
 502        caps->cmd_terminate = !!device->device_terminate_all;
 503
 504        return 0;
 505}
 506EXPORT_SYMBOL_GPL(dma_get_slave_caps);
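
/*
 * Usage sketch (hypothetical, illustrative only): how a client might check
 * the capabilities of a slave channel it already holds, e.g. one obtained
 * via dma_request_slave_channel().  foo_chan_usable() is an invented name.
 *
 *	static bool foo_chan_usable(struct dma_chan *chan)
 *	{
 *		struct dma_slave_caps caps;
 *
 *		if (dma_get_slave_caps(chan, &caps))
 *			return false;		// no generic capability reporting
 *
 *		// require 32-bit writes towards the device and terminate support
 *		return (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) &&
 *		       (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
 *		       caps.cmd_terminate;
 *	}
 */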
 507
 508static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 509                                          struct dma_device *dev,
 510                                          dma_filter_fn fn, void *fn_param)
 511{
 512        struct dma_chan *chan;
 513
 514        if (!__dma_device_satisfies_mask(dev, mask)) {
 515                pr_debug("%s: wrong capabilities\n", __func__);
 516                return NULL;
 517        }
 518        /* devices with multiple channels need special handling as we need to
 519         * ensure that all channels are either private or public.
 520         */
 521        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 522                list_for_each_entry(chan, &dev->channels, device_node) {
 523                        /* some channels are already publicly allocated */
 524                        if (chan->client_count)
 525                                return NULL;
 526                }
 527
 528        list_for_each_entry(chan, &dev->channels, device_node) {
 529                if (chan->client_count) {
 530                        pr_debug("%s: %s busy\n",
 531                                 __func__, dma_chan_name(chan));
 532                        continue;
 533                }
 534                if (fn && !fn(chan, fn_param)) {
 535                        pr_debug("%s: %s filter said false\n",
 536                                 __func__, dma_chan_name(chan));
 537                        continue;
 538                }
 539                return chan;
 540        }
 541
 542        return NULL;
 543}
 544
 545/**
  546 * dma_get_slave_channel - try to get a specific channel exclusively
 547 * @chan: target channel
 548 */
 549struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 550{
 551        int err = -EBUSY;
 552
 553        /* lock against __dma_request_channel */
 554        mutex_lock(&dma_list_mutex);
 555
 556        if (chan->client_count == 0) {
 557                err = dma_chan_get(chan);
 558                if (err)
 559                        pr_debug("%s: failed to get %s: (%d)\n",
 560                                __func__, dma_chan_name(chan), err);
 561        } else
 562                chan = NULL;
 563
 564        mutex_unlock(&dma_list_mutex);
 565
 566
 567        return chan;
 568}
 569EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 570
 571struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 572{
 573        dma_cap_mask_t mask;
 574        struct dma_chan *chan;
 575        int err;
 576
 577        dma_cap_zero(mask);
 578        dma_cap_set(DMA_SLAVE, mask);
 579
 580        /* lock against __dma_request_channel */
 581        mutex_lock(&dma_list_mutex);
 582
 583        chan = private_candidate(&mask, device, NULL, NULL);
 584        if (chan) {
 585                dma_cap_set(DMA_PRIVATE, device->cap_mask);
 586                device->privatecnt++;
 587                err = dma_chan_get(chan);
 588                if (err) {
 589                        pr_debug("%s: failed to get %s: (%d)\n",
 590                                __func__, dma_chan_name(chan), err);
 591                        chan = NULL;
 592                        if (--device->privatecnt == 0)
 593                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 594                }
 595        }
 596
 597        mutex_unlock(&dma_list_mutex);
 598
 599        return chan;
 600}
 601EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 602
 603/**
 604 * __dma_request_channel - try to allocate an exclusive channel
 605 * @mask: capabilities that the channel must satisfy
  606 * @fn: optional callback used to accept or reject candidate channels
 607 * @fn_param: opaque parameter to pass to dma_filter_fn
 608 *
 609 * Returns pointer to appropriate DMA channel on success or NULL.
 610 */
 611struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 612                                       dma_filter_fn fn, void *fn_param)
 613{
 614        struct dma_device *device, *_d;
 615        struct dma_chan *chan = NULL;
 616        int err;
 617
 618        /* Find a channel */
 619        mutex_lock(&dma_list_mutex);
 620        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 621                chan = private_candidate(mask, device, fn, fn_param);
 622                if (chan) {
 623                        /* Found a suitable channel, try to grab, prep, and
 624                         * return it.  We first set DMA_PRIVATE to disable
 625                         * balance_ref_count as this channel will not be
 626                         * published in the general-purpose allocator
 627                         */
 628                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
 629                        device->privatecnt++;
 630                        err = dma_chan_get(chan);
 631
 632                        if (err == -ENODEV) {
 633                                pr_debug("%s: %s module removed\n",
 634                                         __func__, dma_chan_name(chan));
 635                                list_del_rcu(&device->global_node);
 636                        } else if (err)
 637                                pr_debug("%s: failed to get %s: (%d)\n",
 638                                         __func__, dma_chan_name(chan), err);
 639                        else
 640                                break;
 641                        if (--device->privatecnt == 0)
 642                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 643                        chan = NULL;
 644                }
 645        }
 646        mutex_unlock(&dma_list_mutex);
 647
 648        pr_debug("%s: %s (%s)\n",
 649                 __func__,
 650                 chan ? "success" : "fail",
 651                 chan ? dma_chan_name(chan) : NULL);
 652
 653        return chan;
 654}
 655EXPORT_SYMBOL_GPL(__dma_request_channel);
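
/*
 * Usage sketch (hypothetical, illustrative only): requesting an exclusive
 * channel through the dma_request_channel() wrapper with a filter callback.
 * foo_filter() and foo_get_chan() are invented names; a real filter would
 * typically match on driver-specific data.
 *
 *	static bool foo_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// only "my" device's channels
 *	}
 *
 *	static struct dma_chan *foo_get_chan(struct device *wanted_dev)
 *	{
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_MEMCPY, mask);
 *		return dma_request_channel(mask, foo_filter, wanted_dev);
 *	}
 *
 * A channel returned this way has DMA_PRIVATE set on its device and must
 * eventually be handed back with dma_release_channel().
 */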
 656
 657/**
 658 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 659 * @dev:        pointer to client device structure
 660 * @name:       slave channel name
 661 *
 662 * Returns pointer to appropriate DMA channel on success or an error pointer.
 663 */
 664struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
 665                                                  const char *name)
 666{
 667        /* If device-tree is present get slave info from here */
 668        if (dev->of_node)
 669                return of_dma_request_slave_channel(dev->of_node, name);
 670
 671        /* If device was enumerated by ACPI get slave info from here */
 672        if (ACPI_HANDLE(dev))
 673                return acpi_dma_request_slave_chan_by_name(dev, name);
 674
 675        return ERR_PTR(-ENODEV);
 676}
 677EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 678
 679/**
 680 * dma_request_slave_channel - try to allocate an exclusive slave channel
 681 * @dev:        pointer to client device structure
 682 * @name:       slave channel name
 683 *
 684 * Returns pointer to appropriate DMA channel on success or NULL.
 685 */
 686struct dma_chan *dma_request_slave_channel(struct device *dev,
 687                                           const char *name)
 688{
 689        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
 690        if (IS_ERR(ch))
 691                return NULL;
 692
 693        dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
 694        ch->device->privatecnt++;
 695
 696        return ch;
 697}
 698EXPORT_SYMBOL_GPL(dma_request_slave_channel);
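
/*
 * Usage sketch (hypothetical, illustrative only): a slave driver's probe
 * path requesting its "tx" channel by name and configuring it.  The
 * function name, the "tx" channel name and the FIFO address are invented
 * for the example.
 *
 *	static struct dma_chan *foo_request_tx_chan(struct device *dev,
 *						    dma_addr_t fifo_addr)
 *	{
 *		struct dma_slave_config cfg = {
 *			.direction	= DMA_MEM_TO_DEV,
 *			.dst_addr	= fifo_addr,
 *			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *			.dst_maxburst	= 4,
 *		};
 *		struct dma_chan *chan;
 *
 *		chan = dma_request_slave_channel(dev, "tx");
 *		if (!chan)
 *			return NULL;		// fall back to PIO
 *		if (dmaengine_slave_config(chan, &cfg)) {
 *			dma_release_channel(chan);
 *			return NULL;
 *		}
 *		return chan;
 *	}
 */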
 699
 700void dma_release_channel(struct dma_chan *chan)
 701{
 702        mutex_lock(&dma_list_mutex);
 703        WARN_ONCE(chan->client_count != 1,
 704                  "chan reference count %d != 1\n", chan->client_count);
 705        dma_chan_put(chan);
 706        /* drop PRIVATE cap enabled by __dma_request_channel() */
 707        if (--chan->device->privatecnt == 0)
 708                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 709        mutex_unlock(&dma_list_mutex);
 710}
 711EXPORT_SYMBOL_GPL(dma_release_channel);
 712
 713/**
  714 * dmaengine_get - register interest in dma channels
 715 */
 716void dmaengine_get(void)
 717{
 718        struct dma_device *device, *_d;
 719        struct dma_chan *chan;
 720        int err;
 721
 722        mutex_lock(&dma_list_mutex);
 723        dmaengine_ref_count++;
 724
 725        /* try to grab channels */
 726        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 727                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 728                        continue;
 729                list_for_each_entry(chan, &device->channels, device_node) {
 730                        err = dma_chan_get(chan);
 731                        if (err == -ENODEV) {
 732                                /* module removed before we could use it */
 733                                list_del_rcu(&device->global_node);
 734                                break;
 735                        } else if (err)
 736                                pr_debug("%s: failed to get %s: (%d)\n",
 737                                       __func__, dma_chan_name(chan), err);
 738                }
 739        }
 740
 741        /* if this is the first reference and there were channels
  742         * waiting, we need to rebalance to get those channels
 743         * incorporated into the channel table
 744         */
 745        if (dmaengine_ref_count == 1)
 746                dma_channel_rebalance();
 747        mutex_unlock(&dma_list_mutex);
 748}
 749EXPORT_SYMBOL(dmaengine_get);
 750
 751/**
 752 * dmaengine_put - let dma drivers be removed when ref_count == 0
 753 */
 754void dmaengine_put(void)
 755{
 756        struct dma_device *device;
 757        struct dma_chan *chan;
 758
 759        mutex_lock(&dma_list_mutex);
 760        dmaengine_ref_count--;
 761        BUG_ON(dmaengine_ref_count < 0);
 762        /* drop channel references */
 763        list_for_each_entry(device, &dma_device_list, global_node) {
 764                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 765                        continue;
 766                list_for_each_entry(chan, &device->channels, device_node)
 767                        dma_chan_put(chan);
 768        }
 769        mutex_unlock(&dma_list_mutex);
 770}
 771EXPORT_SYMBOL(dmaengine_put);
 772
 773static bool device_has_all_tx_types(struct dma_device *device)
 774{
 775        /* A device that satisfies this test has channels that will never cause
 776         * an async_tx channel switch event as all possible operation types can
 777         * be handled.
 778         */
 779        #ifdef CONFIG_ASYNC_TX_DMA
 780        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 781                return false;
 782        #endif
 783
 784        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
 785        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 786                return false;
 787        #endif
 788
 789        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 790        if (!dma_has_cap(DMA_XOR, device->cap_mask))
 791                return false;
 792
 793        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 794        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 795                return false;
 796        #endif
 797        #endif
 798
 799        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 800        if (!dma_has_cap(DMA_PQ, device->cap_mask))
 801                return false;
 802
 803        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 804        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 805                return false;
 806        #endif
 807        #endif
 808
 809        return true;
 810}
 811
 812static int get_dma_id(struct dma_device *device)
 813{
 814        int rc;
 815
 816        mutex_lock(&dma_list_mutex);
 817
 818        rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
 819        if (rc >= 0)
 820                device->dev_id = rc;
 821
 822        mutex_unlock(&dma_list_mutex);
 823        return rc < 0 ? rc : 0;
 824}
 825
 826/**
  827 * dma_async_device_register - registers a DMA device and its channels with the subsystem
 828 * @device: &dma_device
 829 */
 830int dma_async_device_register(struct dma_device *device)
 831{
 832        int chancnt = 0, rc;
 833        struct dma_chan* chan;
 834        atomic_t *idr_ref;
 835
 836        if (!device)
 837                return -ENODEV;
 838
 839        /* validate device routines */
 840        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 841                !device->device_prep_dma_memcpy);
 842        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 843                !device->device_prep_dma_xor);
 844        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 845                !device->device_prep_dma_xor_val);
 846        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 847                !device->device_prep_dma_pq);
 848        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 849                !device->device_prep_dma_pq_val);
 850        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 851                !device->device_prep_dma_memset);
 852        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 853                !device->device_prep_dma_interrupt);
 854        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 855                !device->device_prep_dma_sg);
 856        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 857                !device->device_prep_dma_cyclic);
 858        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
 859                !device->device_prep_interleaved_dma);
 860
 861        BUG_ON(!device->device_tx_status);
 862        BUG_ON(!device->device_issue_pending);
 863        BUG_ON(!device->dev);
 864
 865        /* note: this only matters in the
 866         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 867         */
 868        if (device_has_all_tx_types(device))
 869                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 870
 871        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 872        if (!idr_ref)
 873                return -ENOMEM;
 874        rc = get_dma_id(device);
 875        if (rc != 0) {
 876                kfree(idr_ref);
 877                return rc;
 878        }
 879
 880        atomic_set(idr_ref, 0);
 881
 882        /* represent channels in sysfs. Probably want devs too */
 883        list_for_each_entry(chan, &device->channels, device_node) {
 884                rc = -ENOMEM;
 885                chan->local = alloc_percpu(typeof(*chan->local));
 886                if (chan->local == NULL)
 887                        goto err_out;
 888                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 889                if (chan->dev == NULL) {
 890                        free_percpu(chan->local);
 891                        chan->local = NULL;
 892                        goto err_out;
 893                }
 894
 895                chan->chan_id = chancnt++;
 896                chan->dev->device.class = &dma_devclass;
 897                chan->dev->device.parent = device->dev;
 898                chan->dev->chan = chan;
 899                chan->dev->idr_ref = idr_ref;
 900                chan->dev->dev_id = device->dev_id;
 901                atomic_inc(idr_ref);
 902                dev_set_name(&chan->dev->device, "dma%dchan%d",
 903                             device->dev_id, chan->chan_id);
 904
 905                rc = device_register(&chan->dev->device);
 906                if (rc) {
 907                        free_percpu(chan->local);
 908                        chan->local = NULL;
 909                        kfree(chan->dev);
 910                        atomic_dec(idr_ref);
 911                        goto err_out;
 912                }
 913                chan->client_count = 0;
 914        }
 915        device->chancnt = chancnt;
 916
 917        mutex_lock(&dma_list_mutex);
 918        /* take references on public channels */
 919        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 920                list_for_each_entry(chan, &device->channels, device_node) {
 921                        /* if clients are already waiting for channels we need
 922                         * to take references on their behalf
 923                         */
 924                        if (dma_chan_get(chan) == -ENODEV) {
 925                                /* note we can only get here for the first
 926                                 * channel as the remaining channels are
 927                                 * guaranteed to get a reference
 928                                 */
 929                                rc = -ENODEV;
 930                                mutex_unlock(&dma_list_mutex);
 931                                goto err_out;
 932                        }
 933                }
 934        list_add_tail_rcu(&device->global_node, &dma_device_list);
 935        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 936                device->privatecnt++;   /* Always private */
 937        dma_channel_rebalance();
 938        mutex_unlock(&dma_list_mutex);
 939
 940        return 0;
 941
 942err_out:
 943        /* if we never registered a channel just release the idr */
 944        if (atomic_read(idr_ref) == 0) {
 945                mutex_lock(&dma_list_mutex);
 946                idr_remove(&dma_idr, device->dev_id);
 947                mutex_unlock(&dma_list_mutex);
 948                kfree(idr_ref);
 949                return rc;
 950        }
 951
 952        list_for_each_entry(chan, &device->channels, device_node) {
 953                if (chan->local == NULL)
 954                        continue;
 955                mutex_lock(&dma_list_mutex);
 956                chan->dev->chan = NULL;
 957                mutex_unlock(&dma_list_mutex);
 958                device_unregister(&chan->dev->device);
 959                free_percpu(chan->local);
 960        }
 961        return rc;
 962}
 963EXPORT_SYMBOL(dma_async_device_register);
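
/*
 * Registration sketch (hypothetical, illustrative only): a minimal provider
 * "foo" with a single memcpy channel.  struct foo_dev and the foo_*
 * callbacks are invented and assumed to be implemented by such a driver.
 *
 *	static int foo_register(struct foo_dev *fd)
 *	{
 *		struct dma_device *dd = &fd->dma_dev;
 *
 *		dma_cap_zero(dd->cap_mask);
 *		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *		dd->dev				= fd->dev;
 *		dd->device_prep_dma_memcpy	= foo_prep_memcpy;
 *		dd->device_tx_status		= foo_tx_status;
 *		dd->device_issue_pending	= foo_issue_pending;
 *
 *		INIT_LIST_HEAD(&dd->channels);
 *		fd->chan.device = dd;
 *		list_add_tail(&fd->chan.device_node, &dd->channels);
 *
 *		return dma_async_device_register(dd);
 *	}
 *
 * The BUG_ON()s above enforce exactly this contract: every advertised
 * capability needs its matching ->device_prep_*() hook, and
 * ->device_tx_status, ->device_issue_pending and ->dev are always required.
 */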
 964
 965/**
 966 * dma_async_device_unregister - unregister a DMA device
 967 * @device: &dma_device
 968 *
  969 * This routine is called by dma driver exit routines; dmaengine holds module
 970 * references to prevent it being called while channels are in use.
 971 */
 972void dma_async_device_unregister(struct dma_device *device)
 973{
 974        struct dma_chan *chan;
 975
 976        mutex_lock(&dma_list_mutex);
 977        list_del_rcu(&device->global_node);
 978        dma_channel_rebalance();
 979        mutex_unlock(&dma_list_mutex);
 980
 981        list_for_each_entry(chan, &device->channels, device_node) {
 982                WARN_ONCE(chan->client_count,
 983                          "%s called while %d clients hold a reference\n",
 984                          __func__, chan->client_count);
 985                mutex_lock(&dma_list_mutex);
 986                chan->dev->chan = NULL;
 987                mutex_unlock(&dma_list_mutex);
 988                device_unregister(&chan->dev->device);
 989                free_percpu(chan->local);
 990        }
 991}
 992EXPORT_SYMBOL(dma_async_device_unregister);
 993
 994struct dmaengine_unmap_pool {
 995        struct kmem_cache *cache;
 996        const char *name;
 997        mempool_t *pool;
 998        size_t size;
 999};
1000
1001#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1002static struct dmaengine_unmap_pool unmap_pool[] = {
1003        __UNMAP_POOL(2),
1004        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1005        __UNMAP_POOL(16),
1006        __UNMAP_POOL(128),
1007        __UNMAP_POOL(256),
1008        #endif
1009};
1010
1011static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1012{
1013        int order = get_count_order(nr);
1014
1015        switch (order) {
1016        case 0 ... 1:
1017                return &unmap_pool[0];
1018        case 2 ... 4:
1019                return &unmap_pool[1];
1020        case 5 ... 7:
1021                return &unmap_pool[2];
1022        case 8:
1023                return &unmap_pool[3];
1024        default:
1025                BUG();
1026                return NULL;
1027        }
1028}
1029
1030static void dmaengine_unmap(struct kref *kref)
1031{
1032        struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1033        struct device *dev = unmap->dev;
1034        int cnt, i;
1035
1036        cnt = unmap->to_cnt;
1037        for (i = 0; i < cnt; i++)
1038                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1039                               DMA_TO_DEVICE);
1040        cnt += unmap->from_cnt;
1041        for (; i < cnt; i++)
1042                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1043                               DMA_FROM_DEVICE);
1044        cnt += unmap->bidi_cnt;
1045        for (; i < cnt; i++) {
1046                if (unmap->addr[i] == 0)
1047                        continue;
1048                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1049                               DMA_BIDIRECTIONAL);
1050        }
1051        cnt = unmap->map_cnt;
1052        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1053}
1054
1055void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1056{
1057        if (unmap)
1058                kref_put(&unmap->kref, dmaengine_unmap);
1059}
1060EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1061
1062static void dmaengine_destroy_unmap_pool(void)
1063{
1064        int i;
1065
1066        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1067                struct dmaengine_unmap_pool *p = &unmap_pool[i];
1068
1069                if (p->pool)
1070                        mempool_destroy(p->pool);
1071                p->pool = NULL;
1072                if (p->cache)
1073                        kmem_cache_destroy(p->cache);
1074                p->cache = NULL;
1075        }
1076}
1077
1078static int __init dmaengine_init_unmap_pool(void)
1079{
1080        int i;
1081
1082        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1083                struct dmaengine_unmap_pool *p = &unmap_pool[i];
1084                size_t size;
1085
1086                size = sizeof(struct dmaengine_unmap_data) +
1087                       sizeof(dma_addr_t) * p->size;
1088
1089                p->cache = kmem_cache_create(p->name, size, 0,
1090                                             SLAB_HWCACHE_ALIGN, NULL);
1091                if (!p->cache)
1092                        break;
1093                p->pool = mempool_create_slab_pool(1, p->cache);
1094                if (!p->pool)
1095                        break;
1096        }
1097
1098        if (i == ARRAY_SIZE(unmap_pool))
1099                return 0;
1100
1101        dmaengine_destroy_unmap_pool();
1102        return -ENOMEM;
1103}
1104
1105struct dmaengine_unmap_data *
1106dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1107{
1108        struct dmaengine_unmap_data *unmap;
1109
1110        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1111        if (!unmap)
1112                return NULL;
1113
1114        memset(unmap, 0, sizeof(*unmap));
1115        kref_init(&unmap->kref);
1116        unmap->dev = dev;
1117        unmap->map_cnt = nr;
1118
1119        return unmap;
1120}
1121EXPORT_SYMBOL(dmaengine_get_unmap_data);
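
/*
 * Usage sketch (hypothetical, illustrative only): wrapping a single memcpy
 * descriptor with unmap data so the mappings are torn down once the last
 * reference is dropped.  chan, src_page/dst_page, the offsets and len stand
 * in for the caller's real arguments; dma_map_page() error handling is
 * omitted for brevity.
 *
 *	struct dmaengine_unmap_data *unmap;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	unmap = dmaengine_get_unmap_data(chan->device->dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(chan->device->dev, src_page, src_off,
 *				      len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(chan->device->dev, dst_page, dst_off,
 *				      len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
 *						  unmap->addr[0], len, 0);
 *	if (tx)
 *		dma_set_unmap(tx, unmap);	// descriptor takes a reference
 *	dmaengine_unmap_put(unmap);		// drop this caller's reference
 */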
1122
1123void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1124        struct dma_chan *chan)
1125{
1126        tx->chan = chan;
1127        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1128        spin_lock_init(&tx->lock);
1129        #endif
1130}
1131EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1132
1133/* dma_wait_for_async_tx - spin wait for a transaction to complete
1134 * @tx: in-flight transaction to wait on
1135 */
1136enum dma_status
1137dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1138{
1139        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1140
1141        if (!tx)
1142                return DMA_COMPLETE;
1143
1144        while (tx->cookie == -EBUSY) {
1145                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1146                        pr_err("%s timeout waiting for descriptor submission\n",
1147                               __func__);
1148                        return DMA_ERROR;
1149                }
1150                cpu_relax();
1151        }
1152        return dma_sync_wait(tx->chan, tx->cookie);
1153}
1154EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1155
1156/* dma_run_dependencies - helper routine for dma drivers to process
1157 *      (start) dependent operations on their target channel
1158 * @tx: transaction with dependencies
1159 */
1160void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1161{
1162        struct dma_async_tx_descriptor *dep = txd_next(tx);
1163        struct dma_async_tx_descriptor *dep_next;
1164        struct dma_chan *chan;
1165
1166        if (!dep)
1167                return;
1168
1169        /* we'll submit tx->next now, so clear the link */
1170        txd_clear_next(tx);
1171        chan = dep->chan;
1172
 1173        /* keep submitting up until a channel switch is detected;
1174         * in that case we will be called again as a result of
1175         * processing the interrupt from async_tx_channel_switch
1176         */
1177        for (; dep; dep = dep_next) {
1178                txd_lock(dep);
1179                txd_clear_parent(dep);
1180                dep_next = txd_next(dep);
1181                if (dep_next && dep_next->chan == chan)
1182                        txd_clear_next(dep); /* ->next will be submitted */
1183                else
1184                        dep_next = NULL; /* submit current dep and terminate */
1185                txd_unlock(dep);
1186
1187                dep->tx_submit(dep);
1188        }
1189
1190        chan->device->device_issue_pending(chan);
1191}
1192EXPORT_SYMBOL_GPL(dma_run_dependencies);
1193
1194static int __init dma_bus_init(void)
1195{
1196        int err = dmaengine_init_unmap_pool();
1197
1198        if (err)
1199                return err;
1200        return class_register(&dma_devclass);
1201}
1202arch_initcall(dma_bus_init);
1203
1204
1205