linux/drivers/dma/dmaengine.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   4 */
   5
   6/*
   7 * This code implements the DMA subsystem. It provides a HW-neutral interface
   8 * for other kernel code to use asynchronous memory copy capabilities,
   9 * if present, and allows different HW DMA drivers to register as providing
  10 * this capability.
  11 *
   12 * Because we are accelerating what is already a relatively fast operation,
   13 * the code goes to great lengths to avoid additional overhead, such as
   14 * locking.
  15 *
  16 * LOCKING:
  17 *
   18 * The subsystem keeps a global list of dma_device structs; it is protected by
   19 * a mutex, dma_list_mutex.
  20 *
   21 * A subsystem can get access to a channel by calling dmaengine_get() followed
   22 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
   23 * dma_request_channel().  Once a channel is allocated, a reference is taken
   24 * against its corresponding driver to prevent removal (see the sketch below).
   25 *
   26 * Each device has a channels list, which runs unlocked but is never modified
   27 * once the device is registered; it is set up by the driver before registration.
  28 *
  29 * See Documentation/driver-api/dmaengine for more details
  30 */
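
     /*
      * Illustrative usage sketch (not part of this driver): the opportunistic
      * mem-to-mem path described above.  dmaengine_get(), dma_find_channel(),
      * dmaengine_prep_dma_memcpy(), dmaengine_submit(), dma_async_issue_pending()
      * and dmaengine_put() are existing dmaengine APIs; the DMA addresses and
      * length are assumptions made up for the example.
      *
      *	dmaengine_get();
      *	chan = dma_find_channel(DMA_MEMCPY);
      *	if (chan) {
      *		tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
      *					       DMA_PREP_INTERRUPT);
      *		if (tx) {
      *			cookie = dmaengine_submit(tx);
      *			dma_async_issue_pending(chan);
      *		}
      *	}
      *	...
      *	dmaengine_put();
      */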
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/platform_device.h>
  35#include <linux/dma-mapping.h>
  36#include <linux/init.h>
  37#include <linux/module.h>
  38#include <linux/mm.h>
  39#include <linux/device.h>
  40#include <linux/dmaengine.h>
  41#include <linux/hardirq.h>
  42#include <linux/spinlock.h>
  43#include <linux/percpu.h>
  44#include <linux/rcupdate.h>
  45#include <linux/mutex.h>
  46#include <linux/jiffies.h>
  47#include <linux/rculist.h>
  48#include <linux/idr.h>
  49#include <linux/slab.h>
  50#include <linux/acpi.h>
  51#include <linux/acpi_dma.h>
  52#include <linux/of_dma.h>
  53#include <linux/mempool.h>
  54#include <linux/numa.h>
  55
  56#include "dmaengine.h"
  57
  58static DEFINE_MUTEX(dma_list_mutex);
  59static DEFINE_IDA(dma_ida);
  60static LIST_HEAD(dma_device_list);
  61static long dmaengine_ref_count;
  62
  63/* --- debugfs implementation --- */
  64#ifdef CONFIG_DEBUG_FS
  65#include <linux/debugfs.h>
  66
  67static struct dentry *rootdir;
  68
  69static void dmaengine_debug_register(struct dma_device *dma_dev)
  70{
  71        dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
  72                                                   rootdir);
  73        if (IS_ERR(dma_dev->dbg_dev_root))
  74                dma_dev->dbg_dev_root = NULL;
  75}
  76
  77static void dmaengine_debug_unregister(struct dma_device *dma_dev)
  78{
  79        debugfs_remove_recursive(dma_dev->dbg_dev_root);
  80        dma_dev->dbg_dev_root = NULL;
  81}
  82
  83static void dmaengine_dbg_summary_show(struct seq_file *s,
  84                                       struct dma_device *dma_dev)
  85{
  86        struct dma_chan *chan;
  87
  88        list_for_each_entry(chan, &dma_dev->channels, device_node) {
  89                if (chan->client_count) {
  90                        seq_printf(s, " %-13s| %s", dma_chan_name(chan),
  91                                   chan->dbg_client_name ?: "in-use");
  92
  93                        if (chan->router)
  94                                seq_printf(s, " (via router: %s)\n",
  95                                        dev_name(chan->router->dev));
  96                        else
  97                                seq_puts(s, "\n");
  98                }
  99        }
 100}
 101
 102static int dmaengine_summary_show(struct seq_file *s, void *data)
 103{
 104        struct dma_device *dma_dev = NULL;
 105
 106        mutex_lock(&dma_list_mutex);
 107        list_for_each_entry(dma_dev, &dma_device_list, global_node) {
 108                seq_printf(s, "dma%d (%s): number of channels: %u\n",
 109                           dma_dev->dev_id, dev_name(dma_dev->dev),
 110                           dma_dev->chancnt);
 111
 112                if (dma_dev->dbg_summary_show)
 113                        dma_dev->dbg_summary_show(s, dma_dev);
 114                else
 115                        dmaengine_dbg_summary_show(s, dma_dev);
 116
 117                if (!list_is_last(&dma_dev->global_node, &dma_device_list))
 118                        seq_puts(s, "\n");
 119        }
 120        mutex_unlock(&dma_list_mutex);
 121
 122        return 0;
 123}
 124DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
 125
 126static void __init dmaengine_debugfs_init(void)
 127{
 128        rootdir = debugfs_create_dir("dmaengine", NULL);
 129
 130        /* /sys/kernel/debug/dmaengine/summary */
 131        debugfs_create_file("summary", 0444, rootdir, NULL,
 132                            &dmaengine_summary_fops);
 133}
 134#else
 135static inline void dmaengine_debugfs_init(void) { }
  136static inline void dmaengine_debug_register(struct dma_device *dma_dev) { }
  137
  138static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
  139#endif  /* CONFIG_DEBUG_FS */
 143
 144/* --- sysfs implementation --- */
 145
 146#define DMA_SLAVE_NAME  "slave"
 147
 148/**
 149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 150 * @dev:        device node
 151 *
 152 * Must be called under dma_list_mutex.
 153 */
 154static struct dma_chan *dev_to_dma_chan(struct device *dev)
 155{
 156        struct dma_chan_dev *chan_dev;
 157
 158        chan_dev = container_of(dev, typeof(*chan_dev), device);
 159        return chan_dev->chan;
 160}
 161
 162static ssize_t memcpy_count_show(struct device *dev,
 163                                 struct device_attribute *attr, char *buf)
 164{
 165        struct dma_chan *chan;
 166        unsigned long count = 0;
 167        int i;
 168        int err;
 169
 170        mutex_lock(&dma_list_mutex);
 171        chan = dev_to_dma_chan(dev);
 172        if (chan) {
 173                for_each_possible_cpu(i)
 174                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
 175                err = sprintf(buf, "%lu\n", count);
 176        } else
 177                err = -ENODEV;
 178        mutex_unlock(&dma_list_mutex);
 179
 180        return err;
 181}
 182static DEVICE_ATTR_RO(memcpy_count);
 183
 184static ssize_t bytes_transferred_show(struct device *dev,
 185                                      struct device_attribute *attr, char *buf)
 186{
 187        struct dma_chan *chan;
 188        unsigned long count = 0;
 189        int i;
 190        int err;
 191
 192        mutex_lock(&dma_list_mutex);
 193        chan = dev_to_dma_chan(dev);
 194        if (chan) {
 195                for_each_possible_cpu(i)
 196                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 197                err = sprintf(buf, "%lu\n", count);
 198        } else
 199                err = -ENODEV;
 200        mutex_unlock(&dma_list_mutex);
 201
 202        return err;
 203}
 204static DEVICE_ATTR_RO(bytes_transferred);
 205
 206static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 207                           char *buf)
 208{
 209        struct dma_chan *chan;
 210        int err;
 211
 212        mutex_lock(&dma_list_mutex);
 213        chan = dev_to_dma_chan(dev);
 214        if (chan)
 215                err = sprintf(buf, "%d\n", chan->client_count);
 216        else
 217                err = -ENODEV;
 218        mutex_unlock(&dma_list_mutex);
 219
 220        return err;
 221}
 222static DEVICE_ATTR_RO(in_use);
 223
 224static struct attribute *dma_dev_attrs[] = {
 225        &dev_attr_memcpy_count.attr,
 226        &dev_attr_bytes_transferred.attr,
 227        &dev_attr_in_use.attr,
 228        NULL,
 229};
 230ATTRIBUTE_GROUPS(dma_dev);
 231
 232static void chan_dev_release(struct device *dev)
 233{
 234        struct dma_chan_dev *chan_dev;
 235
 236        chan_dev = container_of(dev, typeof(*chan_dev), device);
 237        kfree(chan_dev);
 238}
 239
 240static struct class dma_devclass = {
 241        .name           = "dma",
 242        .dev_groups     = dma_dev_groups,
 243        .dev_release    = chan_dev_release,
 244};
 245
 246/* --- client and device registration --- */
 247
 248/* enable iteration over all operation types */
 249static dma_cap_mask_t dma_cap_mask_all;
 250
 251/**
 252 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 253 * @chan:       associated channel for this entry
 254 */
 255struct dma_chan_tbl_ent {
 256        struct dma_chan *chan;
 257};
 258
 259/* percpu lookup table for memory-to-memory offload providers */
 260static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 261
 262static int __init dma_channel_table_init(void)
 263{
 264        enum dma_transaction_type cap;
 265        int err = 0;
 266
 267        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 268
 269        /* 'interrupt', 'private', and 'slave' are channel capabilities,
 270         * but are not associated with an operation so they do not need
 271         * an entry in the channel_table
 272         */
 273        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 274        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 275        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 276
 277        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 278                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 279                if (!channel_table[cap]) {
 280                        err = -ENOMEM;
 281                        break;
 282                }
 283        }
 284
 285        if (err) {
 286                pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
 287                for_each_dma_cap_mask(cap, dma_cap_mask_all)
 288                        free_percpu(channel_table[cap]);
 289        }
 290
 291        return err;
 292}
 293arch_initcall(dma_channel_table_init);
 294
 295/**
 296 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 297 * @chan:       DMA channel to test
 298 * @cpu:        CPU index which the channel should be close to
 299 *
 300 * Returns true if the channel is in the same NUMA-node as the CPU.
 301 */
 302static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 303{
 304        int node = dev_to_node(chan->device->dev);
 305        return node == NUMA_NO_NODE ||
 306                cpumask_test_cpu(cpu, cpumask_of_node(node));
 307}
 308
 309/**
 310 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
 311 * @cap:        capability to match
 312 * @cpu:        CPU index which the channel should be close to
 313 *
 314 * If some channels are close to the given CPU, the one with the lowest
 315 * reference count is returned. Otherwise, CPU is ignored and only the
 316 * reference count is taken into account.
 317 *
 318 * Must be called under dma_list_mutex.
 319 */
 320static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 321{
 322        struct dma_device *device;
 323        struct dma_chan *chan;
 324        struct dma_chan *min = NULL;
 325        struct dma_chan *localmin = NULL;
 326
 327        list_for_each_entry(device, &dma_device_list, global_node) {
 328                if (!dma_has_cap(cap, device->cap_mask) ||
 329                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 330                        continue;
 331                list_for_each_entry(chan, &device->channels, device_node) {
 332                        if (!chan->client_count)
 333                                continue;
 334                        if (!min || chan->table_count < min->table_count)
 335                                min = chan;
 336
 337                        if (dma_chan_is_local(chan, cpu))
 338                                if (!localmin ||
 339                                    chan->table_count < localmin->table_count)
 340                                        localmin = chan;
 341                }
 342        }
 343
 344        chan = localmin ? localmin : min;
 345
 346        if (chan)
 347                chan->table_count++;
 348
 349        return chan;
 350}
 351
 352/**
 353 * dma_channel_rebalance - redistribute the available channels
 354 *
 355 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 356 * operation type) in the SMP case, and operation isolation (avoid
 357 * multi-tasking channels) in the non-SMP case.
 358 *
 359 * Must be called under dma_list_mutex.
 360 */
 361static void dma_channel_rebalance(void)
 362{
 363        struct dma_chan *chan;
 364        struct dma_device *device;
 365        int cpu;
 366        int cap;
 367
 368        /* undo the last distribution */
 369        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 370                for_each_possible_cpu(cpu)
 371                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 372
 373        list_for_each_entry(device, &dma_device_list, global_node) {
 374                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 375                        continue;
 376                list_for_each_entry(chan, &device->channels, device_node)
 377                        chan->table_count = 0;
 378        }
 379
 380        /* don't populate the channel_table if no clients are available */
 381        if (!dmaengine_ref_count)
 382                return;
 383
 384        /* redistribute available channels */
 385        for_each_dma_cap_mask(cap, dma_cap_mask_all)
 386                for_each_online_cpu(cpu) {
 387                        chan = min_chan(cap, cpu);
 388                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 389                }
 390}
 391
 392static int dma_device_satisfies_mask(struct dma_device *device,
 393                                     const dma_cap_mask_t *want)
 394{
 395        dma_cap_mask_t has;
 396
 397        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 398                DMA_TX_TYPE_END);
 399        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 400}
 401
 402static struct module *dma_chan_to_owner(struct dma_chan *chan)
 403{
 404        return chan->device->owner;
 405}
 406
 407/**
 408 * balance_ref_count - catch up the channel reference count
 409 * @chan:       channel to balance ->client_count versus dmaengine_ref_count
 410 *
 411 * Must be called under dma_list_mutex.
 412 */
 413static void balance_ref_count(struct dma_chan *chan)
 414{
 415        struct module *owner = dma_chan_to_owner(chan);
 416
 417        while (chan->client_count < dmaengine_ref_count) {
 418                __module_get(owner);
 419                chan->client_count++;
 420        }
 421}
 422
 423static void dma_device_release(struct kref *ref)
 424{
 425        struct dma_device *device = container_of(ref, struct dma_device, ref);
 426
 427        list_del_rcu(&device->global_node);
 428        dma_channel_rebalance();
 429
 430        if (device->device_release)
 431                device->device_release(device);
 432}
 433
 434static void dma_device_put(struct dma_device *device)
 435{
 436        lockdep_assert_held(&dma_list_mutex);
 437        kref_put(&device->ref, dma_device_release);
 438}
 439
 440/**
 441 * dma_chan_get - try to grab a DMA channel's parent driver module
 442 * @chan:       channel to grab
 443 *
 444 * Must be called under dma_list_mutex.
 445 */
 446static int dma_chan_get(struct dma_chan *chan)
 447{
 448        struct module *owner = dma_chan_to_owner(chan);
 449        int ret;
 450
 451        /* The channel is already in use, update client count */
 452        if (chan->client_count) {
 453                __module_get(owner);
 454                goto out;
 455        }
 456
 457        if (!try_module_get(owner))
 458                return -ENODEV;
 459
 460        ret = kref_get_unless_zero(&chan->device->ref);
 461        if (!ret) {
 462                ret = -ENODEV;
 463                goto module_put_out;
 464        }
 465
 466        /* allocate upon first client reference */
 467        if (chan->device->device_alloc_chan_resources) {
 468                ret = chan->device->device_alloc_chan_resources(chan);
 469                if (ret < 0)
 470                        goto err_out;
 471        }
 472
 473        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 474                balance_ref_count(chan);
 475
 476out:
 477        chan->client_count++;
 478        return 0;
 479
 480err_out:
 481        dma_device_put(chan->device);
 482module_put_out:
 483        module_put(owner);
 484        return ret;
 485}
 486
 487/**
 488 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 489 * @chan:       channel to release
 490 *
 491 * Must be called under dma_list_mutex.
 492 */
 493static void dma_chan_put(struct dma_chan *chan)
 494{
 495        /* This channel is not in use, bail out */
 496        if (!chan->client_count)
 497                return;
 498
 499        chan->client_count--;
 500
 501        /* This channel is not in use anymore, free it */
 502        if (!chan->client_count && chan->device->device_free_chan_resources) {
 503                /* Make sure all operations have completed */
 504                dmaengine_synchronize(chan);
 505                chan->device->device_free_chan_resources(chan);
 506        }
 507
 508        /* If the channel is used via a DMA request router, free the mapping */
 509        if (chan->router && chan->router->route_free) {
 510                chan->router->route_free(chan->router->dev, chan->route_data);
 511                chan->router = NULL;
 512                chan->route_data = NULL;
 513        }
 514
 515        dma_device_put(chan->device);
 516        module_put(dma_chan_to_owner(chan));
 517}
 518
 519enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 520{
 521        enum dma_status status;
 522        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 523
 524        dma_async_issue_pending(chan);
 525        do {
 526                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 527                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 528                        dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 529                        return DMA_ERROR;
 530                }
 531                if (status != DMA_IN_PROGRESS)
 532                        break;
 533                cpu_relax();
 534        } while (1);
 535
 536        return status;
 537}
 538EXPORT_SYMBOL(dma_sync_wait);
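
     /*
      * Illustrative sketch (not part of this driver): polling for completion of
      * a previously submitted transaction with dma_sync_wait().  The cookie is
      * assumed to come from dmaengine_submit() on the same channel.
      *
      *	cookie = dmaengine_submit(tx);
      *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
      *		dev_err(dev, "DMA transfer did not complete\n");
      */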
 539
 540/**
 541 * dma_find_channel - find a channel to carry out the operation
 542 * @tx_type:    transaction type
 543 */
 544struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 545{
 546        return this_cpu_read(channel_table[tx_type]->chan);
 547}
 548EXPORT_SYMBOL(dma_find_channel);
 549
 550/**
 551 * dma_issue_pending_all - flush all pending operations across all channels
 552 */
 553void dma_issue_pending_all(void)
 554{
 555        struct dma_device *device;
 556        struct dma_chan *chan;
 557
 558        rcu_read_lock();
 559        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 560                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 561                        continue;
 562                list_for_each_entry(chan, &device->channels, device_node)
 563                        if (chan->client_count)
 564                                device->device_issue_pending(chan);
 565        }
 566        rcu_read_unlock();
 567}
 568EXPORT_SYMBOL(dma_issue_pending_all);
 569
 570int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 571{
 572        struct dma_device *device;
 573
 574        if (!chan || !caps)
 575                return -EINVAL;
 576
 577        device = chan->device;
 578
 579        /* check if the channel supports slave transactions */
 580        if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
 581              test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 582                return -ENXIO;
 583
  584        /*
  585         * Check whether the device reports the generic slave capabilities;
  586         * if not, it does not support any kind of slave capability
  587         * reporting.
  588         */
 589        if (!device->directions)
 590                return -ENXIO;
 591
 592        caps->src_addr_widths = device->src_addr_widths;
 593        caps->dst_addr_widths = device->dst_addr_widths;
 594        caps->directions = device->directions;
 595        caps->min_burst = device->min_burst;
 596        caps->max_burst = device->max_burst;
 597        caps->max_sg_burst = device->max_sg_burst;
 598        caps->residue_granularity = device->residue_granularity;
 599        caps->descriptor_reuse = device->descriptor_reuse;
 600        caps->cmd_pause = !!device->device_pause;
 601        caps->cmd_resume = !!device->device_resume;
 602        caps->cmd_terminate = !!device->device_terminate_all;
 603
 604        /*
 605         * DMA engine device might be configured with non-uniformly
 606         * distributed slave capabilities per device channels. In this
 607         * case the corresponding driver may provide the device_caps
 608         * callback to override the generic capabilities with
 609         * channel-specific ones.
 610         */
 611        if (device->device_caps)
 612                device->device_caps(chan, caps);
 613
 614        return 0;
 615}
 616EXPORT_SYMBOL_GPL(dma_get_slave_caps);
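
     /*
      * Illustrative sketch (not part of this driver): a client checking the
      * reported slave capabilities before applying a configuration.
      * dma_get_slave_caps() and dmaengine_slave_config() are existing APIs;
      * the channel, FIFO address and bus width are assumptions for the example.
      *
      *	struct dma_slave_caps caps;
      *	struct dma_slave_config cfg = { };
      *
      *	if (dma_get_slave_caps(chan, &caps))
      *		return -EINVAL;
      *	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
      *		return -EINVAL;
      *	cfg.dst_addr = fifo_phys;
      *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
      *	ret = dmaengine_slave_config(chan, &cfg);
      */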
 617
 618static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 619                                          struct dma_device *dev,
 620                                          dma_filter_fn fn, void *fn_param)
 621{
 622        struct dma_chan *chan;
 623
 624        if (mask && !dma_device_satisfies_mask(dev, mask)) {
 625                dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 626                return NULL;
 627        }
  628        /* devices with multiple channels need special handling because we must
  629         * ensure that all channels are either private or public.
  630         */
 631        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 632                list_for_each_entry(chan, &dev->channels, device_node) {
 633                        /* some channels are already publicly allocated */
 634                        if (chan->client_count)
 635                                return NULL;
 636                }
 637
 638        list_for_each_entry(chan, &dev->channels, device_node) {
 639                if (chan->client_count) {
 640                        dev_dbg(dev->dev, "%s: %s busy\n",
 641                                 __func__, dma_chan_name(chan));
 642                        continue;
 643                }
 644                if (fn && !fn(chan, fn_param)) {
 645                        dev_dbg(dev->dev, "%s: %s filter said false\n",
 646                                 __func__, dma_chan_name(chan));
 647                        continue;
 648                }
 649                return chan;
 650        }
 651
 652        return NULL;
 653}
 654
 655static struct dma_chan *find_candidate(struct dma_device *device,
 656                                       const dma_cap_mask_t *mask,
 657                                       dma_filter_fn fn, void *fn_param)
 658{
 659        struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
 660        int err;
 661
 662        if (chan) {
 663                /* Found a suitable channel, try to grab, prep, and return it.
 664                 * We first set DMA_PRIVATE to disable balance_ref_count as this
 665                 * channel will not be published in the general-purpose
 666                 * allocator
 667                 */
 668                dma_cap_set(DMA_PRIVATE, device->cap_mask);
 669                device->privatecnt++;
 670                err = dma_chan_get(chan);
 671
 672                if (err) {
 673                        if (err == -ENODEV) {
 674                                dev_dbg(device->dev, "%s: %s module removed\n",
 675                                        __func__, dma_chan_name(chan));
 676                                list_del_rcu(&device->global_node);
 677                        } else
 678                                dev_dbg(device->dev,
 679                                        "%s: failed to get %s: (%d)\n",
 680                                         __func__, dma_chan_name(chan), err);
 681
 682                        if (--device->privatecnt == 0)
 683                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 684
 685                        chan = ERR_PTR(err);
 686                }
 687        }
 688
 689        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 690}
 691
 692/**
  693 * dma_get_slave_channel - try to get a specific channel exclusively
 694 * @chan:       target channel
 695 */
 696struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 697{
 698        /* lock against __dma_request_channel */
 699        mutex_lock(&dma_list_mutex);
 700
 701        if (chan->client_count == 0) {
 702                struct dma_device *device = chan->device;
 703                int err;
 704
 705                dma_cap_set(DMA_PRIVATE, device->cap_mask);
 706                device->privatecnt++;
 707                err = dma_chan_get(chan);
 708                if (err) {
 709                        dev_dbg(chan->device->dev,
 710                                "%s: failed to get %s: (%d)\n",
 711                                __func__, dma_chan_name(chan), err);
 712                        chan = NULL;
 713                        if (--device->privatecnt == 0)
 714                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 715                }
 716        } else
 717                chan = NULL;
 718
 719        mutex_unlock(&dma_list_mutex);
  720
 722        return chan;
 723}
 724EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 725
 726struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 727{
 728        dma_cap_mask_t mask;
 729        struct dma_chan *chan;
 730
 731        dma_cap_zero(mask);
 732        dma_cap_set(DMA_SLAVE, mask);
 733
 734        /* lock against __dma_request_channel */
 735        mutex_lock(&dma_list_mutex);
 736
 737        chan = find_candidate(device, &mask, NULL, NULL);
 738
 739        mutex_unlock(&dma_list_mutex);
 740
 741        return IS_ERR(chan) ? NULL : chan;
 742}
 743EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 744
 745/**
 746 * __dma_request_channel - try to allocate an exclusive channel
 747 * @mask:       capabilities that the channel must satisfy
  748 * @fn:         optional callback used to filter available channels
 749 * @fn_param:   opaque parameter to pass to dma_filter_fn()
 750 * @np:         device node to look for DMA channels
 751 *
 752 * Returns pointer to appropriate DMA channel on success or NULL.
 753 */
 754struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 755                                       dma_filter_fn fn, void *fn_param,
 756                                       struct device_node *np)
 757{
 758        struct dma_device *device, *_d;
 759        struct dma_chan *chan = NULL;
 760
 761        /* Find a channel */
 762        mutex_lock(&dma_list_mutex);
 763        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 764                /* Finds a DMA controller with matching device node */
 765                if (np && device->dev->of_node && np != device->dev->of_node)
 766                        continue;
 767
 768                chan = find_candidate(device, mask, fn, fn_param);
 769                if (!IS_ERR(chan))
 770                        break;
 771
 772                chan = NULL;
 773        }
 774        mutex_unlock(&dma_list_mutex);
 775
 776        pr_debug("%s: %s (%s)\n",
 777                 __func__,
 778                 chan ? "success" : "fail",
 779                 chan ? dma_chan_name(chan) : NULL);
 780
 781        return chan;
 782}
 783EXPORT_SYMBOL_GPL(__dma_request_channel);
 784
 785static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
 786                                                    const char *name,
 787                                                    struct device *dev)
 788{
 789        int i;
 790
 791        if (!device->filter.mapcnt)
 792                return NULL;
 793
 794        for (i = 0; i < device->filter.mapcnt; i++) {
 795                const struct dma_slave_map *map = &device->filter.map[i];
 796
 797                if (!strcmp(map->devname, dev_name(dev)) &&
 798                    !strcmp(map->slave, name))
 799                        return map;
 800        }
 801
 802        return NULL;
 803}
 804
 805/**
 806 * dma_request_chan - try to allocate an exclusive slave channel
 807 * @dev:        pointer to client device structure
 808 * @name:       slave channel name
 809 *
 810 * Returns pointer to appropriate DMA channel on success or an error pointer.
 811 */
 812struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 813{
 814        struct dma_device *d, *_d;
 815        struct dma_chan *chan = NULL;
 816
 817        /* If device-tree is present get slave info from here */
 818        if (dev->of_node)
 819                chan = of_dma_request_slave_channel(dev->of_node, name);
 820
 821        /* If device was enumerated by ACPI get slave info from here */
 822        if (has_acpi_companion(dev) && !chan)
 823                chan = acpi_dma_request_slave_chan_by_name(dev, name);
 824
 825        if (PTR_ERR(chan) == -EPROBE_DEFER)
 826                return chan;
 827
 828        if (!IS_ERR_OR_NULL(chan))
 829                goto found;
 830
 831        /* Try to find the channel via the DMA filter map(s) */
 832        mutex_lock(&dma_list_mutex);
 833        list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 834                dma_cap_mask_t mask;
 835                const struct dma_slave_map *map = dma_filter_match(d, name, dev);
 836
 837                if (!map)
 838                        continue;
 839
 840                dma_cap_zero(mask);
 841                dma_cap_set(DMA_SLAVE, mask);
 842
 843                chan = find_candidate(d, &mask, d->filter.fn, map->param);
 844                if (!IS_ERR(chan))
 845                        break;
 846        }
 847        mutex_unlock(&dma_list_mutex);
 848
 849        if (IS_ERR(chan))
 850                return chan;
 851        if (!chan)
 852                return ERR_PTR(-EPROBE_DEFER);
 853
 854found:
 855#ifdef CONFIG_DEBUG_FS
 856        chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
 857                                          name);
 858#endif
 859
 860        chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
 861        if (!chan->name)
 862                return chan;
 863        chan->slave = dev;
 864
 865        if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
 866                              DMA_SLAVE_NAME))
 867                dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
 868        if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
 869                dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
 870
 871        return chan;
 872}
 873EXPORT_SYMBOL_GPL(dma_request_chan);
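
     /*
      * Illustrative sketch (not part of this driver): requesting a named slave
      * channel from a client driver's probe path.  dma_request_chan() and
      * dma_release_channel() are defined in this file; the "tx" channel name
      * and pdev are assumptions for the example.
      *
      *	chan = dma_request_chan(&pdev->dev, "tx");
      *	if (IS_ERR(chan))
      *		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */
      *	...
      *	dma_release_channel(chan);
      */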
 874
 875/**
 876 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 877 * @mask:       capabilities that the channel must satisfy
 878 *
 879 * Returns pointer to appropriate DMA channel on success or an error pointer.
 880 */
 881struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 882{
 883        struct dma_chan *chan;
 884
 885        if (!mask)
 886                return ERR_PTR(-ENODEV);
 887
 888        chan = __dma_request_channel(mask, NULL, NULL, NULL);
 889        if (!chan) {
 890                mutex_lock(&dma_list_mutex);
 891                if (list_empty(&dma_device_list))
 892                        chan = ERR_PTR(-EPROBE_DEFER);
 893                else
 894                        chan = ERR_PTR(-ENODEV);
 895                mutex_unlock(&dma_list_mutex);
 896        }
 897
 898        return chan;
 899}
 900EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
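
     /*
      * Illustrative sketch (not part of this driver): requesting any channel
      * capable of memory-to-memory copies.  dma_cap_zero(), dma_cap_set() and
      * dma_request_chan_by_mask() are existing dmaengine APIs.
      *
      *	dma_cap_mask_t mask;
      *
      *	dma_cap_zero(mask);
      *	dma_cap_set(DMA_MEMCPY, mask);
      *	chan = dma_request_chan_by_mask(&mask);
      *	if (IS_ERR(chan))
      *		return PTR_ERR(chan);
      */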
 901
 902void dma_release_channel(struct dma_chan *chan)
 903{
 904        mutex_lock(&dma_list_mutex);
 905        WARN_ONCE(chan->client_count != 1,
 906                  "chan reference count %d != 1\n", chan->client_count);
 907        dma_chan_put(chan);
 908        /* drop PRIVATE cap enabled by __dma_request_channel() */
 909        if (--chan->device->privatecnt == 0)
 910                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 911
 912        if (chan->slave) {
 913                sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
 914                sysfs_remove_link(&chan->slave->kobj, chan->name);
 915                kfree(chan->name);
 916                chan->name = NULL;
 917                chan->slave = NULL;
 918        }
 919
 920#ifdef CONFIG_DEBUG_FS
 921        kfree(chan->dbg_client_name);
 922        chan->dbg_client_name = NULL;
 923#endif
 924        mutex_unlock(&dma_list_mutex);
 925}
 926EXPORT_SYMBOL_GPL(dma_release_channel);
 927
 928/**
 929 * dmaengine_get - register interest in dma_channels
 930 */
 931void dmaengine_get(void)
 932{
 933        struct dma_device *device, *_d;
 934        struct dma_chan *chan;
 935        int err;
 936
 937        mutex_lock(&dma_list_mutex);
 938        dmaengine_ref_count++;
 939
 940        /* try to grab channels */
 941        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 942                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 943                        continue;
 944                list_for_each_entry(chan, &device->channels, device_node) {
 945                        err = dma_chan_get(chan);
 946                        if (err == -ENODEV) {
 947                                /* module removed before we could use it */
 948                                list_del_rcu(&device->global_node);
 949                                break;
 950                        } else if (err)
 951                                dev_dbg(chan->device->dev,
 952                                        "%s: failed to get %s: (%d)\n",
 953                                        __func__, dma_chan_name(chan), err);
 954                }
 955        }
 956
 957        /* if this is the first reference and there were channels
 958         * waiting we need to rebalance to get those channels
 959         * incorporated into the channel table
 960         */
 961        if (dmaengine_ref_count == 1)
 962                dma_channel_rebalance();
 963        mutex_unlock(&dma_list_mutex);
 964}
 965EXPORT_SYMBOL(dmaengine_get);
 966
 967/**
 968 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 969 */
 970void dmaengine_put(void)
 971{
 972        struct dma_device *device, *_d;
 973        struct dma_chan *chan;
 974
 975        mutex_lock(&dma_list_mutex);
 976        dmaengine_ref_count--;
 977        BUG_ON(dmaengine_ref_count < 0);
 978        /* drop channel references */
 979        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 980                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 981                        continue;
 982                list_for_each_entry(chan, &device->channels, device_node)
 983                        dma_chan_put(chan);
 984        }
 985        mutex_unlock(&dma_list_mutex);
 986}
 987EXPORT_SYMBOL(dmaengine_put);
 988
 989static bool device_has_all_tx_types(struct dma_device *device)
 990{
 991        /* A device that satisfies this test has channels that will never cause
 992         * an async_tx channel switch event as all possible operation types can
 993         * be handled.
 994         */
 995        #ifdef CONFIG_ASYNC_TX_DMA
 996        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 997                return false;
 998        #endif
 999
1000        #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
1001        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1002                return false;
1003        #endif
1004
1005        #if IS_ENABLED(CONFIG_ASYNC_XOR)
1006        if (!dma_has_cap(DMA_XOR, device->cap_mask))
1007                return false;
1008
1009        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
1010        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
1011                return false;
1012        #endif
1013        #endif
1014
1015        #if IS_ENABLED(CONFIG_ASYNC_PQ)
1016        if (!dma_has_cap(DMA_PQ, device->cap_mask))
1017                return false;
1018
1019        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
1020        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
1021                return false;
1022        #endif
1023        #endif
1024
1025        return true;
1026}
1027
1028static int get_dma_id(struct dma_device *device)
1029{
1030        int rc = ida_alloc(&dma_ida, GFP_KERNEL);
1031
1032        if (rc < 0)
1033                return rc;
1034        device->dev_id = rc;
1035        return 0;
1036}
1037
1038static int __dma_async_device_channel_register(struct dma_device *device,
1039                                               struct dma_chan *chan)
1040{
1041        int rc;
1042
1043        chan->local = alloc_percpu(typeof(*chan->local));
1044        if (!chan->local)
1045                return -ENOMEM;
1046        chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1047        if (!chan->dev) {
1048                rc = -ENOMEM;
1049                goto err_free_local;
1050        }
1051
 1052        /*
 1053         * When the chan_id is a negative value, we are dynamically adding
 1054         * the channel. Otherwise we are enumerating it statically.
 1055         */
1056        mutex_lock(&device->chan_mutex);
1057        chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1058        mutex_unlock(&device->chan_mutex);
1059        if (chan->chan_id < 0) {
1060                pr_err("%s: unable to alloc ida for chan: %d\n",
1061                       __func__, chan->chan_id);
1062                rc = chan->chan_id;
1063                goto err_free_dev;
1064        }
1065
1066        chan->dev->device.class = &dma_devclass;
1067        chan->dev->device.parent = device->dev;
1068        chan->dev->chan = chan;
1069        chan->dev->dev_id = device->dev_id;
1070        dev_set_name(&chan->dev->device, "dma%dchan%d",
1071                     device->dev_id, chan->chan_id);
1072        rc = device_register(&chan->dev->device);
1073        if (rc)
1074                goto err_out_ida;
1075        chan->client_count = 0;
1076        device->chancnt++;
1077
1078        return 0;
1079
1080 err_out_ida:
1081        mutex_lock(&device->chan_mutex);
1082        ida_free(&device->chan_ida, chan->chan_id);
1083        mutex_unlock(&device->chan_mutex);
1084 err_free_dev:
1085        kfree(chan->dev);
1086 err_free_local:
1087        free_percpu(chan->local);
1088        chan->local = NULL;
1089        return rc;
1090}
1091
1092int dma_async_device_channel_register(struct dma_device *device,
1093                                      struct dma_chan *chan)
1094{
1095        int rc;
1096
1097        rc = __dma_async_device_channel_register(device, chan);
1098        if (rc < 0)
1099                return rc;
1100
1101        dma_channel_rebalance();
1102        return 0;
1103}
1104EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
1105
1106static void __dma_async_device_channel_unregister(struct dma_device *device,
1107                                                  struct dma_chan *chan)
1108{
1109        WARN_ONCE(!device->device_release && chan->client_count,
1110                  "%s called while %d clients hold a reference\n",
1111                  __func__, chan->client_count);
1112        mutex_lock(&dma_list_mutex);
1113        device->chancnt--;
1114        chan->dev->chan = NULL;
1115        mutex_unlock(&dma_list_mutex);
1116        mutex_lock(&device->chan_mutex);
1117        ida_free(&device->chan_ida, chan->chan_id);
1118        mutex_unlock(&device->chan_mutex);
1119        device_unregister(&chan->dev->device);
1120        free_percpu(chan->local);
1121}
1122
1123void dma_async_device_channel_unregister(struct dma_device *device,
1124                                         struct dma_chan *chan)
1125{
1126        __dma_async_device_channel_unregister(device, chan);
1127        dma_channel_rebalance();
1128}
1129EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
1130
1131/**
 1132 * dma_async_device_register - register a DMA device with the dmaengine core
1133 * @device:     pointer to &struct dma_device
1134 *
1135 * After calling this routine the structure should not be freed except in the
1136 * device_release() callback which will be called after
1137 * dma_async_device_unregister() is called and no further references are taken.
1138 */
1139int dma_async_device_register(struct dma_device *device)
1140{
1141        int rc;
 1142        struct dma_chan *chan;
1143
1144        if (!device)
1145                return -ENODEV;
1146
1147        /* validate device routines */
1148        if (!device->dev) {
 1149                pr_err("DMA device must have dev\n");
1150                return -EIO;
1151        }
1152
1153        device->owner = device->dev->driver->owner;
1154
1155        if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
1156                dev_err(device->dev,
1157                        "Device claims capability %s, but op is not defined\n",
1158                        "DMA_MEMCPY");
1159                return -EIO;
1160        }
1161
1162        if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
1163                dev_err(device->dev,
1164                        "Device claims capability %s, but op is not defined\n",
1165                        "DMA_MEMCPY_SG");
1166                return -EIO;
1167        }
1168
1169        if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
1170                dev_err(device->dev,
1171                        "Device claims capability %s, but op is not defined\n",
1172                        "DMA_XOR");
1173                return -EIO;
1174        }
1175
1176        if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
1177                dev_err(device->dev,
1178                        "Device claims capability %s, but op is not defined\n",
1179                        "DMA_XOR_VAL");
1180                return -EIO;
1181        }
1182
1183        if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
1184                dev_err(device->dev,
1185                        "Device claims capability %s, but op is not defined\n",
1186                        "DMA_PQ");
1187                return -EIO;
1188        }
1189
1190        if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
1191                dev_err(device->dev,
1192                        "Device claims capability %s, but op is not defined\n",
1193                        "DMA_PQ_VAL");
1194                return -EIO;
1195        }
1196
1197        if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
1198                dev_err(device->dev,
1199                        "Device claims capability %s, but op is not defined\n",
1200                        "DMA_MEMSET");
1201                return -EIO;
1202        }
1203
1204        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
1205                dev_err(device->dev,
1206                        "Device claims capability %s, but op is not defined\n",
1207                        "DMA_INTERRUPT");
1208                return -EIO;
1209        }
1210
1211        if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
1212                dev_err(device->dev,
1213                        "Device claims capability %s, but op is not defined\n",
1214                        "DMA_CYCLIC");
1215                return -EIO;
1216        }
1217
1218        if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
1219                dev_err(device->dev,
1220                        "Device claims capability %s, but op is not defined\n",
1221                        "DMA_INTERLEAVE");
1222                return -EIO;
1223        }
 1224
1226        if (!device->device_tx_status) {
1227                dev_err(device->dev, "Device tx_status is not defined\n");
1228                return -EIO;
1229        }
 1230
1232        if (!device->device_issue_pending) {
1233                dev_err(device->dev, "Device issue_pending is not defined\n");
1234                return -EIO;
1235        }
1236
1237        if (!device->device_release)
1238                dev_dbg(device->dev,
1239                         "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
1240
1241        kref_init(&device->ref);
1242
1243        /* note: this only matters in the
1244         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1245         */
1246        if (device_has_all_tx_types(device))
1247                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1248
1249        rc = get_dma_id(device);
1250        if (rc != 0)
1251                return rc;
1252
1253        mutex_init(&device->chan_mutex);
1254        ida_init(&device->chan_ida);
1255
1256        /* represent channels in sysfs. Probably want devs too */
1257        list_for_each_entry(chan, &device->channels, device_node) {
1258                rc = __dma_async_device_channel_register(device, chan);
1259                if (rc < 0)
1260                        goto err_out;
1261        }
1262
1263        mutex_lock(&dma_list_mutex);
1264        /* take references on public channels */
1265        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1266                list_for_each_entry(chan, &device->channels, device_node) {
1267                        /* if clients are already waiting for channels we need
1268                         * to take references on their behalf
1269                         */
1270                        if (dma_chan_get(chan) == -ENODEV) {
1271                                /* note we can only get here for the first
1272                                 * channel as the remaining channels are
1273                                 * guaranteed to get a reference
1274                                 */
1275                                rc = -ENODEV;
1276                                mutex_unlock(&dma_list_mutex);
1277                                goto err_out;
1278                        }
1279                }
1280        list_add_tail_rcu(&device->global_node, &dma_device_list);
1281        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1282                device->privatecnt++;   /* Always private */
1283        dma_channel_rebalance();
1284        mutex_unlock(&dma_list_mutex);
1285
1286        dmaengine_debug_register(device);
1287
1288        return 0;
1289
1290err_out:
 1291        /* if we never registered a channel just release the ida */
1292        if (!device->chancnt) {
1293                ida_free(&dma_ida, device->dev_id);
1294                return rc;
1295        }
1296
1297        list_for_each_entry(chan, &device->channels, device_node) {
1298                if (chan->local == NULL)
1299                        continue;
1300                mutex_lock(&dma_list_mutex);
1301                chan->dev->chan = NULL;
1302                mutex_unlock(&dma_list_mutex);
1303                device_unregister(&chan->dev->device);
1304                free_percpu(chan->local);
1305        }
1306        return rc;
1307}
1308EXPORT_SYMBOL(dma_async_device_register);
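
     /*
      * Illustrative sketch (not part of this driver): the minimal provider-side
      * setup that satisfies the checks above for a memcpy-only controller.  The
      * dma_device fields and dma_async_device_register() are real; the foo_*
      * callbacks and the dd/chan/pdev variables are hypothetical.
      *
      *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
      *	dd->dev = &pdev->dev;
      *	dd->device_prep_dma_memcpy = foo_prep_memcpy;
      *	dd->device_tx_status = foo_tx_status;
      *	dd->device_issue_pending = foo_issue_pending;
      *	dd->device_release = foo_release;
      *	INIT_LIST_HEAD(&dd->channels);
      *	chan->device = dd;
      *	list_add_tail(&chan->device_node, &dd->channels);
      *	ret = dma_async_device_register(dd);
      */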
1309
1310/**
1311 * dma_async_device_unregister - unregister a DMA device
1312 * @device:     pointer to &struct dma_device
1313 *
 1314 * This routine is called by DMA driver exit routines; dmaengine holds module
 1315 * references to prevent it from being called while channels are in use.
1316 */
1317void dma_async_device_unregister(struct dma_device *device)
1318{
1319        struct dma_chan *chan, *n;
1320
1321        dmaengine_debug_unregister(device);
1322
1323        list_for_each_entry_safe(chan, n, &device->channels, device_node)
1324                __dma_async_device_channel_unregister(device, chan);
1325
1326        mutex_lock(&dma_list_mutex);
1327        /*
1328         * setting DMA_PRIVATE ensures the device being torn down will not
1329         * be used in the channel_table
1330         */
1331        dma_cap_set(DMA_PRIVATE, device->cap_mask);
1332        dma_channel_rebalance();
1333        ida_free(&dma_ida, device->dev_id);
1334        dma_device_put(device);
1335        mutex_unlock(&dma_list_mutex);
1336}
1337EXPORT_SYMBOL(dma_async_device_unregister);
1338
1339static void dmam_device_release(struct device *dev, void *res)
1340{
1341        struct dma_device *device;
1342
1343        device = *(struct dma_device **)res;
1344        dma_async_device_unregister(device);
1345}
1346
1347/**
 1348 * dmaenginem_async_device_register - register a DMA device (device-managed)
1349 * @device:     pointer to &struct dma_device
1350 *
1351 * The operation is managed and will be undone on driver detach.
1352 */
1353int dmaenginem_async_device_register(struct dma_device *device)
1354{
1355        void *p;
1356        int ret;
1357
1358        p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
1359        if (!p)
1360                return -ENOMEM;
1361
1362        ret = dma_async_device_register(device);
1363        if (!ret) {
1364                *(struct dma_device **)p = device;
1365                devres_add(device->dev, p);
1366        } else {
1367                devres_free(p);
1368        }
1369
1370        return ret;
1371}
1372EXPORT_SYMBOL(dmaenginem_async_device_register);
1373
1374struct dmaengine_unmap_pool {
1375        struct kmem_cache *cache;
1376        const char *name;
1377        mempool_t *pool;
1378        size_t size;
1379};
1380
1381#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1382static struct dmaengine_unmap_pool unmap_pool[] = {
1383        __UNMAP_POOL(2),
1384        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1385        __UNMAP_POOL(16),
1386        __UNMAP_POOL(128),
1387        __UNMAP_POOL(256),
1388        #endif
1389};
1390
1391static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1392{
1393        int order = get_count_order(nr);
1394
1395        switch (order) {
1396        case 0 ... 1:
1397                return &unmap_pool[0];
1398#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1399        case 2 ... 4:
1400                return &unmap_pool[1];
1401        case 5 ... 7:
1402                return &unmap_pool[2];
1403        case 8:
1404                return &unmap_pool[3];
1405#endif
1406        default:
1407                BUG();
1408                return NULL;
1409        }
1410}
1411
1412static void dmaengine_unmap(struct kref *kref)
1413{
1414        struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1415        struct device *dev = unmap->dev;
1416        int cnt, i;
1417
1418        cnt = unmap->to_cnt;
1419        for (i = 0; i < cnt; i++)
1420                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1421                               DMA_TO_DEVICE);
1422        cnt += unmap->from_cnt;
1423        for (; i < cnt; i++)
1424                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1425                               DMA_FROM_DEVICE);
1426        cnt += unmap->bidi_cnt;
1427        for (; i < cnt; i++) {
1428                if (unmap->addr[i] == 0)
1429                        continue;
1430                dma_unmap_page(dev, unmap->addr[i], unmap->len,
1431                               DMA_BIDIRECTIONAL);
1432        }
1433        cnt = unmap->map_cnt;
1434        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1435}
1436
1437void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1438{
1439        if (unmap)
1440                kref_put(&unmap->kref, dmaengine_unmap);
1441}
1442EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1443
1444static void dmaengine_destroy_unmap_pool(void)
1445{
1446        int i;
1447
1448        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1449                struct dmaengine_unmap_pool *p = &unmap_pool[i];
1450
1451                mempool_destroy(p->pool);
1452                p->pool = NULL;
1453                kmem_cache_destroy(p->cache);
1454                p->cache = NULL;
1455        }
1456}
1457
1458static int __init dmaengine_init_unmap_pool(void)
1459{
1460        int i;
1461
1462        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1463                struct dmaengine_unmap_pool *p = &unmap_pool[i];
1464                size_t size;
1465
1466                size = sizeof(struct dmaengine_unmap_data) +
1467                       sizeof(dma_addr_t) * p->size;
1468
1469                p->cache = kmem_cache_create(p->name, size, 0,
1470                                             SLAB_HWCACHE_ALIGN, NULL);
1471                if (!p->cache)
1472                        break;
1473                p->pool = mempool_create_slab_pool(1, p->cache);
1474                if (!p->pool)
1475                        break;
1476        }
1477
1478        if (i == ARRAY_SIZE(unmap_pool))
1479                return 0;
1480
1481        dmaengine_destroy_unmap_pool();
1482        return -ENOMEM;
1483}
1484
1485struct dmaengine_unmap_data *
1486dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1487{
1488        struct dmaengine_unmap_data *unmap;
1489
1490        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1491        if (!unmap)
1492                return NULL;
1493
1494        memset(unmap, 0, sizeof(*unmap));
1495        kref_init(&unmap->kref);
1496        unmap->dev = dev;
1497        unmap->map_cnt = nr;
1498
1499        return unmap;
1500}
1501EXPORT_SYMBOL(dmaengine_get_unmap_data);
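
     /*
      * Illustrative sketch (not part of this driver): typical use of the unmap
      * pool by an offload client such as async_tx.  dmaengine_get_unmap_data(),
      * dma_map_page() and dmaengine_unmap_put() are existing APIs; the pages,
      * length and device are assumptions for the example.
      *
      *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
      *	if (!unmap)
      *		return -ENOMEM;
      *	unmap->len = len;
      *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
      *	unmap->to_cnt = 1;
      *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
      *	unmap->from_cnt = 1;
      *	... submit the descriptor, then drop the reference:
      *	dmaengine_unmap_put(unmap);
      */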
1502
1503void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1504        struct dma_chan *chan)
1505{
1506        tx->chan = chan;
1507        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1508        spin_lock_init(&tx->lock);
1509        #endif
1510}
1511EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1512
1513static inline int desc_check_and_set_metadata_mode(
1514        struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
1515{
1516        /* Make sure that the metadata mode is not mixed */
1517        if (!desc->desc_metadata_mode) {
1518                if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
1519                        desc->desc_metadata_mode = mode;
1520                else
1521                        return -ENOTSUPP;
1522        } else if (desc->desc_metadata_mode != mode) {
1523                return -EINVAL;
1524        }
1525
1526        return 0;
1527}
1528
1529int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
1530                                   void *data, size_t len)
1531{
1532        int ret;
1533
1534        if (!desc)
1535                return -EINVAL;
1536
1537        ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
1538        if (ret)
1539                return ret;
1540
1541        if (!desc->metadata_ops || !desc->metadata_ops->attach)
1542                return -ENOTSUPP;
1543
1544        return desc->metadata_ops->attach(desc, data, len);
1545}
1546EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
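
     /*
      * Illustrative sketch (not part of this driver): attaching client-provided
      * metadata to a prepared descriptor (DESC_METADATA_CLIENT mode).
      * dmaengine_prep_slave_single() and dmaengine_desc_attach_metadata() are
      * existing APIs; the buffers and lengths are assumptions for the example.
      *
      *	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
      *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
      *	if (!desc)
      *		return -ENOMEM;
      *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
      *	if (ret)
      *		return ret;
      *	cookie = dmaengine_submit(desc);
      *	dma_async_issue_pending(chan);
      */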
1547
1548void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
1549                                      size_t *payload_len, size_t *max_len)
1550{
1551        int ret;
1552
1553        if (!desc)
1554                return ERR_PTR(-EINVAL);
1555
1556        ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1557        if (ret)
1558                return ERR_PTR(ret);
1559
1560        if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
1561                return ERR_PTR(-ENOTSUPP);
1562
1563        return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
1564}
1565EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
1566
1567int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
1568                                    size_t payload_len)
1569{
1570        int ret;
1571
1572        if (!desc)
1573                return -EINVAL;
1574
1575        ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1576        if (ret)
1577                return ret;
1578
1579        if (!desc->metadata_ops || !desc->metadata_ops->set_len)
1580                return -ENOTSUPP;
1581
1582        return desc->metadata_ops->set_len(desc, payload_len);
1583}
1584EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
1585
1586/**
1587 * dma_wait_for_async_tx - spin wait for a transaction to complete
1588 * @tx:         in-flight transaction to wait on
1589 */
1590enum dma_status
1591dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1592{
1593        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1594
1595        if (!tx)
1596                return DMA_COMPLETE;
1597
1598        while (tx->cookie == -EBUSY) {
1599                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1600                        dev_err(tx->chan->device->dev,
1601                                "%s timeout waiting for descriptor submission\n",
1602                                __func__);
1603                        return DMA_ERROR;
1604                }
1605                cpu_relax();
1606        }
1607        return dma_sync_wait(tx->chan, tx->cookie);
1608}
1609EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1610
1611/**
1612 * dma_run_dependencies - process dependent operations on the target channel
1613 * @tx:         transaction with dependencies
1614 *
1615 * Helper routine for DMA drivers to process (start) dependent operations
1616 * on their target channel.
1617 */
1618void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1619{
1620        struct dma_async_tx_descriptor *dep = txd_next(tx);
1621        struct dma_async_tx_descriptor *dep_next;
1622        struct dma_chan *chan;
1623
1624        if (!dep)
1625                return;
1626
1627        /* we'll submit tx->next now, so clear the link */
1628        txd_clear_next(tx);
1629        chan = dep->chan;
1630
 1631        /* keep submitting up until a channel switch is detected; in that
 1632         * case we will be called again as a result of processing the
 1633         * interrupt from async_tx_channel_switch
 1634         */
1635        for (; dep; dep = dep_next) {
1636                txd_lock(dep);
1637                txd_clear_parent(dep);
1638                dep_next = txd_next(dep);
1639                if (dep_next && dep_next->chan == chan)
1640                        txd_clear_next(dep); /* ->next will be submitted */
1641                else
1642                        dep_next = NULL; /* submit current dep and terminate */
1643                txd_unlock(dep);
1644
1645                dep->tx_submit(dep);
1646        }
1647
1648        chan->device->device_issue_pending(chan);
1649}
1650EXPORT_SYMBOL_GPL(dma_run_dependencies);
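
     /*
      * Illustrative sketch (not part of this driver): how a DMA driver's
      * completion handler typically uses this helper.  dma_cookie_complete(),
      * dma_descriptor_unmap(), dmaengine_desc_get_callback_invoke() and
      * dma_run_dependencies() are existing helpers; foo_desc is hypothetical.
      *
      *	struct dma_async_tx_descriptor *txd = &foo_desc->txd;
      *
      *	dma_cookie_complete(txd);
      *	dma_descriptor_unmap(txd);
      *	dmaengine_desc_get_callback_invoke(txd, NULL);
      *	dma_run_dependencies(txd);
      */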
1651
1652static int __init dma_bus_init(void)
1653{
1654        int err = dmaengine_init_unmap_pool();
1655
1656        if (err)
1657                return err;
1658
1659        err = class_register(&dma_devclass);
1660        if (!err)
1661                dmaengine_debugfs_init();
1662
1663        return err;
1664}
1665arch_initcall(dma_bus_init);
1666