linux/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Scene Change Detection DMA driver
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
 *          Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "../../../dma/dmaengine.h"

#include "xilinx-scenechange.h"

/**
 * xscd_dma_start - Start the SCD core
 * @xscd: The SCD device
 * @channels: Bitmask of enabled channels
 */
static void xscd_dma_start(struct xscd_device *xscd, unsigned int channels)
{
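	/*
	 * Enable the ap_done interrupt and the global interrupt enable before
	 * turning on the selected channels. In memory-based mode every run is
	 * kicked explicitly, while in stream-based mode the core is set to
	 * auto-restart and processes incoming frames continuously.
	 */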
	xscd_write(xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
	xscd_write(xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
	xscd_write(xscd->iomem, XSCD_CHAN_EN_OFFSET, channels);

	xscd_set(xscd->iomem, XSCD_CTRL_OFFSET,
		 xscd->memory_based ? XSCD_CTRL_AP_START
				    : XSCD_CTRL_AP_START |
				      XSCD_CTRL_AUTO_RESTART);

	xscd->running = true;
}

/**
 * xscd_dma_stop - Stop the SCD core
 * @xscd: The SCD device
 */
static void xscd_dma_stop(struct xscd_device *xscd)
{
	xscd_clr(xscd->iomem, XSCD_CTRL_OFFSET,
		 xscd->memory_based ? XSCD_CTRL_AP_START
				    : XSCD_CTRL_AP_START |
				      XSCD_CTRL_AUTO_RESTART);

	xscd->running = false;
}

/**
 * xscd_dma_setup_channel - Setup a channel for transfer
 * @chan: Driver specific channel struct pointer
 *
 * Return: 1 if the channel starts to run for a new transfer. Otherwise, 0.
 */
static int xscd_dma_setup_channel(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc;

	if (!chan->enabled)
		return 0;

	if (list_empty(&chan->pending_list))
		return 0;

	desc = list_first_entry(&chan->pending_list,
				struct xscd_dma_tx_descriptor, node);
	list_del(&desc->node);

	xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);
	chan->active_desc = desc;

	return 1;
}

/**
 * xscd_dma_kick - Start a run of the SCD core if channels are ready
 * @xscd: The SCD device
 *
 * This function starts a single run of the SCD core when all the following
 * conditions are met:
 *
 * - The SCD is not currently running
 * - At least one channel is enabled and has buffers available
 *
 * It can be used to start the SCD when a buffer is queued, when a channel
 * starts streaming, or to start the next run. Calling this function is only
 * valid for memory-based mode and is not permitted for stream-based mode.
 *
 * The running state for all channels is updated. Channels that are being
 * stopped are signalled through the channel wait queue.
 *
 * The function must be called with the xscd_device lock held.
 */
static void xscd_dma_kick(struct xscd_device *xscd)
{
	unsigned int channels = 0;
	unsigned int i;

	lockdep_assert_held(&xscd->lock);

	if (xscd->running)
		return;

	for (i = 0; i < xscd->num_streams; i++) {
		struct xscd_dma_chan *chan = xscd->channels[i];
		unsigned long flags;
		unsigned int running;
		bool stopped;

		spin_lock_irqsave(&chan->lock, flags);
		running = xscd_dma_setup_channel(chan);
		stopped = chan->running && !running;
		chan->running = running;
		spin_unlock_irqrestore(&chan->lock, flags);

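		/*
		 * Build the XSCD_CHAN_EN bitmask for this run: bit N is set
		 * when channel N was programmed with a new frame above.
		 */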
		channels |= running << chan->id;
		if (stopped)
			wake_up(&chan->wait);
	}

	if (channels)
		xscd_dma_start(xscd, channels);
	else
		xscd_dma_stop(xscd);
}

/**
 * xscd_dma_enable_channel - Enable/disable a channel
 * @chan: Driver specific channel struct pointer
 * @enable: True to enable the channel, false to disable it
 *
 * This function enables or disables a channel. When operating in memory-based
 * mode, enabling a channel kicks processing if buffers are available for any
 * enabled channel and the SCD core is idle. When operating in stream-based
 * mode, the SCD core is started or stopped synchronously when the channel is
 * enabled or disabled.
 *
 * This function must be called in non-atomic, non-interrupt context.
 */
void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable)
{
	struct xscd_device *xscd = chan->xscd;

	if (enable) {
		/*
		 * FIXME: Don't set chan->enabled to false here, it will be
		 * done in xscd_dma_terminate_all(). This works around a bug
		 * introduced in commit 2e77607047c6 ("xilinx: v4l2: dma: Add
		 * multiple output support") that stops all channels when the
		 * first one is stopped, even though they are part of
		 * independent pipelines. This workaround should be safe as
		 * long as dmaengine_terminate_all() is called after
		 * xvip_pipeline_set_stream().
		 */
		spin_lock_irq(&chan->lock);
		chan->enabled = true;
		spin_unlock_irq(&chan->lock);
	}

	if (xscd->memory_based) {
		if (enable) {
			spin_lock_irq(&xscd->lock);
			xscd_dma_kick(xscd);
			spin_unlock_irq(&xscd->lock);
		}
	} else {
		if (enable)
			xscd_dma_start(xscd, BIT(chan->id));
		else
			xscd_dma_stop(xscd);
	}
}

/**
 * xscd_dma_irq_handler - SCD DMA interrupt handler
 * @xscd: Pointer to the SCD device structure
 */
void xscd_dma_irq_handler(struct xscd_device *xscd)
{
	unsigned int i;

	/*
	 * Mark the active descriptors as complete, move them to the done list
	 * and schedule the tasklet to clean them up.
	 */
	for (i = 0; i < xscd->num_streams; ++i) {
		struct xscd_dma_chan *chan = xscd->channels[i];
		struct xscd_dma_tx_descriptor *desc = chan->active_desc;

		if (!desc)
			continue;

		dma_cookie_complete(&desc->async_tx);
		xscd_chan_event_notify(&xscd->chans[i]);

		spin_lock(&chan->lock);
		list_add_tail(&desc->node, &chan->done_list);
		chan->active_desc = NULL;
		spin_unlock(&chan->lock);

		tasklet_schedule(&chan->tasklet);
	}

	/* Start the next run, if any. */
	spin_lock(&xscd->lock);
	xscd->running = false;
	xscd_dma_kick(xscd);
	spin_unlock(&xscd->lock);
}

/* -----------------------------------------------------------------------------
 * DMA Engine
 */

/**
 * xscd_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie assigned to the submitted transaction
 */
static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
	struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xscd_dma_free_desc_list - Free a list of descriptors
 * @chan: Driver specific dma channel
 * @list: List of descriptors to remove and free
 */
static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
				    struct list_head *list)
{
	struct xscd_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		kfree(desc);
	}
}

/**
 * xscd_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xscd_dma_free_desc_list(chan, &chan->pending_list);
	xscd_dma_free_desc_list(chan, &chan->done_list);
	kfree(chan->active_desc);

	chan->active_desc = NULL;
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_desc_cleanup - Clean up completed channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
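			/*
			 * Drop the channel lock while running the callback,
			 * as it may re-enter the dmaengine API and take
			 * chan->lock again.
			 */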
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		kfree(desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_prep_interleaved - Prepare a descriptor for an interleaved
 * DMA transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xscd_dma_prep_interleaved(struct dma_chan *dchan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	struct xscd_dma_tx_descriptor *desc;
	struct xscd_dma_desc *sw;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xscd_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	sw = &desc->sw;
	sw->vsize = xt->numf;
	sw->hsize = xt->sgl[0].size;
	sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
	sw->luma_plane_addr = xt->src_start;

	return &desc->async_tx;
}
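
/*
 * Example client usage (illustrative sketch only, not part of this driver):
 * a capture pipeline would describe the luma plane of a frame with a
 * dma_interleaved_template and submit it through the standard dmaengine API.
 * Only src_start, numf, sgl[0].size and sgl[0].icg are consumed by
 * xscd_dma_prep_interleaved() above. The names scd_chan, luma_addr, width,
 * height and stride are hypothetical placeholders.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *
 *	xt->src_start = luma_addr;		// dma_addr_t of the luma plane
 *	xt->numf = height;			// number of lines
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width;		// bytes per line
 *	xt->sgl[0].icg = stride - width;	// gap between lines
 *
 *	tx = dmaengine_prep_interleaved_dma(scd_chan, xt, DMA_CTRL_ACK);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(scd_chan);
 *	}
 *	kfree(xt);
 */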

static bool xscd_dma_is_running(struct xscd_dma_chan *chan)
{
	bool running;

	spin_lock_irq(&chan->lock);
	running = chan->running;
	spin_unlock_irq(&chan->lock);

	return running;
}

/**
 * xscd_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific dma channel pointer
 *
 * Return: 0
 */
static int xscd_dma_terminate_all(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	int ret;

	spin_lock_irq(&chan->lock);
	chan->enabled = false;
	spin_unlock_irq(&chan->lock);

	/* Wait for any on-going transfer to complete. */
	ret = wait_event_timeout(chan->wait, !xscd_dma_is_running(chan),
				 msecs_to_jiffies(100));
	WARN_ON(ret == 0);

	xscd_dma_free_descriptors(chan);
	return 0;
}

/**
 * xscd_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xscd_dma_issue_pending(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	struct xscd_device *xscd = chan->xscd;
	unsigned long flags;

	spin_lock_irqsave(&xscd->lock, flags);
	xscd_dma_kick(xscd);
	spin_unlock_irqrestore(&xscd->lock, flags);
}

static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xscd_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

	xscd_dma_free_descriptors(chan);
}

/**
 * xscd_dma_do_tasklet - Tasklet handler to clean up completed descriptors
 * @data: Pointer to the Xilinx SCD DMA channel structure
 */
static void xscd_dma_do_tasklet(unsigned long data)
{
	struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;

	xscd_dma_chan_desc_cleanup(chan);
}

/**
 * xscd_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: always 0, only the DMA cookie needs to be initialized
 */
static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	dma_cookie_init(dchan);
	return 0;
}

/**
 * of_scdma_xilinx_xlate - Translate a DT DMA specifier to a DMA channel
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct xscd_device *xscd = ofdma->of_dma_data;
	u32 chan_id = dma_spec->args[0];

	if (chan_id >= xscd->num_streams)
		return NULL;

	if (!xscd->channels[chan_id])
		return NULL;

	return dma_get_slave_channel(&xscd->channels[chan_id]->common);
}
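
/*
 * A consumer device tree node would reference a channel by its index through
 * the standard "dmas"/"dma-names" properties, along the lines of the
 * illustrative snippet below (node and property names are hypothetical, see
 * the actual binding documentation):
 *
 *	dmas = <&scd_dma 0>;
 *	dma-names = "scd";
 */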

static void xscd_dma_chan_init(struct xscd_device *xscd, int chan_id)
{
	struct xscd_dma_chan *chan = &xscd->chans[chan_id].dmachan;

	chan->id = chan_id;
	chan->iomem = xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
	chan->xscd = xscd;

	xscd->channels[chan->id] = chan;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
		     (unsigned long)chan);
	init_waitqueue_head(&chan->wait);

	chan->common.device = &xscd->dma_device;
	list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);
}

/**
 * xscd_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
{
	list_del(&chan->common.device_node);
}

/**
 * xscd_dma_init - Initialize the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * Return: '0' on success and failure value on error
 */
int xscd_dma_init(struct xscd_device *xscd)
{
	struct dma_device *ddev = &xscd->dma_device;
	unsigned int chan_id;
	int ret;

	/* Initialize the DMA engine */
	ddev->dev = xscd->dev;

	ret = dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(xscd->dev, "failed to set the DMA mask\n");
		return ret;
	}

	INIT_LIST_HEAD(&ddev->channels);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
	ddev->device_tx_status = xscd_dma_tx_status;
	ddev->device_issue_pending = xscd_dma_issue_pending;
	ddev->device_terminate_all = xscd_dma_terminate_all;
	ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;

	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
		xscd_dma_chan_init(xscd, chan_id);

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xscd->dev, "failed to register the dma device\n");
		goto error;
	}

	ret = of_dma_controller_register(xscd->dev->of_node,
					 of_scdma_xilinx_xlate, xscd);
	if (ret) {
		dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	dev_info(xscd->dev, "Xilinx Scene Change DMA is initialized!\n");
	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);

error:
	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
		xscd_dma_chan_remove(xscd->channels[chan_id]);

	return ret;
}
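
/*
 * Note: xscd_dma_init() is expected to be called once from the SCD core
 * driver's probe path, after xscd->iomem, xscd->num_streams and the
 * per-stream xscd->chans array have been set up, with xscd_dma_cleanup()
 * below as its counterpart on the remove path.
 */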

/**
 * xscd_dma_cleanup - Clean up the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * This function is the counterpart of xscd_dma_init() and cleans up the
 * resources related to the DMA engine.
 */
void xscd_dma_cleanup(struct xscd_device *xscd)
{
	/* Undo initialization in the reverse order of xscd_dma_init(). */
	of_dma_controller_free(xscd->dev->of_node);
	dma_async_device_unregister(&xscd->dma_device);
}