linux/drivers/media/platform/vsp1/vsp1_dl.c
/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
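
/*
 * An illustrative sketch of the memory layout implied by the structures
 * above (a reading aid derived from the structures, not from the datasheet):
 * in header mode a display list is a header pointing at up to eight bodies,
 * each body being an array of register address/value pairs.
 *
 *	struct vsp1_dl_header		struct vsp1_dl_entry[]
 *	+------------------+		+----------+----------+
 *	| num_lists        |	   +--->| addr     | data     |
 *	| lists[0].addr ---+-------+	| addr     | data     |
 *	| ...              |		| ...      | ...      |
 *	| next_header      |		+----------+----------+
 *	| flags            |
 *	+------------------+
 */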

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in a display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Cleanup a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe; in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
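
/*
 * A minimal usage sketch, assuming a caller that already holds a vsp1_device
 * pointer (the register offsets 0x0100 and 0x0104 below are placeholders,
 * not real VI6 registers):
 *
 *	struct vsp1_dl_body *dlb;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 2);
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	vsp1_dl_fragment_write(dlb, 0x0100, 0x00000001);
 *	vsp1_dl_fragment_write(dlb, 0x0104, 0x00000000);
 */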

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/*
	 * Initialize the display list body and allocate DMA memory for the body
	 * and the optional header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}
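
/*
 * Schematically, with the default VSP1_DL_NUM_ENTRIES of 256 and 8-byte
 * entries, the single allocation performed above is laid out as follows
 * (sizes assume this configuration):
 *
 *	body0.dma                          dl->dma
 *	|                                  |
 *	v                                  v
 *	+----------------------------------+------------------------+
 *	| 256 * 8 bytes of body entries    | 8-byte aligned header  |
 *	+----------------------------------+------------------------+
 */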

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * sleepable (non-atomic) context. Move all fragments to the display
	 * list manager's list of fragments to be freed; they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe; in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempts to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}
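
/*
 * A sketch of the ownership rule described above, continuing the fragment
 * example given after vsp1_dl_fragment_write():
 *
 *	ret = vsp1_dl_list_add_fragment(dl, dlb);
 *	if (ret < 0)
 *		vsp1_dl_fragment_free(dlb);
 *
 * On failure the fragment was never added, so the caller still owns it and
 * must free it explicitly; on success the display list owns it and will free
 * it when released.
 */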

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
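
/*
 * A sketch of building a two-partition chain (the names, registers and
 * values are illustrative; the partition logic itself lives in the pipeline
 * code):
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_list *part = vsp1_dl_list_get(dlm);
 *
 *	vsp1_dl_list_write(head, reg, value);
 *	vsp1_dl_list_write(part, reg, value);
 *	vsp1_dl_list_add_chain(head, part);
 *	vsp1_dl_list_commit(head);
 *
 * Releasing "head" later also releases "part", as ownership was transferred
 * to the chain head.
 */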

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies' addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */

	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * If this display list's chain is not empty, we are on a list,
		 * and the next item is the display list that we must queue for
		 * automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the VSP
		 * should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
		 * and the next display list must not be started automatically.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
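
/*
 * For reference, the header programming performed above, summarised per
 * case:
 *
 *	chained, not last:	next_header = next->dma
 *				flags = AUTO_START
 *	continuous, last:	next_header = dl->dma (loops on itself)
 *				flags = INT_ENABLE | AUTO_START
 *	single-shot, last:	next_header left as is (ignored without
 *				AUTO_START), flags = INT_ENABLE
 */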

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. In headerless mode the
	 * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
	 * register, and in header mode by clearing the UPDHDR bit in the CMD
	 * register.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
			  & VI6_DL_BODY_SIZE_UPD);
	else
		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
		 * In headerless mode, program the hardware directly with the
		 * display list body address and size and set the UPD bit. The
		 * bit will be cleared by the hardware when the display list
		 * processing starts.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries * sizeof(*dl->header->lists)));
	} else {
		/*
		 * In header mode, program the display list header address. If
		 * the hardware is idle (single-shot mode or first frame in
		 * continuous mode) it will then be started independently. If
		 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
		 * will be updated with the display list address.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
	}
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending; it will
	 * be queued up to the hardware by the frame end interrupt handler.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_child;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
	}

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
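
/*
 * Putting the pieces together, a typical transaction as seen from a caller
 * (a sketch with placeholder reg/value; the real callers live elsewhere in
 * the driver, e.g. in vsp1_video.c and vsp1_drm.c):
 *
 *	struct vsp1_dl_list *dl;
 *
 *	dl = vsp1_dl_list_get(dlm);
 *	if (!dl)
 *		return -ENOMEM;
 *
 *	vsp1_dl_list_write(dl, reg, value);
 *	vsp1_dl_list_commit(dl);
 *
 * After a successful commit the manager owns the list again and returns it
 * to the free pool once the hardware is done with it; the caller must not
 * call vsp1_dl_list_put() on it.
 */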

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return true if the previous display list has completed at frame end, or
 * false if it has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns true in
 * single-shot mode as display list processing is then not continuous and
 * races never occur.
 */
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	bool completed = false;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, so we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		completed = true;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		completed = true;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return completed;
}
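
/*
 * A sketch of the expected call site (simplified; in this driver the frame
 * end handling lives in the pipeline code, which calls this function with
 * the output WPF's display list manager):
 *
 *	bool completed = vsp1_dlm_irq_frame_end(pipe->output->dlm);
 *
 *	if (completed)
 *		... signal frame completion to the pipeline ...
 */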

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode; all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

/*
 * Free all fragments waiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			/* Free the lists already allocated before bailing out. */
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}
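
/*
 * A usage sketch, mirroring how a WPF entity would set up its manager at
 * initialization time (the prealloc value is illustrative):
 *
 *	wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
 *	if (!wpf->dlm)
 *		return -ENOMEM;
 *
 * The prealloc count bounds the number of display lists that can be in
 * flight concurrently; vsp1_dl_list_get() returns NULL once the pool is
 * exhausted.
 */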

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}