linux/drivers/media/pci/cx18/cx18-queue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 */

#include "cx18-driver.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-scb.h"
#include "cx18-io.h"

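/* Byte-swap each 32-bit word of a buffer's used data in place */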
void cx18_buf_swap(struct cx18_buffer *buf)
{
        int i;

        for (i = 0; i < buf->bytesused; i += 4)
                swab32s((u32 *)(buf->buf + i));
}

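/* Byte-swap every buffer in an MDL, stopping at the first unused buffer */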
void _cx18_mdl_swap(struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        list_for_each_entry(buf, &mdl->buf_list, list) {
                if (buf->bytesused == 0)
                        break;
                cx18_buf_swap(buf);
        }
}

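/* Reset a queue to the empty state */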
void cx18_queue_init(struct cx18_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        atomic_set(&q->depth, 0);
        q->bytesused = 0;
}

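/*
 * Add an MDL to the front (LIFO) or back (FIFO) of a queue.  Returns the
 * queue actually used: an enqueue aimed at q_busy falls back to q_free
 * once the firmware's MDL limit has been reached.
 */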
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
                                 struct cx18_queue *q, int to_front)
{
        /* clear the mdl if it is not to be enqueued to the full queue */
        if (q != &s->q_full) {
                mdl->bytesused = 0;
                mdl->readpos = 0;
                mdl->m_flags = 0;
                mdl->skipped = 0;
                mdl->curr_buf = NULL;
        }

        /* q_busy is restricted to a max buffer count imposed by firmware */
        if (q == &s->q_busy &&
            atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
                q = &s->q_free;

        spin_lock(&q->lock);

        if (to_front)
                list_add(&mdl->list, &q->list); /* LIFO */
        else
                list_add_tail(&mdl->list, &q->list); /* FIFO */
        q->bytesused += mdl->bytesused - mdl->readpos;
        atomic_inc(&q->depth);

        spin_unlock(&q->lock);
        return q;
}

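/* Remove and return the MDL at the head of a queue, or NULL if it is empty */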
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
        struct cx18_mdl *mdl = NULL;

        spin_lock(&q->lock);
        if (!list_empty(&q->list)) {
                mdl = list_first_entry(&q->list, struct cx18_mdl, list);
                list_del_init(&mdl->list);
                q->bytesused -= mdl->bytesused - mdl->readpos;
                mdl->skipped = 0;
                atomic_dec(&q->depth);
        }
        spin_unlock(&q->lock);
        return mdl;
}

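/*
 * Distribute mdl->bytesused over the MDL's buffers, filling each buffer up
 * to s->buf_size in turn, and sync every buffer for CPU access.
 */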
static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
                                          struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;
        u32 buf_size = s->buf_size;
        u32 bytesused = mdl->bytesused;

        list_for_each_entry(buf, &mdl->buf_list, list) {
                buf->readpos = 0;
                if (bytesused >= buf_size) {
                        buf->bytesused = buf_size;
                        bytesused -= buf_size;
                } else {
                        buf->bytesused = bytesused;
                        bytesused = 0;
                }
                cx18_buf_sync_for_cpu(s, buf);
        }
}

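/* Handle the common single-buffer MDL case inline */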
static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
                                                struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        if (list_is_singular(&mdl->buf_list)) {
                buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
                                       list);
                buf->bytesused = mdl->bytesused;
                buf->readpos = 0;
                cx18_buf_sync_for_cpu(s, buf);
        } else {
                _cx18_mdl_update_bufs_for_cpu(s, mdl);
        }
}

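/*
 * Find the MDL the firmware reported as done on q_busy, pull it off that
 * queue, and prepare it for the caller.  MDLs that have been skipped too
 * often are assumed lost by the firmware and are returned to q_free.
 */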
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
        u32 bytesused)
{
        struct cx18 *cx = s->cx;
        struct cx18_mdl *mdl;
        struct cx18_mdl *tmp;
        struct cx18_mdl *ret = NULL;
        LIST_HEAD(sweep_up);

        /*
         * We don't have to acquire multiple q locks here, because we are
         * serialized by the single threaded work handler.
         * MDLs from the firmware will thus remain in order as
         * they are moved from q_busy to q_full or to the dvb ring buffer.
         */
        spin_lock(&s->q_busy.lock);
        list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
                /*
                 * We should find what the firmware told us is done,
                 * right at the front of the queue.  If we don't, we likely have
                 * missed an mdl done message from the firmware.
                 * Once we skip an mdl repeatedly, relative to the size of
                 * q_busy, we have high confidence we've missed it.
                 */
                if (mdl->id != id) {
                        mdl->skipped++;
                        if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
                                /* mdl must have fallen out of rotation */
                                CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
                                          s->name, mdl->id,
                                          mdl->skipped);
                                /* Sweep it up to put it back into rotation */
                                list_move_tail(&mdl->list, &sweep_up);
                                atomic_dec(&s->q_busy.depth);
                        }
                        continue;
                }
                /*
                 * We pull the desired mdl off of the queue here.  Something
                 * will have to put it back on a queue later.
                 */
                list_del_init(&mdl->list);
                atomic_dec(&s->q_busy.depth);
                ret = mdl;
                break;
        }
        spin_unlock(&s->q_busy.lock);

        /*
         * We found the mdl for which we were looking.  Get it ready for
         * the caller to put on q_full or in the dvb ring buffer.
         */
        if (ret != NULL) {
                ret->bytesused = bytesused;
                ret->skipped = 0;
                /* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
                cx18_mdl_update_bufs_for_cpu(s, ret);
                if (s->type != CX18_ENC_STREAM_TYPE_TS)
                        set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
        }

        /* Put any mdls the firmware is ignoring back into normal rotation */
        list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
                list_del_init(&mdl->list);
                cx18_enqueue(s, mdl, &s->q_free);
        }
        return ret;
}

/* Move all MDLs from one queue to another, clearing each MDL as it goes */
static void cx18_queue_flush(struct cx18_stream *s,
                             struct cx18_queue *q_src, struct cx18_queue *q_dst)
{
        struct cx18_mdl *mdl;

        /* It only makes sense to flush to q_free or q_idle */
        if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
                return;

        spin_lock(&q_src->lock);
        spin_lock(&q_dst->lock);
        while (!list_empty(&q_src->list)) {
                mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
                list_move_tail(&mdl->list, &q_dst->list);
                mdl->bytesused = 0;
                mdl->readpos = 0;
                mdl->m_flags = 0;
                mdl->skipped = 0;
                mdl->curr_buf = NULL;
                atomic_inc(&q_dst->depth);
        }
        cx18_queue_init(q_src);
        spin_unlock(&q_src->lock);
        spin_unlock(&q_dst->lock);
}

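/* Return all MDLs on q_busy and q_full to q_free */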
void cx18_flush_queues(struct cx18_stream *s)
{
        cx18_queue_flush(s, &s->q_busy, &s->q_free);
        cx18_queue_flush(s, &s->q_full, &s->q_free);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
        struct cx18_queue *q_idle = &s->q_idle;
        struct cx18_mdl *mdl;
        struct cx18_buffer *buf;

        /* Move all MDLs to q_idle */
        cx18_queue_flush(s, &s->q_busy, q_idle);
        cx18_queue_flush(s, &s->q_full, q_idle);
        cx18_queue_flush(s, &s->q_free, q_idle);

        /* Reset MDL id's and move all buffers back to the stream's buf_pool */
        spin_lock(&q_idle->lock);
        list_for_each_entry(mdl, &q_idle->list, list) {
                while (!list_empty(&mdl->buf_list)) {
                        buf = list_first_entry(&mdl->buf_list,
                                               struct cx18_buffer, list);
                        list_move_tail(&buf->list, &s->buf_pool);
                        buf->bytesused = 0;
                        buf->readpos = 0;
                }
                mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
                /* all other mdl fields were cleared by cx18_queue_flush() */
        }
        spin_unlock(&q_idle->lock);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        struct cx18_mdl *mdl;
        struct cx18_buffer *buf;
        int mdl_id;
        int i;
        u32 partial_buf_size;

        /*
         * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free
         * Excess MDLs are left on q_idle
         * Excess buffers are left in buf_pool and/or on an MDL in q_idle
         */
        mdl_id = s->mdl_base_idx;
        for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
             mdl != NULL && i == s->bufs_per_mdl;
             mdl = cx18_dequeue(s, &s->q_idle)) {

                mdl->id = mdl_id;

                for (i = 0; i < s->bufs_per_mdl; i++) {
                        if (list_empty(&s->buf_pool))
                                break;

                        buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
                                               list);
                        list_move_tail(&buf->list, &mdl->buf_list);

                        /* update the firmware's MDL array with this buffer */
                        cx18_writel(cx, buf->dma_handle,
                                    &cx->scb->cpu_mdl[mdl_id + i].paddr);
                        cx18_writel(cx, s->buf_size,
                                    &cx->scb->cpu_mdl[mdl_id + i].length);
                }

                if (i == s->bufs_per_mdl) {
                        /*
                         * The encoder doesn't honor s->mdl_size.  So in the
                         * case of a non-integral number of buffers to meet
                         * mdl_size, we lie about the size of the last buffer
                         * in the MDL to get the encoder to really only send
                         * us mdl_size bytes per MDL transfer.
                         */
                        partial_buf_size = s->mdl_size % s->buf_size;
                        if (partial_buf_size) {
                                cx18_writel(cx, partial_buf_size,
                                      &cx->scb->cpu_mdl[mdl_id + i - 1].length);
                        }
                        cx18_enqueue(s, mdl, &s->q_free);
                } else {
                        /* Not enough buffers for this MDL; we won't use it */
                        cx18_push(s, mdl, &s->q_idle);
                }
                mdl_id += i;
        }
}

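/* Sync every buffer in an MDL for DMA to the device */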
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
{
        int dma = s->dma;
        u32 buf_size = s->buf_size;
        struct pci_dev *pci_dev = s->cx->pci_dev;
        struct cx18_buffer *buf;

        list_for_each_entry(buf, &mdl->buf_list, list)
                dma_sync_single_for_device(&pci_dev->dev, buf->dma_handle,
                                           buf_size, dma);
}

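/*
 * Allocate one DMA-mapped buffer and one MDL per requested buffer for a
 * stream.  MDLs start out on q_idle and buffers in buf_pool; returns
 * -ENOMEM if the SCB's MDL array or memory cannot accommodate them.
 */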
int cx18_stream_alloc(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        int i;

        if (s->buffers == 0)
                return 0;

        CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
                s->name, s->buffers, s->buf_size,
                s->buffers * s->buf_size / 1024,
                (s->buffers * s->buf_size * 100 / 1024) % 100);

        if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -
                                (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
                unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
                                        ((char __iomem *)cx->scb->cpu_mdl));

                CX18_ERR("Too many buffers, cannot fit in SCB area\n");
                CX18_ERR("Max buffers = %zu\n",
                        bufsz / sizeof(struct cx18_mdl_ent));
                return -ENOMEM;
        }

        s->mdl_base_idx = cx->free_mdl_idx;

        /* allocate stream buffers and MDLs */
        for (i = 0; i < s->buffers; i++) {
                struct cx18_mdl *mdl;
                struct cx18_buffer *buf;

                /* 1 MDL per buffer to handle the worst & also default case */
                mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
                if (mdl == NULL)
                        break;

                buf = kzalloc(sizeof(struct cx18_buffer),
                                GFP_KERNEL|__GFP_NOWARN);
                if (buf == NULL) {
                        kfree(mdl);
                        break;
                }

                buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
                if (buf->buf == NULL) {
                        kfree(mdl);
                        kfree(buf);
                        break;
                }

                INIT_LIST_HEAD(&mdl->list);
                INIT_LIST_HEAD(&mdl->buf_list);
                mdl->id = s->mdl_base_idx; /* a somewhat safe value */
                cx18_enqueue(s, mdl, &s->q_idle);

                INIT_LIST_HEAD(&buf->list);
                buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
                                                 buf->buf, s->buf_size,
                                                 s->dma);
                cx18_buf_sync_for_cpu(s, buf);
                list_add_tail(&buf->list, &s->buf_pool);
        }
        if (i == s->buffers) {
                cx->free_mdl_idx += s->buffers;
                return 0;
        }
        CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
        cx18_stream_free(s);
        return -ENOMEM;
}

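/* Undo cx18_stream_alloc(): free all MDLs, unmap and free all buffers */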
void cx18_stream_free(struct cx18_stream *s)
{
        struct cx18_mdl *mdl;
        struct cx18_buffer *buf;
        struct cx18 *cx = s->cx;

        CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);

        /* move all buffers to buf_pool and all MDLs to q_idle */
        cx18_unload_queues(s);

        /* empty q_idle */
        while ((mdl = cx18_dequeue(s, &s->q_idle)))
                kfree(mdl);

        /* empty buf_pool */
        while (!list_empty(&s->buf_pool)) {
                buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
                list_del_init(&buf->list);

                dma_unmap_single(&s->cx->pci_dev->dev, buf->dma_handle,
                                 s->buf_size, s->dma);
                kfree(buf->buf);
                kfree(buf);
        }
}