linux/drivers/media/pci/cx18/cx18-queue.c
/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-scb.h"
#include "cx18-io.h"

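/*
 * Each stream maintains several MDL (Memory Descriptor List) queues:
 *
 *   q_idle - MDLs with no buffers attached, out of active rotation
 *   q_free - MDLs with buffers attached, ready to hand to the firmware
 *   q_busy - MDLs currently owned by the firmware, capped at
 *            CX18_MAX_FW_MDLS_PER_STREAM entries
 *   q_full - MDLs holding captured data, waiting for a reader
 *
 * Buffers not attached to any MDL sit in the stream's buf_pool.
 */

/* Byte-swap the 32-bit words holding a buffer's used bytes, in place */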
void cx18_buf_swap(struct cx18_buffer *buf)
{
        int i;

        for (i = 0; i < buf->bytesused; i += 4)
                swab32s((u32 *)(buf->buf + i));
}

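/*
 * Swap every buffer in an MDL that holds data; buffers are filled in list
 * order, so the first buffer with bytesused == 0 ends the walk.
 */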
void _cx18_mdl_swap(struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        list_for_each_entry(buf, &mdl->buf_list, list) {
                if (buf->bytesused == 0)
                        break;
                cx18_buf_swap(buf);
        }
}

void cx18_queue_init(struct cx18_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        atomic_set(&q->depth, 0);
        q->bytesused = 0;
}

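/*
 * Add an MDL to a queue, at the front (LIFO) or back (FIFO).  The MDL's
 * transfer state is reset unless it is going onto q_full, the only queue
 * where that state is still meaningful.  An MDL bound for q_busy is
 * diverted to q_free when the firmware's MDL limit has been reached;
 * the queue actually used is returned to the caller.
 */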
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
                                 struct cx18_queue *q, int to_front)
{
        /* clear the mdl if it is not to be enqueued to the full queue */
        if (q != &s->q_full) {
                mdl->bytesused = 0;
                mdl->readpos = 0;
                mdl->m_flags = 0;
                mdl->skipped = 0;
                mdl->curr_buf = NULL;
        }

        /* q_busy is restricted to a max MDL count imposed by the firmware */
        if (q == &s->q_busy &&
            atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
                q = &s->q_free;

        spin_lock(&q->lock);

        if (to_front)
                list_add(&mdl->list, &q->list); /* LIFO */
        else
                list_add_tail(&mdl->list, &q->list); /* FIFO */
        q->bytesused += mdl->bytesused - mdl->readpos;
        atomic_inc(&q->depth);

        spin_unlock(&q->lock);
        return q;
}

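/* Remove and return the MDL at the front of a queue, or NULL if empty */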
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
        struct cx18_mdl *mdl = NULL;

        spin_lock(&q->lock);
        if (!list_empty(&q->list)) {
                mdl = list_first_entry(&q->list, struct cx18_mdl, list);
                list_del_init(&mdl->list);
                q->bytesused -= mdl->bytesused - mdl->readpos;
                mdl->skipped = 0;
                atomic_dec(&q->depth);
        }
        spin_unlock(&q->lock);
        return mdl;
}

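/*
 * Distribute an MDL's bytesused across its buffers, filling each buffer to
 * capacity in list order, and sync each buffer for CPU access.  For example,
 * with buf_size == 2048 and mdl->bytesused == 5000, a three buffer MDL ends
 * up with 2048, 2048 and 904 used bytes; any remaining buffers get 0.
 */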
static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
                                          struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;
        u32 buf_size = s->buf_size;
        u32 bytesused = mdl->bytesused;

        list_for_each_entry(buf, &mdl->buf_list, list) {
                buf->readpos = 0;
                if (bytesused >= buf_size) {
                        buf->bytesused = buf_size;
                        bytesused -= buf_size;
                } else {
                        buf->bytesused = bytesused;
                        bytesused = 0;
                }
                cx18_buf_sync_for_cpu(s, buf);
        }
}

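/*
 * Fast path for the common case: an MDL with a single buffer (one buffer
 * per MDL is the driver's default) can be updated without walking a list.
 */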
static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
                                                struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        if (list_is_singular(&mdl->buf_list)) {
                buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
                                       list);
                buf->bytesused = mdl->bytesused;
                buf->readpos = 0;
                cx18_buf_sync_for_cpu(s, buf);
        } else {
                _cx18_mdl_update_bufs_for_cpu(s, mdl);
        }
}

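/*
 * Find the MDL with the given id on q_busy and pull it off the queue:
 * id is the MDL the firmware reported done, bytesused is how much data
 * the firmware wrote into it.  MDLs the firmware appears to have dropped
 * are swept back onto q_free to rejoin normal rotation.
 */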
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
        u32 bytesused)
{
        struct cx18 *cx = s->cx;
        struct cx18_mdl *mdl;
        struct cx18_mdl *tmp;
        struct cx18_mdl *ret = NULL;
        LIST_HEAD(sweep_up);

        /*
         * We don't have to acquire multiple q locks here, because we are
         * serialized by the single threaded work handler.
         * MDLs from the firmware will thus remain in order as
         * they are moved from q_busy to q_full or to the dvb ring buffer.
         */
        spin_lock(&s->q_busy.lock);
        list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
                /*
                 * We should find what the firmware told us is done,
                 * right at the front of the queue.  If we don't, we likely have
                 * missed an mdl done message from the firmware.
                 * Once we skip an mdl repeatedly, relative to the size of
                 * q_busy, we have high confidence we've missed it.
                 */
                if (mdl->id != id) {
                        mdl->skipped++;
                        if (mdl->skipped >= atomic_read(&s->q_busy.depth) - 1) {
                                /* mdl must have fallen out of rotation */
                                CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
                                          s->name, mdl->id, mdl->skipped);
                                /* Sweep it up to put it back into rotation */
                                list_move_tail(&mdl->list, &sweep_up);
                                atomic_dec(&s->q_busy.depth);
                        }
                        continue;
                }
                /*
                 * We pull the desired mdl off of the queue here.  Something
                 * will have to put it back on a queue later.
                 */
                list_del_init(&mdl->list);
                atomic_dec(&s->q_busy.depth);
                ret = mdl;
                break;
        }
        spin_unlock(&s->q_busy.lock);

        /*
         * We found the mdl for which we were looking.  Get it ready for
         * the caller to put on q_full or in the dvb ring buffer.
         */
        if (ret != NULL) {
                ret->bytesused = bytesused;
                ret->skipped = 0;
                /* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
                cx18_mdl_update_bufs_for_cpu(s, ret);
                if (s->type != CX18_ENC_STREAM_TYPE_TS)
                        set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
        }

        /* Put any mdls the firmware is ignoring back into normal rotation */
        list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
                list_del_init(&mdl->list);
                cx18_enqueue(s, mdl, &s->q_free);
        }
        return ret;
}

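/*
 * Nested queue locks are taken q_src then q_dst below.  With q_dst
 * restricted to q_free or q_idle, and q_idle never used as a source
 * queue in this driver, the lock ordering stays consistent and cannot
 * deadlock.
 */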
/* Move all MDLs from q_src to q_dst, resetting each MDL's state as it moves */
static void cx18_queue_flush(struct cx18_stream *s,
                             struct cx18_queue *q_src, struct cx18_queue *q_dst)
{
        struct cx18_mdl *mdl;

        /* It only makes sense to flush to q_free or q_idle */
        if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
                return;

        spin_lock(&q_src->lock);
        spin_lock(&q_dst->lock);
        while (!list_empty(&q_src->list)) {
                mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
                list_move_tail(&mdl->list, &q_dst->list);
                mdl->bytesused = 0;
                mdl->readpos = 0;
                mdl->m_flags = 0;
                mdl->skipped = 0;
                mdl->curr_buf = NULL;
                atomic_inc(&q_dst->depth);
        }
        cx18_queue_init(q_src);
        spin_unlock(&q_src->lock);
        spin_unlock(&q_dst->lock);
}

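/* Reclaim all MDLs from q_busy and q_full back onto q_free */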
void cx18_flush_queues(struct cx18_stream *s)
{
        cx18_queue_flush(s, &s->q_busy, &s->q_free);
        cx18_queue_flush(s, &s->q_full, &s->q_free);
}

/*
 * Note: s->buf_pool is not protected by a lock; the stream had better not
 * have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
        struct cx18_queue *q_idle = &s->q_idle;
        struct cx18_mdl *mdl;
        struct cx18_buffer *buf;

        /* Move all MDLs to q_idle */
        cx18_queue_flush(s, &s->q_busy, q_idle);
        cx18_queue_flush(s, &s->q_full, q_idle);
        cx18_queue_flush(s, &s->q_free, q_idle);

        /* Reset MDL ids and move all buffers back to the stream's buf_pool */
        spin_lock(&q_idle->lock);
        list_for_each_entry(mdl, &q_idle->list, list) {
                while (!list_empty(&mdl->buf_list)) {
                        buf = list_first_entry(&mdl->buf_list,
                                               struct cx18_buffer, list);
                        list_move_tail(&buf->list, &s->buf_pool);
                        buf->bytesused = 0;
                        buf->readpos = 0;
                }
                mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
                /* all other mdl fields were cleared by cx18_queue_flush() */
        }
        spin_unlock(&q_idle->lock);
}

/*
 * Note: s->buf_pool is not protected by a lock; the stream had better not
 * have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        struct cx18_mdl *mdl;
        struct cx18_buffer *buf;
        int mdl_id;
        int i;
        u32 partial_buf_size;

        /*
         * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free.
         * Excess MDLs are left on q_idle.
         * Excess buffers are left in buf_pool and/or on an MDL in q_idle.
         */
        mdl_id = s->mdl_base_idx;
        for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
             mdl != NULL && i == s->bufs_per_mdl;
             mdl = cx18_dequeue(s, &s->q_idle)) {

                mdl->id = mdl_id;

                for (i = 0; i < s->bufs_per_mdl; i++) {
                        if (list_empty(&s->buf_pool))
                                break;

                        buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
                                               list);
                        list_move_tail(&buf->list, &mdl->buf_list);

                        /* update the firmware's MDL array with this buffer */
                        cx18_writel(cx, buf->dma_handle,
                                    &cx->scb->cpu_mdl[mdl_id + i].paddr);
                        cx18_writel(cx, s->buf_size,
                                    &cx->scb->cpu_mdl[mdl_id + i].length);
                }

                if (i == s->bufs_per_mdl) {
                        /*
                         * The encoder doesn't honor s->mdl_size.  So in the
                         * case of a non-integral number of buffers to meet
                         * mdl_size, we lie about the size of the last buffer
                         * in the MDL to get the encoder to really only send
                         * us mdl_size bytes per MDL transfer.
                         */
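                        /*
                         * For example (hypothetical sizes), mdl_size == 5000
                         * with buf_size == 2048 takes 3 buffers, and the last
                         * buffer's length is reported to the firmware as
                         * 5000 % 2048 == 904, not 2048.
                         */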
                        partial_buf_size = s->mdl_size % s->buf_size;
                        if (partial_buf_size) {
                                cx18_writel(cx, partial_buf_size,
                                      &cx->scb->cpu_mdl[mdl_id + i - 1].length);
                        }
                        cx18_enqueue(s, mdl, &s->q_free);
                } else {
                        /* Not enough buffers for this MDL; we won't use it */
                        cx18_push(s, mdl, &s->q_idle);
                }
                mdl_id += i;
        }
}

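/* Sync all of an MDL's buffers for DMA access by the device */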
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
{
        int dma = s->dma;
        u32 buf_size = s->buf_size;
        struct pci_dev *pci_dev = s->cx->pci_dev;
        struct cx18_buffer *buf;

        list_for_each_entry(buf, &mdl->buf_list, list)
                pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
                                               buf_size, dma);
}

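/*
 * Allocate a stream's buffers and MDLs (one MDL per buffer), after checking
 * that the firmware's MDL table in the SCB has room for them all.  On
 * failure, everything allocated so far is freed and -ENOMEM is returned.
 */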
int cx18_stream_alloc(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        int i;

        if (s->buffers == 0)
                return 0;

        CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
                s->name, s->buffers, s->buf_size,
                s->buffers * s->buf_size / 1024,
                (s->buffers * s->buf_size * 100 / 1024) % 100);

        if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -
                                (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
                unsigned int bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
                                        ((char __iomem *)cx->scb->cpu_mdl));

                CX18_ERR("Too many buffers, cannot fit in SCB area\n");
                CX18_ERR("Max buffers = %zd\n",
                        bufsz / sizeof(struct cx18_mdl_ent));
                return -ENOMEM;
        }

        s->mdl_base_idx = cx->free_mdl_idx;

        /* allocate stream buffers and MDLs */
        for (i = 0; i < s->buffers; i++) {
                struct cx18_mdl *mdl;
                struct cx18_buffer *buf;

                /* 1 MDL per buffer to handle the worst & also default case */
                mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
                if (mdl == NULL)
                        break;

                buf = kzalloc(sizeof(struct cx18_buffer),
                                GFP_KERNEL|__GFP_NOWARN);
                if (buf == NULL) {
                        kfree(mdl);
                        break;
                }

                buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
                if (buf->buf == NULL) {
                        kfree(mdl);
                        kfree(buf);
                        break;
                }

                INIT_LIST_HEAD(&mdl->list);
                INIT_LIST_HEAD(&mdl->buf_list);
                mdl->id = s->mdl_base_idx; /* a somewhat safe value */
                cx18_enqueue(s, mdl, &s->q_idle);

                INIT_LIST_HEAD(&buf->list);
                buf->dma_handle = pci_map_single(s->cx->pci_dev,
                                buf->buf, s->buf_size, s->dma);
                cx18_buf_sync_for_cpu(s, buf);
                list_add_tail(&buf->list, &s->buf_pool);
        }
        if (i == s->buffers) {
                cx->free_mdl_idx += s->buffers;
                return 0;
        }
        CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
        cx18_stream_free(s);
        return -ENOMEM;
}

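/*
 * Tear-down counterpart of cx18_stream_alloc(): unload the queues so every
 * buffer is back in buf_pool and every MDL is on q_idle, then free them all.
 */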
void cx18_stream_free(struct cx18_stream *s)
{
        struct cx18_mdl *mdl;
        struct cx18_buffer *buf;
        struct cx18 *cx = s->cx;

        CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);

        /* move all buffers to buf_pool and all MDLs to q_idle */
        cx18_unload_queues(s);

        /* empty q_idle */
        while ((mdl = cx18_dequeue(s, &s->q_idle)))
                kfree(mdl);

        /* empty buf_pool */
        while (!list_empty(&s->buf_pool)) {
                buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
                list_del_init(&buf->list);

                pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
                                s->buf_size, s->dma);
                kfree(buf->buf);
                kfree(buf);
        }
}