linux/drivers/media/platform/s5p-mfc/s5p_mfc.c
   1/*
   2 * Samsung S5P Multi Format Codec v 5.1
   3 *
   4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
   5 * Kamil Debski, <k.debski@samsung.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 */
  12
  13#include <linux/clk.h>
  14#include <linux/delay.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/module.h>
  18#include <linux/platform_device.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/videodev2.h>
  22#include <media/v4l2-event.h>
  23#include <linux/workqueue.h>
  24#include <linux/of.h>
  25#include <media/videobuf2-core.h>
  26#include "s5p_mfc_common.h"
  27#include "s5p_mfc_ctrl.h"
  28#include "s5p_mfc_debug.h"
  29#include "s5p_mfc_dec.h"
  30#include "s5p_mfc_enc.h"
  31#include "s5p_mfc_intr.h"
  32#include "s5p_mfc_opr.h"
  33#include "s5p_mfc_cmd.h"
  34#include "s5p_mfc_pm.h"
  35
  36#define S5P_MFC_NAME            "s5p-mfc"
  37#define S5P_MFC_DEC_NAME        "s5p-mfc-dec"
  38#define S5P_MFC_ENC_NAME        "s5p-mfc-enc"
  39
  40int mfc_debug_level;
  41module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
  42MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
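/*
 * Example usage (assuming the module is loaded as s5p-mfc): pass
 * "modprobe s5p-mfc debug=3" at load time, or write to
 * /sys/module/s5p_mfc/parameters/debug at runtime (the parameter is 0644).
 */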
  43
  44/* Helper functions for interrupt processing */
  45
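/*
 * Each bit in dev->ctx_work_bits marks a context with work pending for the
 * hardware scheduler; dev->condlock protects the bitmask.
 */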
  46/* Remove from hw execution round robin */
  47void clear_work_bit(struct s5p_mfc_ctx *ctx)
  48{
  49        struct s5p_mfc_dev *dev = ctx->dev;
  50
  51        spin_lock(&dev->condlock);
  52        __clear_bit(ctx->num, &dev->ctx_work_bits);
  53        spin_unlock(&dev->condlock);
  54}
  55
  56/* Add to hw execution round robin */
  57void set_work_bit(struct s5p_mfc_ctx *ctx)
  58{
  59        struct s5p_mfc_dev *dev = ctx->dev;
  60
  61        spin_lock(&dev->condlock);
  62        __set_bit(ctx->num, &dev->ctx_work_bits);
  63        spin_unlock(&dev->condlock);
  64}
  65
  66/* Remove from hw execution round robin */
  67void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
  68{
  69        struct s5p_mfc_dev *dev = ctx->dev;
  70        unsigned long flags;
  71
  72        spin_lock_irqsave(&dev->condlock, flags);
  73        __clear_bit(ctx->num, &dev->ctx_work_bits);
  74        spin_unlock_irqrestore(&dev->condlock, flags);
  75}
  76
  77/* Add to hw execution round robin */
  78void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
  79{
  80        struct s5p_mfc_dev *dev = ctx->dev;
  81        unsigned long flags;
  82
  83        spin_lock_irqsave(&dev->condlock, flags);
  84        __set_bit(ctx->num, &dev->ctx_work_bits);
  85        spin_unlock_irqrestore(&dev->condlock, flags);
  86}
  87
  88/* Wake up context wait_queue */
  89static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
  90                        unsigned int err)
  91{
  92        ctx->int_cond = 1;
  93        ctx->int_type = reason;
  94        ctx->int_err = err;
  95        wake_up(&ctx->queue);
  96}
  97
  98/* Wake up device wait_queue */
  99static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
 100                        unsigned int err)
 101{
 102        dev->int_cond = 1;
 103        dev->int_type = reason;
 104        dev->int_err = err;
 105        wake_up(&dev->queue);
 106}
 107
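/*
 * The watchdog timer re-arms itself every MFC_WATCHDOG_INTERVAL ms; when the
 * hardware lock stays held for MFC_WATCHDOG_CNT consecutive ticks, the worker
 * below assumes the IP is stuck and reloads the firmware.
 */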
 108static void s5p_mfc_watchdog(unsigned long arg)
 109{
 110        struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
 111
 112        if (test_bit(0, &dev->hw_lock))
 113                atomic_inc(&dev->watchdog_cnt);
 114        if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
  115                /* This means that the hardware is busy and has not
  116                 * raised an interrupt for MFC_WATCHDOG_CNT consecutive
  117                 * runs of this watchdog timer. This usually means a
  118                 * serious hardware error. Now it is time to kill all
  119                 * instances and reset the MFC. */
  120                mfc_err("Timeout while waiting for HW\n");
 121                queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
 122        }
 123        dev->watchdog_timer.expires = jiffies +
 124                                        msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
 125        add_timer(&dev->watchdog_timer);
 126}
 127
 128static void s5p_mfc_watchdog_worker(struct work_struct *work)
 129{
 130        struct s5p_mfc_dev *dev;
 131        struct s5p_mfc_ctx *ctx;
 132        unsigned long flags;
 133        int mutex_locked;
 134        int i, ret;
 135
 136        dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
 137
 138        mfc_err("Driver timeout error handling\n");
 139        /* Lock the mutex that protects open and release.
 140         * This is necessary as they may load and unload firmware. */
 141        mutex_locked = mutex_trylock(&dev->mfc_mutex);
 142        if (!mutex_locked)
 143                mfc_err("Error: some instance may be closing/opening\n");
 144        spin_lock_irqsave(&dev->irqlock, flags);
 145
 146        s5p_mfc_clock_off();
 147
 148        for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
 149                ctx = dev->ctx[i];
 150                if (!ctx)
 151                        continue;
 152                ctx->state = MFCINST_ERROR;
 153                s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
 154                                                &ctx->dst_queue, &ctx->vq_dst);
 155                s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
 156                                                &ctx->src_queue, &ctx->vq_src);
 157                clear_work_bit(ctx);
 158                wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
 159        }
 160        clear_bit(0, &dev->hw_lock);
 161        spin_unlock_irqrestore(&dev->irqlock, flags);
  162        /* Double check if there is at least one instance running.
  163         * If no instance is in memory then no firmware should be present. */
 164        if (dev->num_inst > 0) {
 165                ret = s5p_mfc_load_firmware(dev);
 166                if (ret) {
 167                        mfc_err("Failed to reload FW\n");
 168                        goto unlock;
 169                }
 170                s5p_mfc_clock_on();
 171                ret = s5p_mfc_init_hw(dev);
 172                if (ret)
 173                        mfc_err("Failed to reinit FW\n");
 174        }
 175unlock:
 176        if (mutex_locked)
 177                mutex_unlock(&dev->mfc_mutex);
 178}
 179
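/* Acknowledge a RISC-to-host interrupt by clearing the handshake registers */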
 180static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
 181{
 182        mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
 183        mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
 184        mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
 185}
 186
 187static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
 188{
 189        struct s5p_mfc_buf *dst_buf;
 190        struct s5p_mfc_dev *dev = ctx->dev;
 191
 192        ctx->state = MFCINST_FINISHED;
 193        ctx->sequence++;
 194        while (!list_empty(&ctx->dst_queue)) {
 195                dst_buf = list_entry(ctx->dst_queue.next,
 196                                     struct s5p_mfc_buf, list);
 197                mfc_debug(2, "Cleaning up buffer: %d\n",
 198                                          dst_buf->b->v4l2_buf.index);
 199                vb2_set_plane_payload(dst_buf->b, 0, 0);
 200                vb2_set_plane_payload(dst_buf->b, 1, 0);
 201                list_del(&dst_buf->list);
 202                ctx->dst_queue_cnt--;
 203                dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
 204
 205                if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
 206                        s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
 207                        dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
 208                else
 209                        dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
 210
 211                ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
 212                vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
 213        }
 214}
 215
 216static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
 217{
 218        struct s5p_mfc_dev *dev = ctx->dev;
 219        struct s5p_mfc_buf  *dst_buf, *src_buf;
 220        size_t dec_y_addr;
 221        unsigned int frame_type;
 222
 223        dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
 224        frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
 225
 226        /* Copy timestamp / timecode from decoded src to dst and set
 227           appropriate flags */
 228        src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
 229        list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
 230                if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
 231                        dst_buf->b->v4l2_buf.timecode =
 232                                                src_buf->b->v4l2_buf.timecode;
 233                        dst_buf->b->v4l2_buf.timestamp =
 234                                                src_buf->b->v4l2_buf.timestamp;
 235                        dst_buf->b->v4l2_buf.flags &=
 236                                ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 237                        dst_buf->b->v4l2_buf.flags |=
 238                                src_buf->b->v4l2_buf.flags
 239                                & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 240                        switch (frame_type) {
 241                        case S5P_FIMV_DECODE_FRAME_I_FRAME:
 242                                dst_buf->b->v4l2_buf.flags |=
 243                                                V4L2_BUF_FLAG_KEYFRAME;
 244                                break;
 245                        case S5P_FIMV_DECODE_FRAME_P_FRAME:
 246                                dst_buf->b->v4l2_buf.flags |=
 247                                                V4L2_BUF_FLAG_PFRAME;
 248                                break;
 249                        case S5P_FIMV_DECODE_FRAME_B_FRAME:
 250                                dst_buf->b->v4l2_buf.flags |=
 251                                                V4L2_BUF_FLAG_BFRAME;
 252                                break;
 253                        }
 254                        break;
 255                }
 256        }
 257}
 258
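/*
 * A frame is ready for display: find the CAPTURE buffer whose luma address
 * matches the one reported by the hardware and return it to userspace.
 */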
 259static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
 260{
 261        struct s5p_mfc_dev *dev = ctx->dev;
 262        struct s5p_mfc_buf  *dst_buf;
 263        size_t dspl_y_addr;
 264        unsigned int frame_type;
 265
 266        dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
 267        if (IS_MFCV6_PLUS(dev))
 268                frame_type = s5p_mfc_hw_call(dev->mfc_ops,
 269                        get_disp_frame_type, ctx);
 270        else
 271                frame_type = s5p_mfc_hw_call(dev->mfc_ops,
 272                        get_dec_frame_type, dev);
 273
 274        /* If frame is same as previous then skip and do not dequeue */
 275        if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
 276                if (!ctx->after_packed_pb)
 277                        ctx->sequence++;
 278                ctx->after_packed_pb = 0;
 279                return;
 280        }
 281        ctx->sequence++;
  282        /* The MFC returns the address of the buffer, now we have to
  283         * check which vb2 buffer it corresponds to */
 284        list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
 285                /* Check if this is the buffer we're looking for */
 286                if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
 287                        list_del(&dst_buf->list);
 288                        ctx->dst_queue_cnt--;
 289                        dst_buf->b->v4l2_buf.sequence = ctx->sequence;
 290                        if (s5p_mfc_hw_call(dev->mfc_ops,
 291                                        get_pic_type_top, ctx) ==
 292                                s5p_mfc_hw_call(dev->mfc_ops,
 293                                        get_pic_type_bot, ctx))
 294                                dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
 295                        else
 296                                dst_buf->b->v4l2_buf.field =
 297                                                        V4L2_FIELD_INTERLACED;
 298                        vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
 299                        vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
 300                        clear_bit(dst_buf->b->v4l2_buf.index,
 301                                                        &ctx->dec_dst_flag);
 302
 303                        vb2_buffer_done(dst_buf->b,
 304                                err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
 305
 306                        break;
 307                }
 308        }
 309}
 310
 311/* Handle frame decoding interrupt */
 312static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
 313                                        unsigned int reason, unsigned int err)
 314{
 315        struct s5p_mfc_dev *dev = ctx->dev;
 316        unsigned int dst_frame_status;
 317        unsigned int dec_frame_status;
 318        struct s5p_mfc_buf *src_buf;
 319        unsigned long flags;
 320        unsigned int res_change;
 321
 322        dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
 323                                & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
 324        dec_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dec_status, dev)
 325                                & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
 326        res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
 327                                & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
 328                                >> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
 329        mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
 330        if (ctx->state == MFCINST_RES_CHANGE_INIT)
 331                ctx->state = MFCINST_RES_CHANGE_FLUSH;
 332        if (res_change == S5P_FIMV_RES_INCREASE ||
 333                res_change == S5P_FIMV_RES_DECREASE) {
 334                ctx->state = MFCINST_RES_CHANGE_INIT;
 335                s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 336                wake_up_ctx(ctx, reason, err);
 337                if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 338                        BUG();
 339                s5p_mfc_clock_off();
 340                s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 341                return;
 342        }
 343        if (ctx->dpb_flush_flag)
 344                ctx->dpb_flush_flag = 0;
 345
 346        spin_lock_irqsave(&dev->irqlock, flags);
 347        /* All frames remaining in the buffer have been extracted  */
 348        if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
 349                if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
 350                        static const struct v4l2_event ev_src_ch = {
 351                                .type = V4L2_EVENT_SOURCE_CHANGE,
 352                                .u.src_change.changes =
 353                                        V4L2_EVENT_SRC_CH_RESOLUTION,
 354                        };
 355
 356                        s5p_mfc_handle_frame_all_extracted(ctx);
 357                        ctx->state = MFCINST_RES_CHANGE_END;
 358                        v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
 359
 360                        goto leave_handle_frame;
 361                } else {
 362                        s5p_mfc_handle_frame_all_extracted(ctx);
 363                }
 364        }
 365
 366        if (dec_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY)
 367                s5p_mfc_handle_frame_copy_time(ctx);
 368
 369        /* A frame has been decoded and is in the buffer  */
 370        if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
 371            dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
 372                s5p_mfc_handle_frame_new(ctx, err);
 373        } else {
 374                mfc_debug(2, "No frame decode\n");
 375        }
 376        /* Mark source buffer as complete */
 377        if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
 378                && !list_empty(&ctx->src_queue)) {
 379                src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
 380                                                                list);
 381                ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
 382                                                get_consumed_stream, dev);
 383                if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
 384                        ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
 385                        ctx->consumed_stream + STUFF_BYTE <
 386                        src_buf->b->v4l2_planes[0].bytesused) {
 387                        /* Run MFC again on the same buffer */
  388                        mfc_debug(2, "Running the same buffer again\n");
 389                        ctx->after_packed_pb = 1;
 390                } else {
 391                        mfc_debug(2, "MFC needs next buffer\n");
 392                        ctx->consumed_stream = 0;
 393                        if (src_buf->flags & MFC_BUF_FLAG_EOS)
 394                                ctx->state = MFCINST_FINISHING;
 395                        list_del(&src_buf->list);
 396                        ctx->src_queue_cnt--;
 397                        if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
 398                                vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
 399                        else
 400                                vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
 401                }
 402        }
 403leave_handle_frame:
 404        spin_unlock_irqrestore(&dev->irqlock, flags);
 405        if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
 406                                    || ctx->dst_queue_cnt < ctx->pb_count)
 407                clear_work_bit(ctx);
 408        s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 409        wake_up_ctx(ctx, reason, err);
 410        if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 411                BUG();
 412        s5p_mfc_clock_off();
  413        /* If suspending, wake up the device and do not try_run again */
 414        if (test_bit(0, &dev->enter_suspend))
 415                wake_up_dev(dev, reason, err);
 416        else
 417                s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 418}
 419
 420/* Error handling for interrupt */
 421static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
 422                struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
 423{
 424        unsigned long flags;
 425
 426        mfc_err("Interrupt Error: %08x\n", err);
 427
 428        if (ctx != NULL) {
 429                /* Error recovery is dependent on the state of context */
 430                switch (ctx->state) {
 431                case MFCINST_RES_CHANGE_INIT:
 432                case MFCINST_RES_CHANGE_FLUSH:
 433                case MFCINST_RES_CHANGE_END:
 434                case MFCINST_FINISHING:
 435                case MFCINST_FINISHED:
 436                case MFCINST_RUNNING:
 437                        /* It is highly probable that an error occurred
 438                         * while decoding a frame */
 439                        clear_work_bit(ctx);
 440                        ctx->state = MFCINST_ERROR;
 441                        /* Mark all dst buffers as having an error */
 442                        spin_lock_irqsave(&dev->irqlock, flags);
 443                        s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
 444                                                &ctx->dst_queue, &ctx->vq_dst);
 445                        /* Mark all src buffers as having an error */
 446                        s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
 447                                                &ctx->src_queue, &ctx->vq_src);
 448                        spin_unlock_irqrestore(&dev->irqlock, flags);
 449                        wake_up_ctx(ctx, reason, err);
 450                        break;
 451                default:
 452                        clear_work_bit(ctx);
 453                        ctx->state = MFCINST_ERROR;
 454                        wake_up_ctx(ctx, reason, err);
 455                        break;
 456                }
 457        }
 458        if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 459                BUG();
 460        s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 461        s5p_mfc_clock_off();
 462        wake_up_dev(dev, reason, err);
 463        return;
 464}
 465
 466/* Header parsing interrupt handling */
 467static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
 468                                 unsigned int reason, unsigned int err)
 469{
 470        struct s5p_mfc_dev *dev;
 471
 472        if (ctx == NULL)
 473                return;
 474        dev = ctx->dev;
 475        if (ctx->c_ops->post_seq_start) {
 476                if (ctx->c_ops->post_seq_start(ctx))
 477                        mfc_err("post_seq_start() failed\n");
 478        } else {
 479                ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width,
 480                                dev);
 481                ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
 482                                dev);
 483
 484                s5p_mfc_hw_call_void(dev->mfc_ops, dec_calc_dpb_size, ctx);
 485
 486                ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
 487                                dev);
 488                ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
 489                                dev);
 490                if (ctx->img_width == 0 || ctx->img_height == 0)
 491                        ctx->state = MFCINST_ERROR;
 492                else
 493                        ctx->state = MFCINST_HEAD_PARSED;
 494
 495                if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
 496                        ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
 497                                !list_empty(&ctx->src_queue)) {
 498                        struct s5p_mfc_buf *src_buf;
 499                        src_buf = list_entry(ctx->src_queue.next,
 500                                        struct s5p_mfc_buf, list);
 501                        if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
 502                                                dev) <
 503                                        src_buf->b->v4l2_planes[0].bytesused)
 504                                ctx->head_processed = 0;
 505                        else
 506                                ctx->head_processed = 1;
 507                } else {
 508                        ctx->head_processed = 1;
 509                }
 510        }
 511        s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 512        clear_work_bit(ctx);
 513        if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 514                BUG();
 515        s5p_mfc_clock_off();
 516        s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 517        wake_up_ctx(ctx, reason, err);
 518}
 519
  520/* Buffer initialization interrupt handling */
 521static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
 522                                 unsigned int reason, unsigned int err)
 523{
 524        struct s5p_mfc_buf *src_buf;
 525        struct s5p_mfc_dev *dev;
 526        unsigned long flags;
 527
 528        if (ctx == NULL)
 529                return;
 530        dev = ctx->dev;
 531        s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 532        ctx->int_type = reason;
 533        ctx->int_err = err;
 534        ctx->int_cond = 1;
 535        clear_work_bit(ctx);
 536        if (err == 0) {
 537                ctx->state = MFCINST_RUNNING;
 538                if (!ctx->dpb_flush_flag && ctx->head_processed) {
 539                        spin_lock_irqsave(&dev->irqlock, flags);
 540                        if (!list_empty(&ctx->src_queue)) {
 541                                src_buf = list_entry(ctx->src_queue.next,
 542                                             struct s5p_mfc_buf, list);
 543                                list_del(&src_buf->list);
 544                                ctx->src_queue_cnt--;
 545                                vb2_buffer_done(src_buf->b,
 546                                                VB2_BUF_STATE_DONE);
 547                        }
 548                        spin_unlock_irqrestore(&dev->irqlock, flags);
 549                } else {
 550                        ctx->dpb_flush_flag = 0;
 551                }
 552                if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 553                        BUG();
 554
 555                s5p_mfc_clock_off();
 556
 557                wake_up(&ctx->queue);
 558                s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 559        } else {
 560                if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 561                        BUG();
 562
 563                s5p_mfc_clock_off();
 564
 565                wake_up(&ctx->queue);
 566        }
 567}
 568
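/* Handle the encoder COMPLETE_SEQ interrupt: the stream has been finalized */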
 569static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
 570                                 unsigned int reason, unsigned int err)
 571{
 572        struct s5p_mfc_dev *dev = ctx->dev;
 573        struct s5p_mfc_buf *mb_entry;
 574
 575        mfc_debug(2, "Stream completed\n");
 576
 577        s5p_mfc_clear_int_flags(dev);
 578        ctx->int_type = reason;
 579        ctx->int_err = err;
 580        ctx->state = MFCINST_FINISHED;
 581
 582        spin_lock(&dev->irqlock);
 583        if (!list_empty(&ctx->dst_queue)) {
 584                mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
 585                                                                        list);
 586                list_del(&mb_entry->list);
 587                ctx->dst_queue_cnt--;
 588                vb2_set_plane_payload(mb_entry->b, 0, 0);
 589                vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
 590        }
 591        spin_unlock(&dev->irqlock);
 592
 593        clear_work_bit(ctx);
 594
 595        WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
 596
 597        s5p_mfc_clock_off();
 598        wake_up(&ctx->queue);
 599        s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 600}
 601
 602/* Interrupt processing */
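/*
 * Bit 0 of dev->hw_lock is held while a hardware operation is in flight; the
 * handlers below clear it (and the interrupt flags) before releasing the
 * clock and scheduling the next context with try_run.
 */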
 603static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 604{
 605        struct s5p_mfc_dev *dev = priv;
 606        struct s5p_mfc_ctx *ctx;
 607        unsigned int reason;
 608        unsigned int err;
 609
 610        mfc_debug_enter();
 611        /* Reset the timeout watchdog */
 612        atomic_set(&dev->watchdog_cnt, 0);
 613        ctx = dev->ctx[dev->curr_ctx];
  614        /* Get the interrupt reason and the error code */
 615        reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
 616        err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
 617        mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
 618        switch (reason) {
 619        case S5P_MFC_R2H_CMD_ERR_RET:
 620                /* An error has occurred */
 621                if (ctx->state == MFCINST_RUNNING &&
 622                        s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
 623                                dev->warn_start)
 624                        s5p_mfc_handle_frame(ctx, reason, err);
 625                else
 626                        s5p_mfc_handle_error(dev, ctx, reason, err);
 627                clear_bit(0, &dev->enter_suspend);
 628                break;
 629
 630        case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
 631        case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
 632        case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
 633                if (ctx->c_ops->post_frame_start) {
 634                        if (ctx->c_ops->post_frame_start(ctx))
 635                                mfc_err("post_frame_start() failed\n");
 636                        s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 637                        wake_up_ctx(ctx, reason, err);
 638                        if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 639                                BUG();
 640                        s5p_mfc_clock_off();
 641                        s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 642                } else {
 643                        s5p_mfc_handle_frame(ctx, reason, err);
 644                }
 645                break;
 646
 647        case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
 648                s5p_mfc_handle_seq_done(ctx, reason, err);
 649                break;
 650
 651        case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
 652                ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
 653                ctx->state = MFCINST_GOT_INST;
 654                clear_work_bit(ctx);
 655                wake_up(&ctx->queue);
 656                goto irq_cleanup_hw;
 657
 658        case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
 659                clear_work_bit(ctx);
 660                ctx->inst_no = MFC_NO_INSTANCE_SET;
 661                ctx->state = MFCINST_FREE;
 662                wake_up(&ctx->queue);
 663                goto irq_cleanup_hw;
 664
 665        case S5P_MFC_R2H_CMD_SYS_INIT_RET:
 666        case S5P_MFC_R2H_CMD_FW_STATUS_RET:
 667        case S5P_MFC_R2H_CMD_SLEEP_RET:
 668        case S5P_MFC_R2H_CMD_WAKEUP_RET:
 669                if (ctx)
 670                        clear_work_bit(ctx);
 671                s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 672                wake_up_dev(dev, reason, err);
 673                clear_bit(0, &dev->hw_lock);
 674                clear_bit(0, &dev->enter_suspend);
 675                break;
 676
 677        case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
 678                s5p_mfc_handle_init_buffers(ctx, reason, err);
 679                break;
 680
 681        case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
 682                s5p_mfc_handle_stream_complete(ctx, reason, err);
 683                break;
 684
 685        case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
 686                clear_work_bit(ctx);
 687                ctx->state = MFCINST_RUNNING;
 688                wake_up(&ctx->queue);
 689                goto irq_cleanup_hw;
 690
 691        default:
 692                mfc_debug(2, "Unknown int reason\n");
 693                s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 694        }
 695        mfc_debug_leave();
 696        return IRQ_HANDLED;
 697irq_cleanup_hw:
 698        s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
 699        ctx->int_type = reason;
 700        ctx->int_err = err;
 701        ctx->int_cond = 1;
 702        if (test_and_clear_bit(0, &dev->hw_lock) == 0)
 703                mfc_err("Failed to unlock hw\n");
 704
 705        s5p_mfc_clock_off();
 706
 707        s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
 708        mfc_debug(2, "Exit via irq_cleanup_hw\n");
 709        return IRQ_HANDLED;
 710}
 711
 712/* Open an MFC node */
 713static int s5p_mfc_open(struct file *file)
 714{
 715        struct video_device *vdev = video_devdata(file);
 716        struct s5p_mfc_dev *dev = video_drvdata(file);
 717        struct s5p_mfc_ctx *ctx = NULL;
 718        struct vb2_queue *q;
 719        int ret = 0;
 720
 721        mfc_debug_enter();
 722        if (mutex_lock_interruptible(&dev->mfc_mutex))
 723                return -ERESTARTSYS;
 724        dev->num_inst++;        /* It is guarded by mfc_mutex in vfd */
 725        /* Allocate memory for context */
 726        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 727        if (!ctx) {
 728                mfc_err("Not enough memory\n");
 729                ret = -ENOMEM;
 730                goto err_alloc;
 731        }
 732        v4l2_fh_init(&ctx->fh, vdev);
 733        file->private_data = &ctx->fh;
 734        v4l2_fh_add(&ctx->fh);
 735        ctx->dev = dev;
 736        INIT_LIST_HEAD(&ctx->src_queue);
 737        INIT_LIST_HEAD(&ctx->dst_queue);
 738        ctx->src_queue_cnt = 0;
 739        ctx->dst_queue_cnt = 0;
 740        /* Get context number */
 741        ctx->num = 0;
 742        while (dev->ctx[ctx->num]) {
 743                ctx->num++;
 744                if (ctx->num >= MFC_NUM_CONTEXTS) {
 745                        mfc_err("Too many open contexts\n");
 746                        ret = -EBUSY;
 747                        goto err_no_ctx;
 748                }
 749        }
 750        /* Mark context as idle */
 751        clear_work_bit_irqsave(ctx);
 752        dev->ctx[ctx->num] = ctx;
 753        if (vdev == dev->vfd_dec) {
 754                ctx->type = MFCINST_DECODER;
 755                ctx->c_ops = get_dec_codec_ops();
 756                s5p_mfc_dec_init(ctx);
 757                /* Setup ctrl handler */
 758                ret = s5p_mfc_dec_ctrls_setup(ctx);
 759                if (ret) {
 760                        mfc_err("Failed to setup mfc controls\n");
 761                        goto err_ctrls_setup;
 762                }
 763        } else if (vdev == dev->vfd_enc) {
 764                ctx->type = MFCINST_ENCODER;
 765                ctx->c_ops = get_enc_codec_ops();
 766                /* only for encoder */
 767                INIT_LIST_HEAD(&ctx->ref_queue);
 768                ctx->ref_queue_cnt = 0;
 769                s5p_mfc_enc_init(ctx);
 770                /* Setup ctrl handler */
 771                ret = s5p_mfc_enc_ctrls_setup(ctx);
 772                if (ret) {
 773                        mfc_err("Failed to setup mfc controls\n");
 774                        goto err_ctrls_setup;
 775                }
 776        } else {
 777                ret = -ENOENT;
 778                goto err_bad_node;
 779        }
 780        ctx->fh.ctrl_handler = &ctx->ctrl_handler;
 781        ctx->inst_no = MFC_NO_INSTANCE_SET;
 782        /* Load firmware if this is the first instance */
 783        if (dev->num_inst == 1) {
 784                dev->watchdog_timer.expires = jiffies +
 785                                        msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
 786                add_timer(&dev->watchdog_timer);
 787                ret = s5p_mfc_power_on();
 788                if (ret < 0) {
 789                        mfc_err("power on failed\n");
 790                        goto err_pwr_enable;
 791                }
 792                s5p_mfc_clock_on();
 793                ret = s5p_mfc_load_firmware(dev);
 794                if (ret) {
 795                        s5p_mfc_clock_off();
 796                        goto err_load_fw;
 797                }
 798                /* Init the FW */
 799                ret = s5p_mfc_init_hw(dev);
 800                s5p_mfc_clock_off();
 801                if (ret)
 802                        goto err_init_hw;
 803        }
 804        /* Init videobuf2 queue for CAPTURE */
 805        q = &ctx->vq_dst;
 806        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 807        q->drv_priv = &ctx->fh;
 808        if (vdev == dev->vfd_dec) {
 809                q->io_modes = VB2_MMAP;
 810                q->ops = get_dec_queue_ops();
 811        } else if (vdev == dev->vfd_enc) {
 812                q->io_modes = VB2_MMAP | VB2_USERPTR;
 813                q->ops = get_enc_queue_ops();
 814        } else {
 815                ret = -ENOENT;
 816                goto err_queue_init;
 817        }
 818        q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
 819        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 820        ret = vb2_queue_init(q);
 821        if (ret) {
 822                mfc_err("Failed to initialize videobuf2 queue(capture)\n");
 823                goto err_queue_init;
 824        }
 825        /* Init videobuf2 queue for OUTPUT */
 826        q = &ctx->vq_src;
 827        q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 828        q->io_modes = VB2_MMAP;
 829        q->drv_priv = &ctx->fh;
 830        if (vdev == dev->vfd_dec) {
 831                q->io_modes = VB2_MMAP;
 832                q->ops = get_dec_queue_ops();
 833        } else if (vdev == dev->vfd_enc) {
 834                q->io_modes = VB2_MMAP | VB2_USERPTR;
 835                q->ops = get_enc_queue_ops();
 836        } else {
 837                ret = -ENOENT;
 838                goto err_queue_init;
 839        }
 840        q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
 841        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 842        ret = vb2_queue_init(q);
 843        if (ret) {
 844                mfc_err("Failed to initialize videobuf2 queue(output)\n");
 845                goto err_queue_init;
 846        }
 847        init_waitqueue_head(&ctx->queue);
 848        mutex_unlock(&dev->mfc_mutex);
 849        mfc_debug_leave();
 850        return ret;
  851        /* Deinitialize when a failure occurs */
 852err_queue_init:
 853        if (dev->num_inst == 1)
 854                s5p_mfc_deinit_hw(dev);
 855err_init_hw:
 856err_load_fw:
 857err_pwr_enable:
 858        if (dev->num_inst == 1) {
 859                if (s5p_mfc_power_off() < 0)
 860                        mfc_err("power off failed\n");
 861                del_timer_sync(&dev->watchdog_timer);
 862        }
 863err_ctrls_setup:
 864        s5p_mfc_dec_ctrls_delete(ctx);
 865err_bad_node:
 866        dev->ctx[ctx->num] = NULL;
 867err_no_ctx:
 868        v4l2_fh_del(&ctx->fh);
 869        v4l2_fh_exit(&ctx->fh);
 870        kfree(ctx);
 871err_alloc:
 872        dev->num_inst--;
 873        mutex_unlock(&dev->mfc_mutex);
 874        mfc_debug_leave();
 875        return ret;
 876}
 877
 878/* Release MFC context */
 879static int s5p_mfc_release(struct file *file)
 880{
 881        struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
 882        struct s5p_mfc_dev *dev = ctx->dev;
 883
 884        mfc_debug_enter();
 885        mutex_lock(&dev->mfc_mutex);
 886        s5p_mfc_clock_on();
 887        vb2_queue_release(&ctx->vq_src);
 888        vb2_queue_release(&ctx->vq_dst);
 889        /* Mark context as idle */
 890        clear_work_bit_irqsave(ctx);
 891        /* If instance was initialised and not yet freed,
 892         * return instance and free resources */
 893        if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
 894                mfc_debug(2, "Has to free instance\n");
 895                s5p_mfc_close_mfc_inst(dev, ctx);
 896        }
 897        /* hardware locking scheme */
 898        if (dev->curr_ctx == ctx->num)
 899                clear_bit(0, &dev->hw_lock);
 900        dev->num_inst--;
 901        if (dev->num_inst == 0) {
 902                mfc_debug(2, "Last instance\n");
 903                s5p_mfc_deinit_hw(dev);
 904                del_timer_sync(&dev->watchdog_timer);
 905                if (s5p_mfc_power_off() < 0)
 906                        mfc_err("Power off failed\n");
 907        }
 908        mfc_debug(2, "Shutting down clock\n");
 909        s5p_mfc_clock_off();
 910        dev->ctx[ctx->num] = NULL;
 911        s5p_mfc_dec_ctrls_delete(ctx);
 912        v4l2_fh_del(&ctx->fh);
 913        v4l2_fh_exit(&ctx->fh);
 914        kfree(ctx);
 915        mfc_debug_leave();
 916        mutex_unlock(&dev->mfc_mutex);
 917        return 0;
 918}
 919
 920/* Poll */
 921static unsigned int s5p_mfc_poll(struct file *file,
 922                                 struct poll_table_struct *wait)
 923{
 924        struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
 925        struct s5p_mfc_dev *dev = ctx->dev;
 926        struct vb2_queue *src_q, *dst_q;
 927        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
 928        unsigned int rc = 0;
 929        unsigned long flags;
 930
 931        mutex_lock(&dev->mfc_mutex);
 932        src_q = &ctx->vq_src;
 933        dst_q = &ctx->vq_dst;
  934        /*
  935         * There has to be at least one buffer queued on each queued_list;
  936         * such a buffer is either owned by the driver already or waiting
  937         * for the driver to claim it and start processing.
  938         */
 939        if ((!src_q->streaming || list_empty(&src_q->queued_list))
 940                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
 941                rc = POLLERR;
 942                goto end;
 943        }
 944        mutex_unlock(&dev->mfc_mutex);
 945        poll_wait(file, &ctx->fh.wait, wait);
 946        poll_wait(file, &src_q->done_wq, wait);
 947        poll_wait(file, &dst_q->done_wq, wait);
 948        mutex_lock(&dev->mfc_mutex);
 949        if (v4l2_event_pending(&ctx->fh))
 950                rc |= POLLPRI;
 951        spin_lock_irqsave(&src_q->done_lock, flags);
 952        if (!list_empty(&src_q->done_list))
 953                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
 954                                                                done_entry);
 955        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
 956                                || src_vb->state == VB2_BUF_STATE_ERROR))
 957                rc |= POLLOUT | POLLWRNORM;
 958        spin_unlock_irqrestore(&src_q->done_lock, flags);
 959        spin_lock_irqsave(&dst_q->done_lock, flags);
 960        if (!list_empty(&dst_q->done_list))
 961                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
 962                                                                done_entry);
 963        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
 964                                || dst_vb->state == VB2_BUF_STATE_ERROR))
 965                rc |= POLLIN | POLLRDNORM;
 966        spin_unlock_irqrestore(&dst_q->done_lock, flags);
 967end:
 968        mutex_unlock(&dev->mfc_mutex);
 969        return rc;
 970}
 971
 972/* Mmap */
 973static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
 974{
 975        struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
 976        struct s5p_mfc_dev *dev = ctx->dev;
 977        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 978        int ret;
 979
 980        if (mutex_lock_interruptible(&dev->mfc_mutex))
 981                return -ERESTARTSYS;
 982        if (offset < DST_QUEUE_OFF_BASE) {
  983                mfc_debug(2, "mmapping source\n");
 984                ret = vb2_mmap(&ctx->vq_src, vma);
 985        } else {                /* capture */
  986                mfc_debug(2, "mmapping destination\n");
 987                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
 988                ret = vb2_mmap(&ctx->vq_dst, vma);
 989        }
 990        mutex_unlock(&dev->mfc_mutex);
 991        return ret;
 992}
 993
 994/* v4l2 ops */
 995static const struct v4l2_file_operations s5p_mfc_fops = {
 996        .owner = THIS_MODULE,
 997        .open = s5p_mfc_open,
 998        .release = s5p_mfc_release,
 999        .poll = s5p_mfc_poll,
1000        .unlocked_ioctl = video_ioctl2,
1001        .mmap = s5p_mfc_mmap,
1002};
1003
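/* Helper for device_find_child(): match a child device by name */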
1004static int match_child(struct device *dev, void *data)
1005{
1006        if (!dev_name(dev))
1007                return 0;
1008        return !strcmp(dev_name(dev), (char *)data);
1009}
1010
1011static void *mfc_get_drv_data(struct platform_device *pdev);
1012
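/*
 * MFC uses two memory ports (left/right banks). In the DT case the base
 * address and size of each bank come from the "samsung,mfc-l" and
 * "samsung,mfc-r" properties and are registered as coherent DMA memory.
 */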
1013static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
1014{
1015        unsigned int mem_info[2] = { };
1016
1017        dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
1018                        sizeof(struct device), GFP_KERNEL);
1019        if (!dev->mem_dev_l) {
1020                mfc_err("Not enough memory\n");
1021                return -ENOMEM;
1022        }
1023        device_initialize(dev->mem_dev_l);
1024        of_property_read_u32_array(dev->plat_dev->dev.of_node,
1025                        "samsung,mfc-l", mem_info, 2);
1026        if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
1027                                mem_info[0], mem_info[1],
1028                                DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
 1029                mfc_err("Failed to declare coherent memory for "
 1030                        "MFC device\n");
1031                return -ENOMEM;
1032        }
1033
1034        dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
1035                        sizeof(struct device), GFP_KERNEL);
1036        if (!dev->mem_dev_r) {
1037                mfc_err("Not enough memory\n");
1038                return -ENOMEM;
1039        }
1040        device_initialize(dev->mem_dev_r);
1041        of_property_read_u32_array(dev->plat_dev->dev.of_node,
1042                        "samsung,mfc-r", mem_info, 2);
1043        if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
1044                                mem_info[0], mem_info[1],
1045                                DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
 1046                pr_err("Failed to declare coherent memory for "
 1047                        "MFC device\n");
1048                return -ENOMEM;
1049        }
1050        return 0;
1051}
1052
1053/* MFC probe function */
1054static int s5p_mfc_probe(struct platform_device *pdev)
1055{
1056        struct s5p_mfc_dev *dev;
1057        struct video_device *vfd;
1058        struct resource *res;
1059        int ret;
1060
1061        pr_debug("%s++\n", __func__);
1062        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1063        if (!dev) {
1064                dev_err(&pdev->dev, "Not enough memory for MFC device\n");
1065                return -ENOMEM;
1066        }
1067
1068        spin_lock_init(&dev->irqlock);
1069        spin_lock_init(&dev->condlock);
1070        dev->plat_dev = pdev;
1071        if (!dev->plat_dev) {
1072                dev_err(&pdev->dev, "No platform data specified\n");
1073                return -ENODEV;
1074        }
1075
1076        dev->variant = mfc_get_drv_data(pdev);
1077
1078        ret = s5p_mfc_init_pm(dev);
1079        if (ret < 0) {
1080                dev_err(&pdev->dev, "failed to get mfc clock source\n");
1081                return ret;
1082        }
1083
1084        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1085
1086        dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1087        if (IS_ERR(dev->regs_base))
1088                return PTR_ERR(dev->regs_base);
1089
1090        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1091        if (res == NULL) {
1092                dev_err(&pdev->dev, "failed to get irq resource\n");
1093                ret = -ENOENT;
1094                goto err_res;
1095        }
1096        dev->irq = res->start;
1097        ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
1098                                        0, pdev->name, dev);
1099        if (ret) {
1100                dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
1101                goto err_res;
1102        }
1103
1104        if (pdev->dev.of_node) {
1105                ret = s5p_mfc_alloc_memdevs(dev);
1106                if (ret < 0)
1107                        goto err_res;
1108        } else {
1109                dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
1110                                "s5p-mfc-l", match_child);
1111                if (!dev->mem_dev_l) {
1112                        mfc_err("Mem child (L) device get failed\n");
1113                        ret = -ENODEV;
1114                        goto err_res;
1115                }
1116                dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
1117                                "s5p-mfc-r", match_child);
1118                if (!dev->mem_dev_r) {
1119                        mfc_err("Mem child (R) device get failed\n");
1120                        ret = -ENODEV;
1121                        goto err_res;
1122                }
1123        }
1124
1125        dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
1126        if (IS_ERR(dev->alloc_ctx[0])) {
1127                ret = PTR_ERR(dev->alloc_ctx[0]);
1128                goto err_res;
1129        }
1130        dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
1131        if (IS_ERR(dev->alloc_ctx[1])) {
1132                ret = PTR_ERR(dev->alloc_ctx[1]);
1133                goto err_mem_init_ctx_1;
1134        }
1135
1136        mutex_init(&dev->mfc_mutex);
1137
1138        ret = s5p_mfc_alloc_firmware(dev);
1139        if (ret)
1140                goto err_alloc_fw;
1141
1142        ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1143        if (ret)
1144                goto err_v4l2_dev_reg;
1145        init_waitqueue_head(&dev->queue);
1146
1147        /* decoder */
1148        vfd = video_device_alloc();
1149        if (!vfd) {
1150                v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1151                ret = -ENOMEM;
1152                goto err_dec_alloc;
1153        }
1154        vfd->fops       = &s5p_mfc_fops;
1155        vfd->ioctl_ops  = get_dec_v4l2_ioctl_ops();
1156        vfd->release    = video_device_release;
1157        vfd->lock       = &dev->mfc_mutex;
1158        vfd->v4l2_dev   = &dev->v4l2_dev;
1159        vfd->vfl_dir    = VFL_DIR_M2M;
1160        snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
1161        dev->vfd_dec    = vfd;
1162        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1163        if (ret) {
1164                v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1165                video_device_release(vfd);
1166                goto err_dec_reg;
1167        }
1168        v4l2_info(&dev->v4l2_dev,
1169                  "decoder registered as /dev/video%d\n", vfd->num);
1170        video_set_drvdata(vfd, dev);
1171
1172        /* encoder */
1173        vfd = video_device_alloc();
1174        if (!vfd) {
1175                v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1176                ret = -ENOMEM;
1177                goto err_enc_alloc;
1178        }
1179        vfd->fops       = &s5p_mfc_fops;
1180        vfd->ioctl_ops  = get_enc_v4l2_ioctl_ops();
1181        vfd->release    = video_device_release;
1182        vfd->lock       = &dev->mfc_mutex;
1183        vfd->v4l2_dev   = &dev->v4l2_dev;
1184        vfd->vfl_dir    = VFL_DIR_M2M;
1185        snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
1186        dev->vfd_enc    = vfd;
1187        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1188        if (ret) {
1189                v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1190                video_device_release(vfd);
1191                goto err_enc_reg;
1192        }
1193        v4l2_info(&dev->v4l2_dev,
1194                  "encoder registered as /dev/video%d\n", vfd->num);
1195        video_set_drvdata(vfd, dev);
1196        platform_set_drvdata(pdev, dev);
1197
1198        dev->hw_lock = 0;
1199        dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
1200        INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
1201        atomic_set(&dev->watchdog_cnt, 0);
1202        init_timer(&dev->watchdog_timer);
1203        dev->watchdog_timer.data = (unsigned long)dev;
1204        dev->watchdog_timer.function = s5p_mfc_watchdog;
1205
1206        /* Initialize HW ops and commands based on MFC version */
1207        s5p_mfc_init_hw_ops(dev);
1208        s5p_mfc_init_hw_cmds(dev);
1209        s5p_mfc_init_regs(dev);
1210
1211        pr_debug("%s--\n", __func__);
1212        return 0;
1213
 1214/* Deinitialize MFC when probe fails */
1215err_enc_reg:
1216        video_device_release(dev->vfd_enc);
1217err_enc_alloc:
1218        video_unregister_device(dev->vfd_dec);
1219err_dec_reg:
1220        video_device_release(dev->vfd_dec);
1221err_dec_alloc:
1222        v4l2_device_unregister(&dev->v4l2_dev);
1223err_v4l2_dev_reg:
1224        s5p_mfc_release_firmware(dev);
1225err_alloc_fw:
1226        vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
1227err_mem_init_ctx_1:
1228        vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
1229err_res:
1230        s5p_mfc_final_pm(dev);
1231
1232        pr_debug("%s-- with error\n", __func__);
1233        return ret;
1234
1235}
1236
1237/* Remove the driver */
1238static int s5p_mfc_remove(struct platform_device *pdev)
1239{
1240        struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
1241
1242        v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
1243
1244        del_timer_sync(&dev->watchdog_timer);
1245        flush_workqueue(dev->watchdog_workqueue);
1246        destroy_workqueue(dev->watchdog_workqueue);
1247
1248        video_unregister_device(dev->vfd_enc);
1249        video_unregister_device(dev->vfd_dec);
1250        v4l2_device_unregister(&dev->v4l2_dev);
1251        s5p_mfc_release_firmware(dev);
1252        vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
1253        vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
1254        if (pdev->dev.of_node) {
1255                put_device(dev->mem_dev_l);
1256                put_device(dev->mem_dev_r);
1257        }
1258
1259        s5p_mfc_final_pm(dev);
1260        return 0;
1261}
1262
1263#ifdef CONFIG_PM_SLEEP
1264
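/*
 * System suspend: wait for any hardware operation in flight to finish
 * (by taking hw_lock) before putting the MFC to sleep.
 */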
1265static int s5p_mfc_suspend(struct device *dev)
1266{
1267        struct platform_device *pdev = to_platform_device(dev);
1268        struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1269        int ret;
1270
1271        if (m_dev->num_inst == 0)
1272                return 0;
1273
1274        if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
1275                mfc_err("Error: going to suspend for a second time\n");
1276                return -EIO;
1277        }
1278
 1279        /* Check if we're processing, then wait if necessary. */
1280        while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
1281                /* Try and lock the HW */
1282                /* Wait on the interrupt waitqueue */
1283                ret = wait_event_interruptible_timeout(m_dev->queue,
1284                        m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
1285                if (ret == 0) {
1286                        mfc_err("Waiting for hardware to finish timed out\n");
1287                        return -EIO;
1288                }
1289        }
1290
1291        return s5p_mfc_sleep(m_dev);
1292}
1293
1294static int s5p_mfc_resume(struct device *dev)
1295{
1296        struct platform_device *pdev = to_platform_device(dev);
1297        struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1298
1299        if (m_dev->num_inst == 0)
1300                return 0;
1301        return s5p_mfc_wakeup(m_dev);
1302}
1303#endif
1304
1305#ifdef CONFIG_PM_RUNTIME
1306static int s5p_mfc_runtime_suspend(struct device *dev)
1307{
1308        struct platform_device *pdev = to_platform_device(dev);
1309        struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1310
1311        atomic_set(&m_dev->pm.power, 0);
1312        return 0;
1313}
1314
1315static int s5p_mfc_runtime_resume(struct device *dev)
1316{
1317        struct platform_device *pdev = to_platform_device(dev);
1318        struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1319
1320        if (!m_dev->alloc_ctx)
1321                return 0;
1322        atomic_set(&m_dev->pm.power, 1);
1323        return 0;
1324}
1325#endif
1326
1327/* Power management */
1328static const struct dev_pm_ops s5p_mfc_pm_ops = {
1329        SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
1330        SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
1331                           NULL)
1332};
1333
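/*
 * Per-version buffer sizes, alignment and firmware names; the matching
 * variant is selected through platform or OF driver data at probe time.
 */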
1334static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
1335        .h264_ctx       = MFC_H264_CTX_BUF_SIZE,
1336        .non_h264_ctx   = MFC_CTX_BUF_SIZE,
1337        .dsc            = DESC_BUF_SIZE,
1338        .shm            = SHARED_BUF_SIZE,
1339};
1340
1341static struct s5p_mfc_buf_size buf_size_v5 = {
1342        .fw     = MAX_FW_SIZE,
1343        .cpb    = MAX_CPB_SIZE,
1344        .priv   = &mfc_buf_size_v5,
1345};
1346
1347static struct s5p_mfc_buf_align mfc_buf_align_v5 = {
1348        .base = MFC_BASE_ALIGN_ORDER,
1349};
1350
1351static struct s5p_mfc_variant mfc_drvdata_v5 = {
1352        .version        = MFC_VERSION,
1353        .version_bit    = MFC_V5_BIT,
1354        .port_num       = MFC_NUM_PORTS,
1355        .buf_size       = &buf_size_v5,
1356        .buf_align      = &mfc_buf_align_v5,
1357        .fw_name[0]     = "s5p-mfc.fw",
1358};
1359
1360static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
1361        .dev_ctx        = MFC_CTX_BUF_SIZE_V6,
1362        .h264_dec_ctx   = MFC_H264_DEC_CTX_BUF_SIZE_V6,
1363        .other_dec_ctx  = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
1364        .h264_enc_ctx   = MFC_H264_ENC_CTX_BUF_SIZE_V6,
1365        .other_enc_ctx  = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
1366};
1367
1368static struct s5p_mfc_buf_size buf_size_v6 = {
1369        .fw     = MAX_FW_SIZE_V6,
1370        .cpb    = MAX_CPB_SIZE_V6,
1371        .priv   = &mfc_buf_size_v6,
1372};
1373
1374static struct s5p_mfc_buf_align mfc_buf_align_v6 = {
1375        .base = 0,
1376};
1377
1378static struct s5p_mfc_variant mfc_drvdata_v6 = {
1379        .version        = MFC_VERSION_V6,
1380        .version_bit    = MFC_V6_BIT,
1381        .port_num       = MFC_NUM_PORTS_V6,
1382        .buf_size       = &buf_size_v6,
1383        .buf_align      = &mfc_buf_align_v6,
1384        .fw_name[0]     = "s5p-mfc-v6.fw",
1385        /*
1386         * v6-v2 firmware contains bug fixes and interface change
1387         * for init buffer command
1388         */
1389        .fw_name[1]     = "s5p-mfc-v6-v2.fw",
1390};
1391
1392static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
1393        .dev_ctx        = MFC_CTX_BUF_SIZE_V7,
1394        .h264_dec_ctx   = MFC_H264_DEC_CTX_BUF_SIZE_V7,
1395        .other_dec_ctx  = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
1396        .h264_enc_ctx   = MFC_H264_ENC_CTX_BUF_SIZE_V7,
1397        .other_enc_ctx  = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
1398};
1399
1400static struct s5p_mfc_buf_size buf_size_v7 = {
1401        .fw     = MAX_FW_SIZE_V7,
1402        .cpb    = MAX_CPB_SIZE_V7,
1403        .priv   = &mfc_buf_size_v7,
1404};
1405
1406static struct s5p_mfc_buf_align mfc_buf_align_v7 = {
1407        .base = 0,
1408};
1409
1410static struct s5p_mfc_variant mfc_drvdata_v7 = {
1411        .version        = MFC_VERSION_V7,
1412        .version_bit    = MFC_V7_BIT,
1413        .port_num       = MFC_NUM_PORTS_V7,
1414        .buf_size       = &buf_size_v7,
1415        .buf_align      = &mfc_buf_align_v7,
1416        .fw_name[0]     = "s5p-mfc-v7.fw",
1417};
1418
1419static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
1420        .dev_ctx        = MFC_CTX_BUF_SIZE_V8,
1421        .h264_dec_ctx   = MFC_H264_DEC_CTX_BUF_SIZE_V8,
1422        .other_dec_ctx  = MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
1423        .h264_enc_ctx   = MFC_H264_ENC_CTX_BUF_SIZE_V8,
1424        .other_enc_ctx  = MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
1425};
1426
1427static struct s5p_mfc_buf_size buf_size_v8 = {
1428        .fw     = MAX_FW_SIZE_V8,
1429        .cpb    = MAX_CPB_SIZE_V8,
1430        .priv   = &mfc_buf_size_v8,
1431};
1432
1433static struct s5p_mfc_buf_align mfc_buf_align_v8 = {
1434        .base = 0,
1435};
1436
1437static struct s5p_mfc_variant mfc_drvdata_v8 = {
1438        .version        = MFC_VERSION_V8,
1439        .version_bit    = MFC_V8_BIT,
1440        .port_num       = MFC_NUM_PORTS_V8,
1441        .buf_size       = &buf_size_v8,
1442        .buf_align      = &mfc_buf_align_v8,
1443        .fw_name[0]     = "s5p-mfc-v8.fw",
1444};
1445
1446static struct platform_device_id mfc_driver_ids[] = {
1447        {
1448                .name = "s5p-mfc",
1449                .driver_data = (unsigned long)&mfc_drvdata_v5,
1450        }, {
1451                .name = "s5p-mfc-v5",
1452                .driver_data = (unsigned long)&mfc_drvdata_v5,
1453        }, {
1454                .name = "s5p-mfc-v6",
1455                .driver_data = (unsigned long)&mfc_drvdata_v6,
1456        }, {
1457                .name = "s5p-mfc-v7",
1458                .driver_data = (unsigned long)&mfc_drvdata_v7,
1459        }, {
1460                .name = "s5p-mfc-v8",
1461                .driver_data = (unsigned long)&mfc_drvdata_v8,
1462        },
1463        {},
1464};
1465MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
1466
1467static const struct of_device_id exynos_mfc_match[] = {
1468        {
1469                .compatible = "samsung,mfc-v5",
1470                .data = &mfc_drvdata_v5,
1471        }, {
1472                .compatible = "samsung,mfc-v6",
1473                .data = &mfc_drvdata_v6,
1474        }, {
1475                .compatible = "samsung,mfc-v7",
1476                .data = &mfc_drvdata_v7,
1477        }, {
1478                .compatible = "samsung,mfc-v8",
1479                .data = &mfc_drvdata_v8,
1480        },
1481        {},
1482};
1483MODULE_DEVICE_TABLE(of, exynos_mfc_match);
1484
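/*
 * Illustrative device tree node matched by the table above (addresses and
 * sizes are placeholders; see the s5p-mfc DT binding for the real values):
 *
 *     mfc: codec@13400000 {
 *             compatible = "samsung,mfc-v6";
 *             reg = <0x13400000 0x10000>;
 *             interrupts = <0 96 0>;
 *             samsung,mfc-r = <0x43000000 0x800000>;
 *             samsung,mfc-l = <0x51000000 0x800000>;
 *     };
 */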
1485static void *mfc_get_drv_data(struct platform_device *pdev)
1486{
1487        struct s5p_mfc_variant *driver_data = NULL;
1488
1489        if (pdev->dev.of_node) {
1490                const struct of_device_id *match;
1491                match = of_match_node(exynos_mfc_match,
1492                                pdev->dev.of_node);
1493                if (match)
1494                        driver_data = (struct s5p_mfc_variant *)match->data;
1495        } else {
1496                driver_data = (struct s5p_mfc_variant *)
1497                        platform_get_device_id(pdev)->driver_data;
1498        }
1499        return driver_data;
1500}
1501
1502static struct platform_driver s5p_mfc_driver = {
1503        .probe          = s5p_mfc_probe,
1504        .remove         = s5p_mfc_remove,
1505        .id_table       = mfc_driver_ids,
1506        .driver = {
1507                .name   = S5P_MFC_NAME,
1508                .owner  = THIS_MODULE,
1509                .pm     = &s5p_mfc_pm_ops,
1510                .of_match_table = exynos_mfc_match,
1511        },
1512};
1513
1514module_platform_driver(s5p_mfc_driver);
1515
1516MODULE_LICENSE("GPL");
1517MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
1518MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
1519
1520