linux/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Broadcom BCM2835 V4L2 driver
   4 *
   5 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
   6 *
   7 * Authors: Vincent Sanders @ Collabora
   8 *          Dave Stevenson @ Broadcom
   9 *              (now dave.stevenson@raspberrypi.org)
  10 *          Simon Mellor @ Broadcom
  11 *          Luke Diamand @ Broadcom
  12 *
  13 * V4L2 driver MMAL vchiq interface code
  14 */
  15
  16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17
  18#include <linux/errno.h>
  19#include <linux/kernel.h>
  20#include <linux/module.h>
  21#include <linux/mutex.h>
  22#include <linux/mm.h>
  23#include <linux/slab.h>
  24#include <linux/completion.h>
  25#include <linux/vmalloc.h>
  26#include <linux/raspberrypi/vchiq.h>
  27#include <media/videobuf2-vmalloc.h>
  28
  29#include "mmal-common.h"
  30#include "mmal-vchiq.h"
  31#include "mmal-msg.h"
  32
  33/*
  34 * maximum number of components supported.
  35 * This matches the maximum permitted by default on the VPU
  36 */
  37#define VCHIQ_MMAL_MAX_COMPONENTS 64
  38
  39/*
  40 * Timeout for synchronous msg responses in seconds.
  41 * Helpful to increase this if stopping in the VPU debugger.
  42 */
  43#define SYNC_MSG_TIMEOUT       3
  44
  45/*#define FULL_MSG_DUMP 1*/
  46
  47#ifdef DEBUG
  48static const char *const msg_type_names[] = {
  49        "UNKNOWN",
  50        "QUIT",
  51        "SERVICE_CLOSED",
  52        "GET_VERSION",
  53        "COMPONENT_CREATE",
  54        "COMPONENT_DESTROY",
  55        "COMPONENT_ENABLE",
  56        "COMPONENT_DISABLE",
  57        "PORT_INFO_GET",
  58        "PORT_INFO_SET",
  59        "PORT_ACTION",
  60        "BUFFER_FROM_HOST",
  61        "BUFFER_TO_HOST",
  62        "GET_STATS",
  63        "PORT_PARAMETER_SET",
  64        "PORT_PARAMETER_GET",
  65        "EVENT_TO_HOST",
  66        "GET_CORE_STATS_FOR_PORT",
  67        "OPAQUE_ALLOCATOR",
  68        "CONSUME_MEM",
  69        "LMK",
  70        "OPAQUE_ALLOCATOR_DESC",
  71        "DRM_GET_LHS32",
  72        "DRM_GET_TIME",
  73        "BUFFER_FROM_HOST_ZEROLEN",
  74        "PORT_FLUSH",
  75        "HOST_LOG",
  76};
  77#endif
  78
  79static const char *const port_action_type_names[] = {
  80        "UNKNOWN",
  81        "ENABLE",
  82        "DISABLE",
  83        "FLUSH",
  84        "CONNECT",
  85        "DISCONNECT",
  86        "SET_REQUIREMENTS",
  87};
  88
  89#if defined(DEBUG)
  90#if defined(FULL_MSG_DUMP)
  91#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
  92        do {                                                            \
  93                pr_debug(TITLE" type:%s(%d) length:%d\n",               \
  94                         msg_type_names[(MSG)->h.type],                 \
  95                         (MSG)->h.type, (MSG_LEN));                     \
  96                print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
  97                               16, 4, (MSG),                            \
  98                               sizeof(struct mmal_msg_header), 1);      \
  99                print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
 100                               16, 4,                                   \
 101                               ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
 102                               (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
 103        } while (0)
 104#else
 105#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
  106        do {                                                            \
  107                pr_debug(TITLE" type:%s(%d) length:%d\n",               \
  108                         msg_type_names[(MSG)->h.type],                 \
  109                         (MSG)->h.type, (MSG_LEN));                     \
  110        } while (0)
 111#endif
 112#else
 113#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
 114#endif
 115
 116struct vchiq_mmal_instance;
 117
 118/* normal message context */
 119struct mmal_msg_context {
 120        struct vchiq_mmal_instance *instance;
 121
 122        /* Index in the context_map idr so that we can find the
 123         * mmal_msg_context again when servicing the VCHI reply.
 124         */
 125        int handle;
 126
 127        union {
 128                struct {
 129                        /* work struct for buffer_cb callback */
 130                        struct work_struct work;
 131                        /* work struct for deferred callback */
 132                        struct work_struct buffer_to_host_work;
 133                        /* mmal instance */
 134                        struct vchiq_mmal_instance *instance;
 135                        /* mmal port */
 136                        struct vchiq_mmal_port *port;
 137                        /* actual buffer used to store bulk reply */
 138                        struct mmal_buffer *buffer;
 139                        /* amount of buffer used */
 140                        unsigned long buffer_used;
 141                        /* MMAL buffer flags */
 142                        u32 mmal_flags;
 143                        /* Presentation and Decode timestamps */
 144                        s64 pts;
 145                        s64 dts;
 146
 147                        int status;     /* context status */
 148
 149                } bulk;         /* bulk data */
 150
 151                struct {
 152                        /* message handle to release */
 153                        struct vchiq_header *msg_handle;
 154                        /* pointer to received message */
 155                        struct mmal_msg *msg;
 156                        /* received message length */
 157                        u32 msg_len;
 158                        /* completion upon reply */
 159                        struct completion cmplt;
 160                } sync;         /* synchronous response */
 161        } u;
 162
 163};
 164
 165struct vchiq_mmal_instance {
 166        unsigned int service_handle;
 167
 168        /* ensure serialised access to service */
 169        struct mutex vchiq_mutex;
 170
 171        /* vmalloc page to receive scratch bulk xfers into */
 172        void *bulk_scratch;
 173
 174        struct idr context_map;
 175        /* protect accesses to context_map */
 176        struct mutex context_map_lock;
 177
 178        struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
 179
 180        /* ordered workqueue to process all bulk operations */
 181        struct workqueue_struct *bulk_wq;
 182
 183        /* handle for a vchiq instance */
 184        struct vchiq_instance *vchiq_instance;
 185};
 186
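/*
 * Allocate a message context and register it in the instance's context_map
 * idr so that the service callback can find it again from the handle
 * carried in the message header.
 */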
 187static struct mmal_msg_context *
 188get_msg_context(struct vchiq_mmal_instance *instance)
 189{
 190        struct mmal_msg_context *msg_context;
 191        int handle;
 192
 193        /* todo: should this be allocated from a pool to avoid kzalloc */
 194        msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
 195
 196        if (!msg_context)
 197                return ERR_PTR(-ENOMEM);
 198
 199        /* Create an ID that will be passed along with our message so
 200         * that when we service the VCHI reply, we can look up what
 201         * message is being replied to.
 202         */
 203        mutex_lock(&instance->context_map_lock);
 204        handle = idr_alloc(&instance->context_map, msg_context,
 205                           0, 0, GFP_KERNEL);
 206        mutex_unlock(&instance->context_map_lock);
 207
 208        if (handle < 0) {
 209                kfree(msg_context);
 210                return ERR_PTR(handle);
 211        }
 212
 213        msg_context->instance = instance;
 214        msg_context->handle = handle;
 215
 216        return msg_context;
 217}
 218
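/* look up a message context previously registered in the context_map */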
 219static struct mmal_msg_context *
 220lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
 221{
 222        return idr_find(&instance->context_map, handle);
 223}
 224
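/* remove a message context from the context_map and free it */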
 225static void
 226release_msg_context(struct mmal_msg_context *msg_context)
 227{
 228        struct vchiq_mmal_instance *instance = msg_context->instance;
 229
 230        mutex_lock(&instance->context_map_lock);
 231        idr_remove(&instance->context_map, msg_context->handle);
 232        mutex_unlock(&instance->context_map_lock);
 233        kfree(msg_context);
 234}
 235
 236/* deals with receipt of event to host message */
 237static void event_to_host_cb(struct vchiq_mmal_instance *instance,
 238                             struct mmal_msg *msg, u32 msg_len)
 239{
 240        pr_debug("unhandled event\n");
 241        pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
 242                 msg->u.event_to_host.client_component,
 243                 msg->u.event_to_host.port_type,
 244                 msg->u.event_to_host.port_num,
 245                 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
 246}
 247
 248/* workqueue scheduled callback
 249 *
 250 * we do this because it is important we do not call any other vchiq
  251 * sync calls from within the message delivery thread
 252 */
 253static void buffer_work_cb(struct work_struct *work)
 254{
 255        struct mmal_msg_context *msg_context =
 256                container_of(work, struct mmal_msg_context, u.bulk.work);
 257        struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
 258
 259        if (!buffer) {
 260                pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
 261                       __func__, msg_context);
 262                return;
 263        }
 264
 265        buffer->length = msg_context->u.bulk.buffer_used;
 266        buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
 267        buffer->dts = msg_context->u.bulk.dts;
 268        buffer->pts = msg_context->u.bulk.pts;
 269
 270        atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
 271
 272        msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
 273                                            msg_context->u.bulk.port,
 274                                            msg_context->u.bulk.status,
 275                                            msg_context->u.bulk.buffer);
 276}
 277
 278/* workqueue scheduled callback to handle receiving buffers
 279 *
 280 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 281 * If we block in the service_callback context then we can't process the
 282 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 283 * vchiq_bulk_receive() call to complete.
 284 */
 285static void buffer_to_host_work_cb(struct work_struct *work)
 286{
 287        struct mmal_msg_context *msg_context =
 288                container_of(work, struct mmal_msg_context,
 289                             u.bulk.buffer_to_host_work);
 290        struct vchiq_mmal_instance *instance = msg_context->instance;
 291        unsigned long len = msg_context->u.bulk.buffer_used;
 292        int ret;
 293
 294        if (!len)
 295                /* Dummy receive to ensure the buffers remain in order */
 296                len = 8;
 297        /* queue the bulk submission */
 298        vchiq_use_service(instance->service_handle);
 299        ret = vchiq_bulk_receive(instance->service_handle,
 300                                 msg_context->u.bulk.buffer->buffer,
 301                                 /* Actual receive needs to be a multiple
 302                                  * of 4 bytes
 303                                  */
 304                                (len + 3) & ~3,
 305                                msg_context,
 306                                VCHIQ_BULK_MODE_CALLBACK);
 307
 308        vchiq_release_service(instance->service_handle);
 309
 310        if (ret != 0)
 311                pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
 312                       __func__, msg_context, ret);
 313}
 314
 315/* enqueue a bulk receive for a given message context */
 316static int bulk_receive(struct vchiq_mmal_instance *instance,
 317                        struct mmal_msg *msg,
 318                        struct mmal_msg_context *msg_context)
 319{
 320        unsigned long rd_len;
 321
 322        rd_len = msg->u.buffer_from_host.buffer_header.length;
 323
 324        if (!msg_context->u.bulk.buffer) {
 325                pr_err("bulk.buffer not configured - error in buffer_from_host\n");
 326
 327                /* todo: this is a serious error, we should never have
 328                 * committed a buffer_to_host operation to the mmal
 329                 * port without the buffer to back it up (underflow
 330                 * handling) and there is no obvious way to deal with
  331                 * this - how is the mmal service going to react when
 332                 * we fail to do the xfer and reschedule a buffer when
 333                 * it arrives? perhaps a starved flag to indicate a
 334                 * waiting bulk receive?
 335                 */
 336
 337                return -EINVAL;
 338        }
 339
 340        /* ensure we do not overrun the available buffer */
 341        if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
 342                rd_len = msg_context->u.bulk.buffer->buffer_size;
 343                pr_warn("short read as not enough receive buffer space\n");
 344                /* todo: is this the correct response, what happens to
 345                 * the rest of the message data?
 346                 */
 347        }
 348
 349        /* store length */
 350        msg_context->u.bulk.buffer_used = rd_len;
 351        msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
 352        msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
 353
 354        queue_work(msg_context->instance->bulk_wq,
 355                   &msg_context->u.bulk.buffer_to_host_work);
 356
 357        return 0;
 358}
 359
 360/* data in message, memcpy from packet into output buffer */
 361static int inline_receive(struct vchiq_mmal_instance *instance,
 362                          struct mmal_msg *msg,
 363                          struct mmal_msg_context *msg_context)
 364{
 365        memcpy(msg_context->u.bulk.buffer->buffer,
 366               msg->u.buffer_from_host.short_data,
 367               msg->u.buffer_from_host.payload_in_message);
 368
 369        msg_context->u.bulk.buffer_used =
 370            msg->u.buffer_from_host.payload_in_message;
 371
 372        return 0;
 373}
 374
 375/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
 376static int
 377buffer_from_host(struct vchiq_mmal_instance *instance,
 378                 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
 379{
 380        struct mmal_msg_context *msg_context;
 381        struct mmal_msg m;
 382        int ret;
 383
 384        if (!port->enabled)
 385                return -EINVAL;
 386
 387        pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);
 388
 389        /* get context */
 390        if (!buf->msg_context) {
 391                pr_err("%s: msg_context not allocated, buf %p\n", __func__,
 392                       buf);
 393                return -EINVAL;
 394        }
 395        msg_context = buf->msg_context;
 396
 397        /* store bulk message context for when data arrives */
 398        msg_context->u.bulk.instance = instance;
 399        msg_context->u.bulk.port = port;
 400        msg_context->u.bulk.buffer = buf;
 401        msg_context->u.bulk.buffer_used = 0;
 402
 403        /* initialise work structure ready to schedule callback */
 404        INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
 405        INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
 406                  buffer_to_host_work_cb);
 407
 408        atomic_inc(&port->buffers_with_vpu);
 409
 410        /* prep the buffer from host message */
 411        memset(&m, 0xbc, sizeof(m));    /* just to make debug clearer */
 412
 413        m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
 414        m.h.magic = MMAL_MAGIC;
 415        m.h.context = msg_context->handle;
 416        m.h.status = 0;
 417
 418        /* drvbuf is our private data passed back */
 419        m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
 420        m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
 421        m.u.buffer_from_host.drvbuf.port_handle = port->handle;
 422        m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
 423
 424        /* buffer header */
 425        m.u.buffer_from_host.buffer_header.cmd = 0;
 426        m.u.buffer_from_host.buffer_header.data =
 427                (u32)(unsigned long)buf->buffer;
 428        m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
 429        m.u.buffer_from_host.buffer_header.length = 0;  /* nothing used yet */
 430        m.u.buffer_from_host.buffer_header.offset = 0;  /* no offset */
 431        m.u.buffer_from_host.buffer_header.flags = 0;   /* no flags */
 432        m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
 433        m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
 434
 435        /* clear buffer type specific data */
 436        memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
 437               sizeof(m.u.buffer_from_host.buffer_header_type_specific));
 438
 439        /* no payload in message */
 440        m.u.buffer_from_host.payload_in_message = 0;
 441
 442        vchiq_use_service(instance->service_handle);
 443
 444        ret = vchiq_queue_kernel_message(instance->service_handle, &m,
 445                                         sizeof(struct mmal_msg_header) +
 446                                         sizeof(m.u.buffer_from_host));
 447        if (ret)
 448                atomic_dec(&port->buffers_with_vpu);
 449
 450        vchiq_release_service(instance->service_handle);
 451
 452        return ret;
 453}
 454
 455/* deals with receipt of buffer to host message */
 456static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
 457                              struct mmal_msg *msg, u32 msg_len)
 458{
 459        struct mmal_msg_context *msg_context;
 460        u32 handle;
 461
 462        pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
 463                 __func__, instance, msg, msg_len);
 464
 465        if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
 466                handle = msg->u.buffer_from_host.drvbuf.client_context;
 467                msg_context = lookup_msg_context(instance, handle);
 468
 469                if (!msg_context) {
 470                        pr_err("drvbuf.client_context(%u) is invalid\n",
 471                               handle);
 472                        return;
 473                }
 474        } else {
 475                pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
 476                return;
 477        }
 478
 479        msg_context->u.bulk.mmal_flags =
 480                                msg->u.buffer_from_host.buffer_header.flags;
 481
 482        if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
 483                /* message reception had an error */
 484                pr_warn("error %d in reply\n", msg->h.status);
 485
 486                msg_context->u.bulk.status = msg->h.status;
 487
 488        } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
 489                /* empty buffer */
 490                if (msg->u.buffer_from_host.buffer_header.flags &
 491                    MMAL_BUFFER_HEADER_FLAG_EOS) {
 492                        msg_context->u.bulk.status =
 493                            bulk_receive(instance, msg, msg_context);
 494                        if (msg_context->u.bulk.status == 0)
 495                                return; /* successful bulk submission, bulk
 496                                         * completion will trigger callback
 497                                         */
 498                } else {
 499                        /* do callback with empty buffer - not EOS though */
 500                        msg_context->u.bulk.status = 0;
 501                        msg_context->u.bulk.buffer_used = 0;
 502                }
 503        } else if (msg->u.buffer_from_host.payload_in_message == 0) {
 504                /* data is not in message, queue a bulk receive */
 505                msg_context->u.bulk.status =
 506                    bulk_receive(instance, msg, msg_context);
 507                if (msg_context->u.bulk.status == 0)
 508                        return; /* successful bulk submission, bulk
 509                                 * completion will trigger callback
 510                                 */
 511
 512                /* failed to submit buffer, this will end badly */
 513                pr_err("error %d on bulk submission\n",
 514                       msg_context->u.bulk.status);
 515
 516        } else if (msg->u.buffer_from_host.payload_in_message <=
 517                   MMAL_VC_SHORT_DATA) {
 518                /* data payload within message */
 519                msg_context->u.bulk.status = inline_receive(instance, msg,
 520                                                            msg_context);
 521        } else {
 522                pr_err("message with invalid short payload\n");
 523
 524                /* signal error */
 525                msg_context->u.bulk.status = -EINVAL;
 526                msg_context->u.bulk.buffer_used =
 527                    msg->u.buffer_from_host.payload_in_message;
 528        }
 529
 530        /* schedule the port callback */
 531        schedule_work(&msg_context->u.bulk.work);
 532}
 533
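/* bulk receive completed - schedule the deferred port buffer callback */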
 534static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
 535                            struct mmal_msg_context *msg_context)
 536{
 537        msg_context->u.bulk.status = 0;
 538
 539        /* schedule the port callback */
 540        schedule_work(&msg_context->u.bulk.work);
 541}
 542
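/* bulk receive was aborted - flag -EINTR and schedule the callback */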
 543static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
 544                          struct mmal_msg_context *msg_context)
 545{
 546        pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
 547
 548        msg_context->u.bulk.status = -EINTR;
 549
 550        schedule_work(&msg_context->u.bulk.work);
 551}
 552
 553/* incoming event service callback */
 554static enum vchiq_status service_callback(enum vchiq_reason reason,
 555                                          struct vchiq_header *header,
 556                                          unsigned int handle, void *bulk_ctx)
 557{
 558        struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(handle);
 559        u32 msg_len;
 560        struct mmal_msg *msg;
 561        struct mmal_msg_context *msg_context;
 562
 563        if (!instance) {
 564                pr_err("Message callback passed NULL instance\n");
 565                return VCHIQ_SUCCESS;
 566        }
 567
 568        switch (reason) {
 569        case VCHIQ_MESSAGE_AVAILABLE:
 570                msg = (void *)header->data;
 571                msg_len = header->size;
 572
 573                DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
 574
 575                /* handling is different for buffer messages */
 576                switch (msg->h.type) {
 577                case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
 578                        vchiq_release_message(handle, header);
 579                        break;
 580
 581                case MMAL_MSG_TYPE_EVENT_TO_HOST:
 582                        event_to_host_cb(instance, msg, msg_len);
 583                        vchiq_release_message(handle, header);
 584
 585                        break;
 586
 587                case MMAL_MSG_TYPE_BUFFER_TO_HOST:
 588                        buffer_to_host_cb(instance, msg, msg_len);
 589                        vchiq_release_message(handle, header);
 590                        break;
 591
 592                default:
 593                        /* messages dependent on header context to complete */
 594                        if (!msg->h.context) {
 595                                pr_err("received message context was null!\n");
 596                                vchiq_release_message(handle, header);
 597                                break;
 598                        }
 599
 600                        msg_context = lookup_msg_context(instance,
 601                                                         msg->h.context);
 602                        if (!msg_context) {
 603                                pr_err("received invalid message context %u!\n",
 604                                       msg->h.context);
 605                                vchiq_release_message(handle, header);
 606                                break;
 607                        }
 608
 609                        /* fill in context values */
 610                        msg_context->u.sync.msg_handle = header;
 611                        msg_context->u.sync.msg = msg;
 612                        msg_context->u.sync.msg_len = msg_len;
 613
  614                        /* todo: should this check (completion_done()
  615                         * == 1) for no one waiting? Or do we need a
  616                         * flag to tell us the completion has been
  617                         * interrupted, so that we can free the message
  618                         * and its context? That would probably also
  619                         * solve the "message arriving after
  620                         * interruption" todo below.
  621                         */
 622
 623                        /* complete message so caller knows it happened */
 624                        complete(&msg_context->u.sync.cmplt);
 625                        break;
 626                }
 627
 628                break;
 629
 630        case VCHIQ_BULK_RECEIVE_DONE:
 631                bulk_receive_cb(instance, bulk_ctx);
 632                break;
 633
 634        case VCHIQ_BULK_RECEIVE_ABORTED:
 635                bulk_abort_cb(instance, bulk_ctx);
 636                break;
 637
 638        case VCHIQ_SERVICE_CLOSED:
 639                /* TODO: consider if this requires action if received when
 640                 * driver is not explicitly closing the service
 641                 */
 642                break;
 643
 644        default:
 645                pr_err("Received unhandled message reason %d\n", reason);
 646                break;
 647        }
 648
 649        return VCHIQ_SUCCESS;
 650}
 651
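/*
 * Send a message to the VPU and block until the reply arrives (or the
 * SYNC_MSG_TIMEOUT expires). On success the reply message and its vchiq
 * header are returned; the caller must release the header with
 * vchiq_release_message() once it has finished with the reply.
 */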
 652static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
 653                                     struct mmal_msg *msg,
 654                                     unsigned int payload_len,
 655                                     struct mmal_msg **msg_out,
 656                                     struct vchiq_header **msg_handle)
 657{
 658        struct mmal_msg_context *msg_context;
 659        int ret;
 660        unsigned long timeout;
 661
 662        /* payload size must not cause message to exceed max size */
 663        if (payload_len >
 664            (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
 665                pr_err("payload length %d exceeds max:%d\n", payload_len,
 666                       (int)(MMAL_MSG_MAX_SIZE -
 667                            sizeof(struct mmal_msg_header)));
 668                return -EINVAL;
 669        }
 670
 671        msg_context = get_msg_context(instance);
 672        if (IS_ERR(msg_context))
 673                return PTR_ERR(msg_context);
 674
 675        init_completion(&msg_context->u.sync.cmplt);
 676
 677        msg->h.magic = MMAL_MAGIC;
 678        msg->h.context = msg_context->handle;
 679        msg->h.status = 0;
 680
 681        DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
 682                     ">>> sync message");
 683
 684        vchiq_use_service(instance->service_handle);
 685
 686        ret = vchiq_queue_kernel_message(instance->service_handle, msg,
 687                                         sizeof(struct mmal_msg_header) +
 688                                         payload_len);
 689
 690        vchiq_release_service(instance->service_handle);
 691
 692        if (ret) {
 693                pr_err("error %d queuing message\n", ret);
 694                release_msg_context(msg_context);
 695                return ret;
 696        }
 697
 698        timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
 699                                              SYNC_MSG_TIMEOUT * HZ);
 700        if (timeout == 0) {
 701                pr_err("timed out waiting for sync completion\n");
 702                ret = -ETIME;
 703                /* todo: what happens if the message arrives after aborting */
 704                release_msg_context(msg_context);
 705                return ret;
 706        }
 707
 708        *msg_out = msg_context->u.sync.msg;
 709        *msg_handle = msg_context->u.sync.msg_handle;
 710        release_msg_context(msg_context);
 711
 712        return 0;
 713}
 714
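/* dump the cached port state to the kernel log for debugging */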
 715static void dump_port_info(struct vchiq_mmal_port *port)
 716{
 717        pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
 718
 719        pr_debug("buffer minimum num:%d size:%d align:%d\n",
 720                 port->minimum_buffer.num,
 721                 port->minimum_buffer.size, port->minimum_buffer.alignment);
 722
 723        pr_debug("buffer recommended num:%d size:%d align:%d\n",
 724                 port->recommended_buffer.num,
 725                 port->recommended_buffer.size,
 726                 port->recommended_buffer.alignment);
 727
 728        pr_debug("buffer current values num:%d size:%d align:%d\n",
 729                 port->current_buffer.num,
 730                 port->current_buffer.size, port->current_buffer.alignment);
 731
 732        pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
 733                 port->format.type,
 734                 port->format.encoding, port->format.encoding_variant);
 735
 736        pr_debug("                  bitrate:%d flags:0x%x\n",
 737                 port->format.bitrate, port->format.flags);
 738
 739        if (port->format.type == MMAL_ES_TYPE_VIDEO) {
 740                pr_debug
 741                    ("es video format: width:%d height:%d colourspace:0x%x\n",
 742                     port->es.video.width, port->es.video.height,
 743                     port->es.video.color_space);
 744
 745                pr_debug("               : crop xywh %d,%d,%d,%d\n",
 746                         port->es.video.crop.x,
 747                         port->es.video.crop.y,
 748                         port->es.video.crop.width, port->es.video.crop.height);
 749                pr_debug("               : framerate %d/%d  aspect %d/%d\n",
 750                         port->es.video.frame_rate.num,
 751                         port->es.video.frame_rate.den,
 752                         port->es.video.par.num, port->es.video.par.den);
 753        }
 754}
 755
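/* populate a struct mmal_port message payload from the cached port state */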
 756static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
 757{
  758        /* todo: do read-only fields need setting at all? */
 759        p->type = port->type;
 760        p->index = port->index;
 761        p->index_all = 0;
 762        p->is_enabled = port->enabled;
 763        p->buffer_num_min = port->minimum_buffer.num;
 764        p->buffer_size_min = port->minimum_buffer.size;
 765        p->buffer_alignment_min = port->minimum_buffer.alignment;
 766        p->buffer_num_recommended = port->recommended_buffer.num;
 767        p->buffer_size_recommended = port->recommended_buffer.size;
 768
 769        /* only three writable fields in a port */
 770        p->buffer_num = port->current_buffer.num;
 771        p->buffer_size = port->current_buffer.size;
 772        p->userdata = (u32)(unsigned long)port;
 773}
 774
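/* push the cached port settings and format to the VPU with a port info
 * set message
 */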
 775static int port_info_set(struct vchiq_mmal_instance *instance,
 776                         struct vchiq_mmal_port *port)
 777{
 778        int ret;
 779        struct mmal_msg m;
 780        struct mmal_msg *rmsg;
 781        struct vchiq_header *rmsg_handle;
 782
 783        pr_debug("setting port info port %p\n", port);
 784        if (!port)
 785                return -1;
 786        dump_port_info(port);
 787
 788        m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
 789
 790        m.u.port_info_set.component_handle = port->component->handle;
 791        m.u.port_info_set.port_type = port->type;
 792        m.u.port_info_set.port_index = port->index;
 793
 794        port_to_mmal_msg(port, &m.u.port_info_set.port);
 795
 796        /* elementary stream format setup */
 797        m.u.port_info_set.format.type = port->format.type;
 798        m.u.port_info_set.format.encoding = port->format.encoding;
 799        m.u.port_info_set.format.encoding_variant =
 800            port->format.encoding_variant;
 801        m.u.port_info_set.format.bitrate = port->format.bitrate;
 802        m.u.port_info_set.format.flags = port->format.flags;
 803
 804        memcpy(&m.u.port_info_set.es, &port->es,
 805               sizeof(union mmal_es_specific_format));
 806
 807        m.u.port_info_set.format.extradata_size = port->format.extradata_size;
 808        memcpy(&m.u.port_info_set.extradata, port->format.extradata,
 809               port->format.extradata_size);
 810
 811        ret = send_synchronous_mmal_msg(instance, &m,
 812                                        sizeof(m.u.port_info_set),
 813                                        &rmsg, &rmsg_handle);
 814        if (ret)
 815                return ret;
 816
 817        if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
 818                /* got an unexpected message type in reply */
 819                ret = -EINVAL;
 820                goto release_msg;
 821        }
 822
 823        /* return operation status */
 824        ret = -rmsg->u.port_info_get_reply.status;
 825
 826        pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
 827                 port->component->handle, port->handle);
 828
 829release_msg:
 830        vchiq_release_message(instance->service_handle, rmsg_handle);
 831
 832        return ret;
 833}
 834
 835/* use port info get message to retrieve port information */
 836static int port_info_get(struct vchiq_mmal_instance *instance,
 837                         struct vchiq_mmal_port *port)
 838{
 839        int ret;
 840        struct mmal_msg m;
 841        struct mmal_msg *rmsg;
 842        struct vchiq_header *rmsg_handle;
 843
  844        /* build the port info get message */
 845        m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
 846        m.u.port_info_get.component_handle = port->component->handle;
 847        m.u.port_info_get.port_type = port->type;
 848        m.u.port_info_get.index = port->index;
 849
 850        ret = send_synchronous_mmal_msg(instance, &m,
 851                                        sizeof(m.u.port_info_get),
 852                                        &rmsg, &rmsg_handle);
 853        if (ret)
 854                return ret;
 855
 856        if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
 857                /* got an unexpected message type in reply */
 858                ret = -EINVAL;
 859                goto release_msg;
 860        }
 861
 862        /* return operation status */
 863        ret = -rmsg->u.port_info_get_reply.status;
 864        if (ret != MMAL_MSG_STATUS_SUCCESS)
 865                goto release_msg;
 866
 867        if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
 868                port->enabled = 0;
 869        else
 870                port->enabled = 1;
 871
 872        /* copy the values out of the message */
 873        port->handle = rmsg->u.port_info_get_reply.port_handle;
 874
  875        /* port type and index are cached for use by port info set,
  876         * as that message does not take a port handle
  877         */
 878        port->type = rmsg->u.port_info_get_reply.port_type;
 879        port->index = rmsg->u.port_info_get_reply.port_index;
 880
 881        port->minimum_buffer.num =
 882            rmsg->u.port_info_get_reply.port.buffer_num_min;
 883        port->minimum_buffer.size =
 884            rmsg->u.port_info_get_reply.port.buffer_size_min;
 885        port->minimum_buffer.alignment =
 886            rmsg->u.port_info_get_reply.port.buffer_alignment_min;
 887
 888        port->recommended_buffer.alignment =
 889            rmsg->u.port_info_get_reply.port.buffer_alignment_min;
 890        port->recommended_buffer.num =
 891            rmsg->u.port_info_get_reply.port.buffer_num_recommended;
 892
 893        port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
 894        port->current_buffer.size =
 895            rmsg->u.port_info_get_reply.port.buffer_size;
 896
 897        /* stream format */
 898        port->format.type = rmsg->u.port_info_get_reply.format.type;
 899        port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
 900        port->format.encoding_variant =
 901            rmsg->u.port_info_get_reply.format.encoding_variant;
 902        port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
 903        port->format.flags = rmsg->u.port_info_get_reply.format.flags;
 904
 905        /* elementary stream format */
 906        memcpy(&port->es,
 907               &rmsg->u.port_info_get_reply.es,
 908               sizeof(union mmal_es_specific_format));
 909        port->format.es = &port->es;
 910
 911        port->format.extradata_size =
 912            rmsg->u.port_info_get_reply.format.extradata_size;
 913        memcpy(port->format.extradata,
 914               rmsg->u.port_info_get_reply.extradata,
 915               port->format.extradata_size);
 916
 917        pr_debug("received port info\n");
 918        dump_port_info(port);
 919
 920release_msg:
 921
 922        pr_debug("%s:result:%d component:0x%x port:%d\n",
 923                 __func__, ret, port->component->handle, port->handle);
 924
 925        vchiq_release_message(instance->service_handle, rmsg_handle);
 926
 927        return ret;
 928}
 929
 930/* create component on vc */
 931static int create_component(struct vchiq_mmal_instance *instance,
 932                            struct vchiq_mmal_component *component,
 933                            const char *name)
 934{
 935        int ret;
 936        struct mmal_msg m;
 937        struct mmal_msg *rmsg;
 938        struct vchiq_header *rmsg_handle;
 939
 940        /* build component create message */
 941        m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
 942        m.u.component_create.client_component = component->client_component;
 943        strncpy(m.u.component_create.name, name,
 944                sizeof(m.u.component_create.name));
 945
 946        ret = send_synchronous_mmal_msg(instance, &m,
 947                                        sizeof(m.u.component_create),
 948                                        &rmsg, &rmsg_handle);
 949        if (ret)
 950                return ret;
 951
 952        if (rmsg->h.type != m.h.type) {
 953                /* got an unexpected message type in reply */
 954                ret = -EINVAL;
 955                goto release_msg;
 956        }
 957
 958        ret = -rmsg->u.component_create_reply.status;
 959        if (ret != MMAL_MSG_STATUS_SUCCESS)
 960                goto release_msg;
 961
 962        /* a valid component response received */
 963        component->handle = rmsg->u.component_create_reply.component_handle;
 964        component->inputs = rmsg->u.component_create_reply.input_num;
 965        component->outputs = rmsg->u.component_create_reply.output_num;
 966        component->clocks = rmsg->u.component_create_reply.clock_num;
 967
 968        pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
 969                 component->handle,
 970                 component->inputs, component->outputs, component->clocks);
 971
 972release_msg:
 973        vchiq_release_message(instance->service_handle, rmsg_handle);
 974
 975        return ret;
 976}
 977
 978/* destroys a component on vc */
 979static int destroy_component(struct vchiq_mmal_instance *instance,
 980                             struct vchiq_mmal_component *component)
 981{
 982        int ret;
 983        struct mmal_msg m;
 984        struct mmal_msg *rmsg;
 985        struct vchiq_header *rmsg_handle;
 986
 987        m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
 988        m.u.component_destroy.component_handle = component->handle;
 989
 990        ret = send_synchronous_mmal_msg(instance, &m,
 991                                        sizeof(m.u.component_destroy),
 992                                        &rmsg, &rmsg_handle);
 993        if (ret)
 994                return ret;
 995
 996        if (rmsg->h.type != m.h.type) {
 997                /* got an unexpected message type in reply */
 998                ret = -EINVAL;
 999                goto release_msg;
1000        }
1001
1002        ret = -rmsg->u.component_destroy_reply.status;
1003
1004release_msg:
1005
1006        vchiq_release_message(instance->service_handle, rmsg_handle);
1007
1008        return ret;
1009}
1010
1011/* enable a component on vc */
1012static int enable_component(struct vchiq_mmal_instance *instance,
1013                            struct vchiq_mmal_component *component)
1014{
1015        int ret;
1016        struct mmal_msg m;
1017        struct mmal_msg *rmsg;
1018        struct vchiq_header *rmsg_handle;
1019
1020        m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1021        m.u.component_enable.component_handle = component->handle;
1022
1023        ret = send_synchronous_mmal_msg(instance, &m,
1024                                        sizeof(m.u.component_enable),
1025                                        &rmsg, &rmsg_handle);
1026        if (ret)
1027                return ret;
1028
1029        if (rmsg->h.type != m.h.type) {
1030                /* got an unexpected message type in reply */
1031                ret = -EINVAL;
1032                goto release_msg;
1033        }
1034
1035        ret = -rmsg->u.component_enable_reply.status;
1036
1037release_msg:
1038        vchiq_release_message(instance->service_handle, rmsg_handle);
1039
1040        return ret;
1041}
1042
1043/* disable a component on vc */
1044static int disable_component(struct vchiq_mmal_instance *instance,
1045                             struct vchiq_mmal_component *component)
1046{
1047        int ret;
1048        struct mmal_msg m;
1049        struct mmal_msg *rmsg;
1050        struct vchiq_header *rmsg_handle;
1051
1052        m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1053        m.u.component_disable.component_handle = component->handle;
1054
1055        ret = send_synchronous_mmal_msg(instance, &m,
1056                                        sizeof(m.u.component_disable),
1057                                        &rmsg, &rmsg_handle);
1058        if (ret)
1059                return ret;
1060
1061        if (rmsg->h.type != m.h.type) {
1062                /* got an unexpected message type in reply */
1063                ret = -EINVAL;
1064                goto release_msg;
1065        }
1066
1067        ret = -rmsg->u.component_disable_reply.status;
1068
1069release_msg:
1070
1071        vchiq_release_message(instance->service_handle, rmsg_handle);
1072
1073        return ret;
1074}
1075
1076/* get version of mmal implementation */
1077static int get_version(struct vchiq_mmal_instance *instance,
1078                       u32 *major_out, u32 *minor_out)
1079{
1080        int ret;
1081        struct mmal_msg m;
1082        struct mmal_msg *rmsg;
1083        struct vchiq_header *rmsg_handle;
1084
1085        m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1086
1087        ret = send_synchronous_mmal_msg(instance, &m,
1088                                        sizeof(m.u.version),
1089                                        &rmsg, &rmsg_handle);
1090        if (ret)
1091                return ret;
1092
1093        if (rmsg->h.type != m.h.type) {
1094                /* got an unexpected message type in reply */
1095                ret = -EINVAL;
1096                goto release_msg;
1097        }
1098
1099        *major_out = rmsg->u.version.major;
1100        *minor_out = rmsg->u.version.minor;
1101
1102release_msg:
1103        vchiq_release_message(instance->service_handle, rmsg_handle);
1104
1105        return ret;
1106}
1107
1108/* do a port action with a port as a parameter */
1109static int port_action_port(struct vchiq_mmal_instance *instance,
1110                            struct vchiq_mmal_port *port,
1111                            enum mmal_msg_port_action_type action_type)
1112{
1113        int ret;
1114        struct mmal_msg m;
1115        struct mmal_msg *rmsg;
1116        struct vchiq_header *rmsg_handle;
1117
1118        m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1119        m.u.port_action_port.component_handle = port->component->handle;
1120        m.u.port_action_port.port_handle = port->handle;
1121        m.u.port_action_port.action = action_type;
1122
1123        port_to_mmal_msg(port, &m.u.port_action_port.port);
1124
1125        ret = send_synchronous_mmal_msg(instance, &m,
1126                                        sizeof(m.u.port_action_port),
1127                                        &rmsg, &rmsg_handle);
1128        if (ret)
1129                return ret;
1130
1131        if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1132                /* got an unexpected message type in reply */
1133                ret = -EINVAL;
1134                goto release_msg;
1135        }
1136
1137        ret = -rmsg->u.port_action_reply.status;
1138
1139        pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1140                 __func__,
1141                 ret, port->component->handle, port->handle,
1142                 port_action_type_names[action_type], action_type);
1143
1144release_msg:
1145        vchiq_release_message(instance->service_handle, rmsg_handle);
1146
1147        return ret;
1148}
1149
1150/* do a port action with handles as parameters */
1151static int port_action_handle(struct vchiq_mmal_instance *instance,
1152                              struct vchiq_mmal_port *port,
1153                              enum mmal_msg_port_action_type action_type,
1154                              u32 connect_component_handle,
1155                              u32 connect_port_handle)
1156{
1157        int ret;
1158        struct mmal_msg m;
1159        struct mmal_msg *rmsg;
1160        struct vchiq_header *rmsg_handle;
1161
1162        m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1163
1164        m.u.port_action_handle.component_handle = port->component->handle;
1165        m.u.port_action_handle.port_handle = port->handle;
1166        m.u.port_action_handle.action = action_type;
1167
1168        m.u.port_action_handle.connect_component_handle =
1169            connect_component_handle;
1170        m.u.port_action_handle.connect_port_handle = connect_port_handle;
1171
1172        ret = send_synchronous_mmal_msg(instance, &m,
1173                                        sizeof(m.u.port_action_handle),
1174                                        &rmsg, &rmsg_handle);
1175        if (ret)
1176                return ret;
1177
1178        if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1179                /* got an unexpected message type in reply */
1180                ret = -EINVAL;
1181                goto release_msg;
1182        }
1183
1184        ret = -rmsg->u.port_action_reply.status;
1185
1186        pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1187                 __func__,
1188                 ret, port->component->handle, port->handle,
1189                 port_action_type_names[action_type],
1190                 action_type, connect_component_handle, connect_port_handle);
1191
1192release_msg:
1193        vchiq_release_message(instance->service_handle, rmsg_handle);
1194
1195        return ret;
1196}
1197
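/* set a parameter on a port using MMAL_MSG_TYPE_PORT_PARAMETER_SET */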
1198static int port_parameter_set(struct vchiq_mmal_instance *instance,
1199                              struct vchiq_mmal_port *port,
1200                              u32 parameter_id, void *value, u32 value_size)
1201{
1202        int ret;
1203        struct mmal_msg m;
1204        struct mmal_msg *rmsg;
1205        struct vchiq_header *rmsg_handle;
1206
1207        m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1208
1209        m.u.port_parameter_set.component_handle = port->component->handle;
1210        m.u.port_parameter_set.port_handle = port->handle;
1211        m.u.port_parameter_set.id = parameter_id;
1212        m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1213        memcpy(&m.u.port_parameter_set.value, value, value_size);
1214
1215        ret = send_synchronous_mmal_msg(instance, &m,
1216                                        (4 * sizeof(u32)) + value_size,
1217                                        &rmsg, &rmsg_handle);
1218        if (ret)
1219                return ret;
1220
1221        if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1222                /* got an unexpected message type in reply */
1223                ret = -EINVAL;
1224                goto release_msg;
1225        }
1226
1227        ret = -rmsg->u.port_parameter_set_reply.status;
1228
1229        pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1230                 __func__,
1231                 ret, port->component->handle, port->handle, parameter_id);
1232
1233release_msg:
1234        vchiq_release_message(instance->service_handle, rmsg_handle);
1235
1236        return ret;
1237}
1238
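/* read a parameter from a port using MMAL_MSG_TYPE_PORT_PARAMETER_GET
 *
 * *value_size is always updated to the true size of the parameter, even
 * if the supplied buffer was too small to receive all of it.
 */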
1239static int port_parameter_get(struct vchiq_mmal_instance *instance,
1240                              struct vchiq_mmal_port *port,
1241                              u32 parameter_id, void *value, u32 *value_size)
1242{
1243        int ret;
1244        struct mmal_msg m;
1245        struct mmal_msg *rmsg;
1246        struct vchiq_header *rmsg_handle;
1247
1248        m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1249
1250        m.u.port_parameter_get.component_handle = port->component->handle;
1251        m.u.port_parameter_get.port_handle = port->handle;
1252        m.u.port_parameter_get.id = parameter_id;
1253        m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1254
1255        ret = send_synchronous_mmal_msg(instance, &m,
1256                                        sizeof(struct
1257                                               mmal_msg_port_parameter_get),
1258                                        &rmsg, &rmsg_handle);
1259        if (ret)
1260                return ret;
1261
1262        if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1263                /* got an unexpected message type in reply */
1264                pr_err("Incorrect reply type %d\n", rmsg->h.type);
1265                ret = -EINVAL;
1266                goto release_msg;
1267        }
1268
1269        ret = rmsg->u.port_parameter_get_reply.status;
1270
1271        /* port_parameter_get_reply.size includes the header,
1272         * whilst *value_size doesn't.
1273         */
1274        rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1275
1276        if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1277                /* Copy only as much as we have space for
1278                 * but report true size of parameter
1279                 */
1280                memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1281                       *value_size);
1282        } else {
1283                memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1284                       rmsg->u.port_parameter_get_reply.size);
1285        }
1286        /* Always report the size of the returned parameter to the caller */
1287        *value_size = rmsg->u.port_parameter_get_reply.size;
1288
1289        pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1290                 ret, port->component->handle, port->handle, parameter_id);
1291
1292release_msg:
1293        vchiq_release_message(instance->service_handle, rmsg_handle);
1294
1295        return ret;
1296}
1297
1298/* disables a port and drains buffers from it */
1299static int port_disable(struct vchiq_mmal_instance *instance,
1300                        struct vchiq_mmal_port *port)
1301{
1302        int ret;
1303        struct list_head *q, *buf_head;
1304        unsigned long flags = 0;
1305
1306        if (!port->enabled)
1307                return 0;
1308
1309        port->enabled = 0;
1310
1311        ret = port_action_port(instance, port,
1312                               MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1313        if (ret == 0) {
1314                /*
1315                 * Drain all queued buffers on port. This should only
1316                 * apply to buffers that have been queued before the port
1317                 * has been enabled. If the port has been enabled and buffers
1318                 * passed, then the buffers should have been removed from this
1319                 * list, and we should get the relevant callbacks via VCHIQ
1320                 * to release the buffers.
1321                 */
1322                spin_lock_irqsave(&port->slock, flags);
1323
1324                list_for_each_safe(buf_head, q, &port->buffers) {
1325                        struct mmal_buffer *mmalbuf;
1326
1327                        mmalbuf = list_entry(buf_head, struct mmal_buffer,
1328                                             list);
1329                        list_del(buf_head);
1330                        if (port->buffer_cb) {
1331                                mmalbuf->length = 0;
1332                                mmalbuf->mmal_flags = 0;
1333                                mmalbuf->dts = MMAL_TIME_UNKNOWN;
1334                                mmalbuf->pts = MMAL_TIME_UNKNOWN;
1335                                port->buffer_cb(instance,
1336                                                port, 0, mmalbuf);
1337                        }
1338                }
1339
1340                spin_unlock_irqrestore(&port->slock, flags);
1341
1342                ret = port_info_get(instance, port);
1343        }
1344
1345        return ret;
1346}
1347
1348/* enable a port */
1349static int port_enable(struct vchiq_mmal_instance *instance,
1350                       struct vchiq_mmal_port *port)
1351{
1352        unsigned int hdr_count;
1353        struct list_head *q, *buf_head;
1354        int ret;
1355
1356        if (port->enabled)
1357                return 0;
1358
1359        ret = port_action_port(instance, port,
1360                               MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1361        if (ret)
1362                goto done;
1363
1364        port->enabled = 1;
1365
1366        if (port->buffer_cb) {
1367                /* send buffer headers to videocore */
1368                hdr_count = 1;
1369                list_for_each_safe(buf_head, q, &port->buffers) {
1370                        struct mmal_buffer *mmalbuf;
1371
1372                        mmalbuf = list_entry(buf_head, struct mmal_buffer,
1373                                             list);
1374                        ret = buffer_from_host(instance, port, mmalbuf);
1375                        if (ret)
1376                                goto done;
1377
1378                        list_del(buf_head);
1379                        hdr_count++;
1380                        if (hdr_count > port->current_buffer.num)
1381                                break;
1382                }
1383        }
1384
1385        ret = port_info_get(instance, port);
1386
1387done:
1388        return ret;
1389}
1390
1391/* ------------------------------------------------------------------
1392 * Exported API
1393 *------------------------------------------------------------------
1394 */
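/*
 * Illustrative call sequence for a client of this interface (for example
 * the bcm2835-camera driver); error handling is omitted and the component
 * name and port indices below are only examples:
 *
 *   vchiq_mmal_init(&instance);
 *   vchiq_mmal_component_init(instance, "ril.camera", &component);
 *   vchiq_mmal_port_set_format(instance, &component->output[0]);
 *   vchiq_mmal_port_enable(instance, &component->output[0], buffer_cb);
 *   vchiq_mmal_component_enable(instance, component);
 *   ...
 *   vchiq_mmal_port_disable(instance, &component->output[0]);
 *   vchiq_mmal_component_disable(instance, component);
 *   vchiq_mmal_component_finalise(instance, component);
 *   vchiq_mmal_finalise(instance);
 */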
1395
1396int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1397                               struct vchiq_mmal_port *port)
1398{
1399        int ret;
1400
1401        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1402                return -EINTR;
1403
1404        ret = port_info_set(instance, port);
1405        if (ret)
1406                goto release_unlock;
1407
1408        /* read what has actually been set */
1409        ret = port_info_get(instance, port);
1410
1411release_unlock:
1412        mutex_unlock(&instance->vchiq_mutex);
1413
1414        return ret;
1415}
1416EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1417
1418int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1419                                  struct vchiq_mmal_port *port,
1420                                  u32 parameter, void *value, u32 value_size)
1421{
1422        int ret;
1423
1424        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1425                return -EINTR;
1426
1427        ret = port_parameter_set(instance, port, parameter, value, value_size);
1428
1429        mutex_unlock(&instance->vchiq_mutex);
1430
1431        return ret;
1432}
1433EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1434
1435int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1436                                  struct vchiq_mmal_port *port,
1437                                  u32 parameter, void *value, u32 *value_size)
1438{
1439        int ret;
1440
1441        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1442                return -EINTR;
1443
1444        ret = port_parameter_get(instance, port, parameter, value, value_size);
1445
1446        mutex_unlock(&instance->vchiq_mutex);
1447
1448        return ret;
1449}
1450EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
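
/*
 * Hedged sketch: set a u32 parameter and read it back.
 * MMAL_PARAMETER_ZERO_COPY (from mmal-parameters.h) is used purely as an
 * example of a u32-sized parameter id.  The getter takes the size by
 * pointer; initialise it to the buffer size before the call.
 */
static int example_roundtrip_parameter(struct vchiq_mmal_instance *instance,
                                       struct vchiq_mmal_port *port)
{
        u32 enable = 1;
        u32 readback = 0;
        u32 readback_size = sizeof(readback);
        int ret;

        ret = vchiq_mmal_port_parameter_set(instance, port,
                                            MMAL_PARAMETER_ZERO_COPY,
                                            &enable, sizeof(enable));
        if (ret)
                return ret;

        return vchiq_mmal_port_parameter_get(instance, port,
                                             MMAL_PARAMETER_ZERO_COPY,
                                             &readback, &readback_size);
}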
1451
1452/* enable a port
1453 *
1454 * enables a port and queues any pending buffers to the VPU when a
1455 * callback handler is provided
1456 */
1457int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1458                           struct vchiq_mmal_port *port,
1459                           vchiq_mmal_buffer_cb buffer_cb)
1460{
1461        int ret;
1462
1463        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1464                return -EINTR;
1465
1466        /* already enabled - noop */
1467        if (port->enabled) {
1468                ret = 0;
1469                goto unlock;
1470        }
1471
1472        port->buffer_cb = buffer_cb;
1473
1474        ret = port_enable(instance, port);
1475
1476unlock:
1477        mutex_unlock(&instance->vchiq_mutex);
1478
1479        return ret;
1480}
1481EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
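
/*
 * Hedged sketch: enable a port with a buffer callback.  The callback
 * signature here is inferred from the buffer_cb invocation in port_disable()
 * above (instance, port, status, buffer) and is assumed to return void.
 */
static void example_buffer_done(struct vchiq_mmal_instance *instance,
                                struct vchiq_mmal_port *port,
                                int status, struct mmal_buffer *buf)
{
        /* return the completed buffer (length == 0 means flushed) to the
         * client's queue here
         */
}

static int example_start_capture(struct vchiq_mmal_instance *instance,
                                 struct vchiq_mmal_port *port)
{
        return vchiq_mmal_port_enable(instance, port, example_buffer_done);
}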
1482
1483int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1484                            struct vchiq_mmal_port *port)
1485{
1486        int ret;
1487
1488        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1489                return -EINTR;
1490
1491        if (!port->enabled) {
1492                mutex_unlock(&instance->vchiq_mutex);
1493                return 0;
1494        }
1495
1496        ret = port_disable(instance, port);
1497
1498        mutex_unlock(&instance->vchiq_mutex);
1499
1500        return ret;
1501}
1502EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1503
1504/* Ports will be connected in a tunnelled manner so data buffers
1505 * are not handled by the client.
1506 */
1507int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1508                                   struct vchiq_mmal_port *src,
1509                                   struct vchiq_mmal_port *dst)
1510{
1511        int ret;
1512
1513        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1514                return -EINTR;
1515
1516        /* disconnect ports if connected */
1517        if (src->connected) {
1518                ret = port_disable(instance, src);
1519                if (ret) {
1520                        pr_err("failed disabling src port(%d)\n", ret);
1521                        goto release_unlock;
1522                }
1523
1524                /* no need to disable the destination port; the ports are
1525                 * connected, so it happens automatically
1526                 */
1527
1528                ret = port_action_handle(instance, src,
1529                                         MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1530                                         src->connected->component->handle,
1531                                         src->connected->handle);
1532                if (ret < 0) {
1533                        pr_err("failed disconnecting src port\n");
1534                        goto release_unlock;
1535                }
1536                src->connected->enabled = 0;
1537                src->connected = NULL;
1538        }
1539
1540        if (!dst) {
1541                /* do not make new connection */
1542                ret = 0;
1543                pr_debug("not making new connection\n");
1544                goto release_unlock;
1545        }
1546
1547        /* copy src port format to dst */
1548        dst->format.encoding = src->format.encoding;
1549        dst->es.video.width = src->es.video.width;
1550        dst->es.video.height = src->es.video.height;
1551        dst->es.video.crop.x = src->es.video.crop.x;
1552        dst->es.video.crop.y = src->es.video.crop.y;
1553        dst->es.video.crop.width = src->es.video.crop.width;
1554        dst->es.video.crop.height = src->es.video.crop.height;
1555        dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1556        dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1557
1558        /* set new format */
1559        ret = port_info_set(instance, dst);
1560        if (ret) {
1561                pr_debug("setting port info failed\n");
1562                goto release_unlock;
1563        }
1564
1565        /* read what has actually been set */
1566        ret = port_info_get(instance, dst);
1567        if (ret) {
1568                pr_debug("read back port info failed\n");
1569                goto release_unlock;
1570        }
1571
1572        /* connect two ports together */
1573        ret = port_action_handle(instance, src,
1574                                 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1575                                 dst->component->handle, dst->handle);
1576        if (ret < 0) {
1577                pr_debug("connecting port %d:%d to %d:%d failed\n",
1578                         src->component->handle, src->handle,
1579                         dst->component->handle, dst->handle);
1580                goto release_unlock;
1581        }
1582        src->connected = dst;
1583
1584release_unlock:
1585
1586        mutex_unlock(&instance->vchiq_mutex);
1587
1588        return ret;
1589}
1590EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
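
/*
 * Hedged sketch: tunnel a source component's first output into a sink
 * component's first input, then break the tunnel again by passing a NULL
 * destination.  "src_comp" and "sink_comp" stand for components created
 * with vchiq_mmal_component_init().
 */
static int example_tunnel(struct vchiq_mmal_instance *instance,
                          struct vchiq_mmal_component *src_comp,
                          struct vchiq_mmal_component *sink_comp)
{
        int ret;

        ret = vchiq_mmal_port_connect_tunnel(instance, &src_comp->output[0],
                                             &sink_comp->input[0]);
        if (ret)
                return ret;

        /* ... stream through the tunnel ... */

        /* a NULL destination only tears down the existing connection */
        return vchiq_mmal_port_connect_tunnel(instance, &src_comp->output[0],
                                              NULL);
}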
1591
1592int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1593                             struct vchiq_mmal_port *port,
1594                             struct mmal_buffer *buffer)
1595{
1596        unsigned long flags = 0;
1597        int ret;
1598
1599        ret = buffer_from_host(instance, port, buffer);
1600        if (ret == -EINVAL) {
1601                /* Port is disabled. Queue for when it is enabled. */
1602                spin_lock_irqsave(&port->slock, flags);
1603                list_add_tail(&buffer->list, &port->buffers);
1604                spin_unlock_irqrestore(&port->slock, flags);
1605        }
1606
1607        return 0;
1608}
1609EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
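
/*
 * Hedged sketch: submission is asynchronous.  If the port is not enabled the
 * buffer is parked on port->buffers and sent by port_enable() later;
 * completion is reported through the port's buffer callback.
 */
static int example_submit(struct vchiq_mmal_instance *instance,
                          struct vchiq_mmal_port *port,
                          struct mmal_buffer *buf)
{
        /* buf->msg_context is expected to have been set up already via
         * mmal_vchi_buffer_init() below
         */
        return vchiq_mmal_submit_buffer(instance, port, buf);
}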
1610
1611int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1612                          struct mmal_buffer *buf)
1613{
1614        struct mmal_msg_context *msg_context = get_msg_context(instance);
1615
1616        if (IS_ERR(msg_context))
1617                return PTR_ERR(msg_context);
1618
1619        buf->msg_context = msg_context;
1620        return 0;
1621}
1622EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1623
1624int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1625{
1626        struct mmal_msg_context *msg_context = buf->msg_context;
1627
1628        if (msg_context)
1629                release_msg_context(msg_context);
1630        buf->msg_context = NULL;
1631
1632        return 0;
1633}
1634EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
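
/*
 * Hedged lifecycle sketch: a host buffer carries a message context for as
 * long as it is in use, so clients typically pair these two helpers around
 * the buffer lifetime (e.g. videobuf2 buf_init/buf_cleanup hooks).
 */
static int example_buffer_lifetime(struct vchiq_mmal_instance *instance,
                                   struct vchiq_mmal_port *port,
                                   struct mmal_buffer *buf)
{
        int ret = mmal_vchi_buffer_init(instance, buf);

        if (ret)
                return ret;

        ret = vchiq_mmal_submit_buffer(instance, port, buf);

        /* ... wait for the buffer callback before tearing down ... */

        mmal_vchi_buffer_cleanup(buf);
        return ret;
}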
1635
1636/* Initialise a mmal component and its ports
1637 *
1638 */
1639int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1640                              const char *name,
1641                              struct vchiq_mmal_component **component_out)
1642{
1643        int ret;
1644        int idx;                /* port index */
1645        struct vchiq_mmal_component *component = NULL;
1646
1647        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1648                return -EINTR;
1649
1650        for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
1651                if (!instance->component[idx].in_use) {
1652                        component = &instance->component[idx];
1653                        component->in_use = 1;
1654                        break;
1655                }
1656        }
1657
1658        if (!component) {
1659                ret = -EINVAL;  /* todo is this correct error? */
1660                goto unlock;
1661        }
1662
1663        /* We need a handle to reference back to our component structure.
1664         * Use the array index in instance->component rather than rolling
1665         * another IDR.
1666         */
1667        component->client_component = idx;
1668
1669        ret = create_component(instance, component, name);
1670        if (ret < 0) {
1671                pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1672                       __func__, ret);
1673                goto unlock;
1674        }
1675
1676        /* ports info needs gathering */
1677        component->control.type = MMAL_PORT_TYPE_CONTROL;
1678        component->control.index = 0;
1679        component->control.component = component;
1680        spin_lock_init(&component->control.slock);
1681        INIT_LIST_HEAD(&component->control.buffers);
1682        ret = port_info_get(instance, &component->control);
1683        if (ret < 0)
1684                goto release_component;
1685
1686        for (idx = 0; idx < component->inputs; idx++) {
1687                component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1688                component->input[idx].index = idx;
1689                component->input[idx].component = component;
1690                spin_lock_init(&component->input[idx].slock);
1691                INIT_LIST_HEAD(&component->input[idx].buffers);
1692                ret = port_info_get(instance, &component->input[idx]);
1693                if (ret < 0)
1694                        goto release_component;
1695        }
1696
1697        for (idx = 0; idx < component->outputs; idx++) {
1698                component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1699                component->output[idx].index = idx;
1700                component->output[idx].component = component;
1701                spin_lock_init(&component->output[idx].slock);
1702                INIT_LIST_HEAD(&component->output[idx].buffers);
1703                ret = port_info_get(instance, &component->output[idx]);
1704                if (ret < 0)
1705                        goto release_component;
1706        }
1707
1708        for (idx = 0; idx < component->clocks; idx++) {
1709                component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1710                component->clock[idx].index = idx;
1711                component->clock[idx].component = component;
1712                spin_lock_init(&component->clock[idx].slock);
1713                INIT_LIST_HEAD(&component->clock[idx].buffers);
1714                ret = port_info_get(instance, &component->clock[idx]);
1715                if (ret < 0)
1716                        goto release_component;
1717        }
1718
1719        *component_out = component;
1720
1721        mutex_unlock(&instance->vchiq_mutex);
1722
1723        return 0;
1724
1725release_component:
1726        destroy_component(instance, component);
1727unlock:
1728        if (component)
1729                component->in_use = 0;
1730        mutex_unlock(&instance->vchiq_mutex);
1731
1732        return ret;
1733}
1734EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
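
/*
 * Hedged sketch: create a component by its VPU name and sanity-check the port
 * counts that port_info_get() filled in.  "ril.camera" is used here only as
 * an example component name.
 */
static int example_create_camera(struct vchiq_mmal_instance *instance,
                                 struct vchiq_mmal_component **out)
{
        struct vchiq_mmal_component *component;
        int ret;

        ret = vchiq_mmal_component_init(instance, "ril.camera", &component);
        if (ret)
                return ret;

        if (!component->outputs) {
                vchiq_mmal_component_finalise(instance, component);
                return -EINVAL;
        }

        *out = component;
        return 0;
}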
1735
1736/*
1737 * cause a mmal component to be destroyed
1738 */
1739int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1740                                  struct vchiq_mmal_component *component)
1741{
1742        int ret;
1743
1744        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1745                return -EINTR;
1746
1747        if (component->enabled)
1748                ret = disable_component(instance, component);
1749
1750        ret = destroy_component(instance, component);
1751
1752        component->in_use = 0;
1753
1754        mutex_unlock(&instance->vchiq_mutex);
1755
1756        return ret;
1757}
1758EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1759
1760/*
1761 * cause a mmal component to be enabled
1762 */
1763int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1764                                struct vchiq_mmal_component *component)
1765{
1766        int ret;
1767
1768        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1769                return -EINTR;
1770
1771        if (component->enabled) {
1772                mutex_unlock(&instance->vchiq_mutex);
1773                return 0;
1774        }
1775
1776        ret = enable_component(instance, component);
1777        if (ret == 0)
1778                component->enabled = true;
1779
1780        mutex_unlock(&instance->vchiq_mutex);
1781
1782        return ret;
1783}
1784EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1785
1786/*
1787 * cause a mmal component to be disabled
1788 */
1789int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1790                                 struct vchiq_mmal_component *component)
1791{
1792        int ret;
1793
1794        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1795                return -EINTR;
1796
1797        if (!component->enabled) {
1798                mutex_unlock(&instance->vchiq_mutex);
1799                return 0;
1800        }
1801
1802        ret = disable_component(instance, component);
1803        if (ret == 0)
1804                component->enabled = 0;
1805
1806        mutex_unlock(&instance->vchiq_mutex);
1807
1808        return ret;
1809}
1810EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
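
/*
 * Hedged sketch: both calls are no-ops when the component is already in the
 * requested state, so a simple start/stop wrapper needs no state tracking of
 * its own.
 */
static int example_stream_session(struct vchiq_mmal_instance *instance,
                                  struct vchiq_mmal_component *component)
{
        int ret = vchiq_mmal_component_enable(instance, component);

        if (ret)
                return ret;

        /* ... enable ports, submit buffers, stream ... */

        return vchiq_mmal_component_disable(instance, component);
}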
1811
1812int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1813                       u32 *major_out, u32 *minor_out)
1814{
1815        int ret;
1816
1817        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1818                return -EINTR;
1819
1820        ret = get_version(instance, major_out, minor_out);
1821
1822        mutex_unlock(&instance->vchiq_mutex);
1823
1824        return ret;
1825}
1826EXPORT_SYMBOL_GPL(vchiq_mmal_version);
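
/*
 * Minimal sketch: query the firmware's MMAL protocol version, e.g. to log it
 * once at probe time.
 */
static void example_log_version(struct vchiq_mmal_instance *instance)
{
        u32 major = 0, minor = 0;

        if (!vchiq_mmal_version(instance, &major, &minor))
                pr_info("mmal protocol version %u.%u\n", major, minor);
}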
1827
1828int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1829{
1830        int status = 0;
1831
1832        if (!instance)
1833                return -EINVAL;
1834
1835        if (mutex_lock_interruptible(&instance->vchiq_mutex))
1836                return -EINTR;
1837
1838        vchiq_use_service(instance->service_handle);
1839
1840        status = vchiq_close_service(instance->service_handle);
1841        if (status != 0)
1842                pr_err("mmal-vchiq: VCHIQ close failed\n");
1843
1844        mutex_unlock(&instance->vchiq_mutex);
1845
1846        vchiq_shutdown(instance->vchiq_instance);
1847        flush_workqueue(instance->bulk_wq);
1848        destroy_workqueue(instance->bulk_wq);
1849
1850        vfree(instance->bulk_scratch);
1851
1852        idr_destroy(&instance->context_map);
1853
1854        kfree(instance);
1855
1856        return status;
1857}
1858EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1859
1860int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1861{
1862        int status;
1863        int err = -ENODEV;
1864        struct vchiq_mmal_instance *instance;
1865        struct vchiq_instance *vchiq_instance;
1866        struct vchiq_service_params_kernel params = {
1867                .version                = VC_MMAL_VER,
1868                .version_min            = VC_MMAL_MIN_VER,
1869                .fourcc                 = VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1870                .callback               = service_callback,
1871                .userdata               = NULL,
1872        };
1873
1874        /* compile-time checks to ensure structure sizes, as these are
1875         * directly (de)serialised from memory.
1876         */
1877
1878        /* ensure the header structure has packed to the correct size */
1879        BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1880
1881        /* ensure message structure does not exceed maximum length */
1882        BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1883
1884        /* mmal port struct is correct size */
1885        BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1886
1887        /* create a vchi instance */
1888        status = vchiq_initialise(&vchiq_instance);
1889        if (status) {
1890                pr_err("Failed to initialise VCHI instance (status=%d)\n",
1891                       status);
1892                return -EIO;
1893        }
1894
1895        status = vchiq_connect(vchiq_instance);
1896        if (status) {
1897                pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1898                err = -EIO;
1899                goto err_shutdown_vchiq;
1900        }
1901
1902        instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1903
1904        if (!instance) {
1905                err = -ENOMEM;
1906                goto err_shutdown_vchiq;
1907        }
1908
1909        mutex_init(&instance->vchiq_mutex);
1910
1911        instance->bulk_scratch = vmalloc(PAGE_SIZE);
1912        instance->vchiq_instance = vchiq_instance;
1913
1914        mutex_init(&instance->context_map_lock);
1915        idr_init_base(&instance->context_map, 1);
1916
1917        params.userdata = instance;
1918
1919        instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1920                                                    WQ_MEM_RECLAIM);
1921        if (!instance->bulk_wq)
1922                goto err_free;
1923
1924        status = vchiq_open_service(vchiq_instance, &params,
1925                                    &instance->service_handle);
1926        if (status) {
1927                pr_err("Failed to open VCHI service connection (status=%d)\n",
1928                       status);
1929                goto err_close_services;
1930        }
1931
1932        vchiq_release_service(instance->service_handle);
1933
1934        *out_instance = instance;
1935
1936        return 0;
1937
1938err_close_services:
1939        vchiq_close_service(instance->service_handle);
1940        destroy_workqueue(instance->bulk_wq);
1941err_free:
1942        vfree(instance->bulk_scratch);
1943        kfree(instance);
1944err_shutdown_vchiq:
1945        vchiq_shutdown(vchiq_instance);
1946        return err;
1947}
1948EXPORT_SYMBOL_GPL(vchiq_mmal_init);
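
/*
 * Hedged sketch of the overall instance lifetime as a client driver would use
 * it: initialise once at probe, tear down at remove.  Everything between the
 * two calls is elided.
 */
static int example_instance_lifetime(void)
{
        struct vchiq_mmal_instance *instance;
        int ret = vchiq_mmal_init(&instance);

        if (ret)
                return ret;

        /* ... create components, enable ports, stream ... */

        return vchiq_mmal_finalise(instance);
}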
1949
1950MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1951MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1952MODULE_LICENSE("GPL");
1953