linux/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
<<
>>
Prefs
   1/**
   2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
   3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
   4 *
   5 * Redistribution and use in source and binary forms, with or without
   6 * modification, are permitted provided that the following conditions
   7 * are met:
   8 * 1. Redistributions of source code must retain the above copyright
   9 *    notice, this list of conditions, and the following disclaimer,
  10 *    without modification.
  11 * 2. Redistributions in binary form must reproduce the above copyright
  12 *    notice, this list of conditions and the following disclaimer in the
  13 *    documentation and/or other materials provided with the distribution.
  14 * 3. The names of the above-listed copyright holders may not be used
  15 *    to endorse or promote products derived from this software without
  16 *    specific prior written permission.
  17 *
  18 * ALTERNATIVELY, this software may be distributed under the terms of the
  19 * GNU General Public License ("GPL") version 2, as published by the Free
  20 * Software Foundation.
  21 *
  22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
  23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33 */
  34
  35#include <linux/kernel.h>
  36#include <linux/module.h>
  37#include <linux/sched/signal.h>
  38#include <linux/types.h>
  39#include <linux/errno.h>
  40#include <linux/cdev.h>
  41#include <linux/fs.h>
  42#include <linux/device.h>
  43#include <linux/mm.h>
  44#include <linux/highmem.h>
  45#include <linux/pagemap.h>
  46#include <linux/bug.h>
  47#include <linux/semaphore.h>
  48#include <linux/list.h>
  49#include <linux/of.h>
  50#include <linux/platform_device.h>
  51#include <linux/compat.h>
  52#include <soc/bcm2835/raspberrypi-firmware.h>
  53
  54#include "vchiq_core.h"
  55#include "vchiq_ioctl.h"
  56#include "vchiq_arm.h"
  57#include "vchiq_debugfs.h"
  58#include "vchiq_killable.h"
  59
  60#define DEVICE_NAME "vchiq"
  61
  62/* Override the default prefix, which would be vchiq_arm (from the filename) */
  63#undef MODULE_PARAM_PREFIX
  64#define MODULE_PARAM_PREFIX DEVICE_NAME "."
  65
  66#define VCHIQ_MINOR 0
  67
  68/* Some per-instance constants */
  69#define MAX_COMPLETIONS 128
  70#define MAX_SERVICES 64
  71#define MAX_ELEMENTS 8
  72#define MSG_QUEUE_SIZE 128
  73
  74#define KEEPALIVE_VER 1
  75#define KEEPALIVE_VER_MIN KEEPALIVE_VER
  76
  77/* Run time control of log level, based on KERN_XXX level. */
  78int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
  79int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
  80
  81#define SUSPEND_TIMER_TIMEOUT_MS 100
  82#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
  83
#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
/* Printable names for the VC suspend states.  NOTE(review): ordering is
 * assumed to track the corresponding enum (offset by VC_SUSPEND_NUM_OFFSET
 * so the negative states come first) — confirm against vchiq_arm.h. */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
/* Printable names for the VC resume states; same ordering assumption as
 * suspend_state_names, offset by VC_RESUME_NUM_OFFSET. */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
 102/* The number of times we allow force suspend to timeout before actually
 103** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
 104** correctly - we don't want to prevent ARM suspend indefinitely in this case.
 105*/
 106#define FORCE_SUSPEND_FAIL_MAX 8
 107
 108/* The time in ms allowed for videocore to go idle when force suspend has been
 109 * requested */
 110#define FORCE_SUSPEND_TIMEOUT_MS 200
 111
 112static void suspend_timer_callback(struct timer_list *t);
 113
/* Per-service state for a service created through the character device.
 * Allocated in the CREATE_SERVICE ioctl and freed via user_service_free(). */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* the underlying vchiq service */
	void *userdata;			/* opaque value supplied by the client */
	VCHIQ_INSTANCE_T instance;	/* owning instance (one per open of the device) */
	char is_vchi;			/* non-zero: messages are buffered in msg_queue */
	char dequeue_pending;		/* a client thread is blocked in DEQUEUE_MESSAGE */
	char close_pending;		/* CLOSED queued; awaiting CLOSE_DELIVERED ioctl */
	int message_available_pos;	/* completion_insert position of the last
					 * MESSAGE_AVAILABLE completion queued */
	int msg_insert;			/* free-running insert index into msg_queue */
	int msg_remove;			/* free-running remove index into msg_queue */
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is dequeued */
	struct semaphore close_event;	/* signalled by close_delivered() */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE]; /* ring of pending messages */
} USER_SERVICE_T;
 129
/* A bulk_waiter wrapper that can be parked on an instance's
 * bulk_waiter_list, keyed by the pid of the waiting process. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* pid of the process that queued the bulk */
	struct list_head list;		/* link on instance->bulk_waiter_list */
};
 135
/* Per-open-file state for /dev/vchiq.  Completions from service callbacks
 * are queued in a fixed-size ring and drained by AWAIT_COMPLETION. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;		/* the global vchiq connection state */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS]; /* completion ring */
	int completion_insert;		/* free-running producer index */
	int completion_remove;		/* free-running consumer index */
	struct semaphore insert_event;	/* signalled when a completion is queued */
	struct semaphore remove_event;	/* signalled when a completion is removed */
	struct mutex completion_mutex;

	int connected;			/* CONNECT ioctl has succeeded */
	int closing;			/* instance is shutting down; callbacks
					 * stop queueing completions */
	int pid;			/* process id recorded for this instance */
	int mark;
	int use_close_delivered;	/* client uses the CLOSE_DELIVERED ioctl */
	int trace;

	struct list_head bulk_waiter_list;	/* outstanding blocking bulks */
	struct mutex bulk_waiter_list_mutex;	/* protects bulk_waiter_list */

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};
 157
/* State carried through a state-dump, copying formatted text out to a
 * user-space buffer. */
typedef struct dump_context_struct {
	char __user *buf;	/* destination user buffer */
	size_t actual;		/* bytes produced so far */
	size_t space;		/* remaining space in buf */
	loff_t offset;		/* file offset at which the dump started */
} DUMP_CONTEXT_T;
 164
/* Character-device plumbing and the single global vchiq state. */
static struct cdev    vchiq_cdev;
static dev_t          vchiq_devid;
static VCHIQ_STATE_T g_state;
static struct class  *vchiq_class;
static struct device *vchiq_dev;
/* Serialises access to the per-service msg_queue rings. */
static DEFINE_SPINLOCK(msg_queue_spinlock);
 171
/* Ioctl names indexed by _IOC_NR(cmd), used for trace logging only.
 * The order must match the VCHIQ_IOC_* numbering; the static assert
 * below enforces that the table stays in step with VCHIQ_IOC_MAX. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));
 195
 196/****************************************************************************
 197*
 198*   add_completion
 199*
 200***************************************************************************/
 201
/*
 * Queue one completion record on @instance's ring for delivery to the
 * user-mode client via the AWAIT_COMPLETION ioctl.
 *
 * If the ring is full, blocks interruptibly until the client consumes an
 * entry.  Returns VCHIQ_RETRY if the wait is interrupted by a signal,
 * otherwise VCHIQ_SUCCESS (including the instance-closing case, where the
 * completion is deliberately dropped).
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	/* Indices are free-running; the difference is the fill level. */
	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so masking wraps the index. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	/* Remember where a MESSAGE_AVAILABLE was queued so service_callback
	 * can decide whether one is already pending for this service. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
 261
 262/****************************************************************************
 263*
 264*   service_callback
 265*
 266***************************************************************************/
 267
/*
 * Callback attached to every service created through the character device.
 *
 * For "vchi" services, incoming message headers are buffered in the
 * per-service msg_queue (blocking interruptibly when it is full) and the
 * completion queue only receives a MESSAGE_AVAILABLE marker when no thread
 * is already waiting; all other events are forwarded to add_completion().
 *
 * Returns VCHIQ_RETRY when interrupted by a signal, VCHIQ_ERROR when woken
 * because the instance is closing while the msg queue is full, otherwise
 * the result of add_completion() (or VCHIQ_SUCCESS when skipped).
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Drop events for instances that are gone or shutting down. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Wait while the message ring is full; the lock is dropped
		 * around the sleep and retaken before re-testing. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		up(&user_service->insert_event);

		/* The header is now owned by the msg queue, so don't pass it
		 * on to the completion queue as well. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
 374
 375/****************************************************************************
 376*
 377*   user_service_free
 378*
 379***************************************************************************/
/* Userdata terminator for services created via the char device; frees the
 * USER_SERVICE_T allocated in the CREATE_SERVICE ioctl. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
 385
 386/****************************************************************************
 387*
 388*   close_delivered
 389*
 390***************************************************************************/
/*
 * Called once the client has consumed a CLOSED completion (directly, or via
 * the CLOSE_DELIVERED ioctl).  Releases the extra service reference taken
 * in add_completion() and wakes any thread blocked in the close/remove
 * service ioctls.
 */
static void close_delivered(USER_SERVICE_T *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"close_delivered(handle=%x)",
		user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		up(&user_service->close_event);

		user_service->close_pending = 0;
	}
}
 407
/* Cursor over a user-supplied array of vchiq_elements, advanced by
 * vchiq_ioc_copy_element_data() as message data is copied in. */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *current_element;	/* element being consumed */
	size_t current_element_offset;		/* bytes consumed of it */
	unsigned long elements_to_go;		/* elements left, incl. current */
	size_t current_offset;			/* total bytes copied so far */
};
 414
 415static ssize_t
 416vchiq_ioc_copy_element_data(
 417        void *context,
 418        void *dest,
 419        size_t offset,
 420        size_t maxsize)
 421{
 422        long res;
 423        size_t bytes_this_round;
 424        struct vchiq_io_copy_callback_context *copy_context =
 425                (struct vchiq_io_copy_callback_context *)context;
 426
 427        if (offset != copy_context->current_offset)
 428                return 0;
 429
 430        if (!copy_context->elements_to_go)
 431                return 0;
 432
 433        /*
 434         * Complex logic here to handle the case of 0 size elements
 435         * in the middle of the array of elements.
 436         *
 437         * Need to skip over these 0 size elements.
 438         */
 439        while (1) {
 440                bytes_this_round = min(copy_context->current_element->size -
 441                                       copy_context->current_element_offset,
 442                                       maxsize);
 443
 444                if (bytes_this_round)
 445                        break;
 446
 447                copy_context->elements_to_go--;
 448                copy_context->current_element++;
 449                copy_context->current_element_offset = 0;
 450
 451                if (!copy_context->elements_to_go)
 452                        return 0;
 453        }
 454
 455        res = copy_from_user(dest,
 456                             copy_context->current_element->data +
 457                             copy_context->current_element_offset,
 458                             bytes_this_round);
 459
 460        if (res != 0)
 461                return -EFAULT;
 462
 463        copy_context->current_element_offset += bytes_this_round;
 464        copy_context->current_offset += bytes_this_round;
 465
 466        /*
 467         * Check if done with current element, and if so advance to the next.
 468         */
 469        if (copy_context->current_element_offset ==
 470            copy_context->current_element->size) {
 471                copy_context->elements_to_go--;
 472                copy_context->current_element++;
 473                copy_context->current_element_offset = 0;
 474        }
 475
 476        return bytes_this_round;
 477}
 478
 479/**************************************************************************
 480 *
 481 *   vchiq_ioc_queue_message
 482 *
 483 **************************************************************************/
/*
 * Queue a message assembled from a (kernel-copied) array of user-space
 * elements, validating each element and summing the total payload size
 * before handing off to vchiq_queue_message() with the copy callback.
 *
 * NOTE(review): on a bad element this returns -EFAULT through a
 * VCHIQ_STATUS_T return type, mixing errno values with VCHIQ_* status
 * codes — the caller treats anything != VCHIQ_SUCCESS as failure, but the
 * type confusion is worth fixing alongside the caller.
 */
static VCHIQ_STATUS_T
vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
			struct vchiq_element *elements,
			unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	unsigned long i;
	size_t total_size = 0;

	context.current_element = elements;
	context.current_element_offset = 0;
	context.elements_to_go = count;
	context.current_offset = 0;

	for (i = 0; i < count; i++) {
		/* A non-empty element must carry a user pointer. */
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
				   &context, total_size);
}
 508
 509/****************************************************************************
 510*
 511*   vchiq_ioctl
 512*
 513***************************************************************************/
 514static long
 515vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 516{
 517        VCHIQ_INSTANCE_T instance = file->private_data;
 518        VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
 519        VCHIQ_SERVICE_T *service = NULL;
 520        long ret = 0;
 521        int i, rc;
 522
 523        DEBUG_INITIALISE(g_state.local)
 524
 525        vchiq_log_trace(vchiq_arm_log_level,
 526                "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
 527                instance,
 528                ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
 529                (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
 530                ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
 531
 532        switch (cmd) {
 533        case VCHIQ_IOC_SHUTDOWN:
 534                if (!instance->connected)
 535                        break;
 536
 537                /* Remove all services */
 538                i = 0;
 539                while ((service = next_service_by_instance(instance->state,
 540                        instance, &i)) != NULL) {
 541                        status = vchiq_remove_service(service->handle);
 542                        unlock_service(service);
 543                        if (status != VCHIQ_SUCCESS)
 544                                break;
 545                }
 546                service = NULL;
 547
 548                if (status == VCHIQ_SUCCESS) {
 549                        /* Wake the completion thread and ask it to exit */
 550                        instance->closing = 1;
 551                        up(&instance->insert_event);
 552                }
 553
 554                break;
 555
 556        case VCHIQ_IOC_CONNECT:
 557                if (instance->connected) {
 558                        ret = -EINVAL;
 559                        break;
 560                }
 561                rc = mutex_lock_killable(&instance->state->mutex);
 562                if (rc != 0) {
 563                        vchiq_log_error(vchiq_arm_log_level,
 564                                "vchiq: connect: could not lock mutex for "
 565                                "state %d: %d",
 566                                instance->state->id, rc);
 567                        ret = -EINTR;
 568                        break;
 569                }
 570                status = vchiq_connect_internal(instance->state, instance);
 571                mutex_unlock(&instance->state->mutex);
 572
 573                if (status == VCHIQ_SUCCESS)
 574                        instance->connected = 1;
 575                else
 576                        vchiq_log_error(vchiq_arm_log_level,
 577                                "vchiq: could not connect: %d", status);
 578                break;
 579
 580        case VCHIQ_IOC_CREATE_SERVICE: {
 581                VCHIQ_CREATE_SERVICE_T args;
 582                USER_SERVICE_T *user_service = NULL;
 583                void *userdata;
 584                int srvstate;
 585
 586                if (copy_from_user
 587                         (&args, (const void __user *)arg,
 588                          sizeof(args)) != 0) {
 589                        ret = -EFAULT;
 590                        break;
 591                }
 592
 593                user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
 594                if (!user_service) {
 595                        ret = -ENOMEM;
 596                        break;
 597                }
 598
 599                if (args.is_open) {
 600                        if (!instance->connected) {
 601                                ret = -ENOTCONN;
 602                                kfree(user_service);
 603                                break;
 604                        }
 605                        srvstate = VCHIQ_SRVSTATE_OPENING;
 606                } else {
 607                        srvstate =
 608                                 instance->connected ?
 609                                 VCHIQ_SRVSTATE_LISTENING :
 610                                 VCHIQ_SRVSTATE_HIDDEN;
 611                }
 612
 613                userdata = args.params.userdata;
 614                args.params.callback = service_callback;
 615                args.params.userdata = user_service;
 616                service = vchiq_add_service_internal(
 617                                instance->state,
 618                                &args.params, srvstate,
 619                                instance, user_service_free);
 620
 621                if (service != NULL) {
 622                        user_service->service = service;
 623                        user_service->userdata = userdata;
 624                        user_service->instance = instance;
 625                        user_service->is_vchi = (args.is_vchi != 0);
 626                        user_service->dequeue_pending = 0;
 627                        user_service->close_pending = 0;
 628                        user_service->message_available_pos =
 629                                instance->completion_remove - 1;
 630                        user_service->msg_insert = 0;
 631                        user_service->msg_remove = 0;
 632                        sema_init(&user_service->insert_event, 0);
 633                        sema_init(&user_service->remove_event, 0);
 634                        sema_init(&user_service->close_event, 0);
 635
 636                        if (args.is_open) {
 637                                status = vchiq_open_service_internal
 638                                        (service, instance->pid);
 639                                if (status != VCHIQ_SUCCESS) {
 640                                        vchiq_remove_service(service->handle);
 641                                        service = NULL;
 642                                        ret = (status == VCHIQ_RETRY) ?
 643                                                -EINTR : -EIO;
 644                                        break;
 645                                }
 646                        }
 647
 648                        if (copy_to_user((void __user *)
 649                                &(((VCHIQ_CREATE_SERVICE_T __user *)
 650                                        arg)->handle),
 651                                (const void *)&service->handle,
 652                                sizeof(service->handle)) != 0) {
 653                                ret = -EFAULT;
 654                                vchiq_remove_service(service->handle);
 655                        }
 656
 657                        service = NULL;
 658                } else {
 659                        ret = -EEXIST;
 660                        kfree(user_service);
 661                }
 662        } break;
 663
 664        case VCHIQ_IOC_CLOSE_SERVICE: {
 665                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
 666
 667                service = find_service_for_instance(instance, handle);
 668                if (service != NULL) {
 669                        USER_SERVICE_T *user_service =
 670                                (USER_SERVICE_T *)service->base.userdata;
 671                        /* close_pending is false on first entry, and when the
 672                           wait in vchiq_close_service has been interrupted. */
 673                        if (!user_service->close_pending) {
 674                                status = vchiq_close_service(service->handle);
 675                                if (status != VCHIQ_SUCCESS)
 676                                        break;
 677                        }
 678
 679                        /* close_pending is true once the underlying service
 680                           has been closed until the client library calls the
 681                           CLOSE_DELIVERED ioctl, signalling close_event. */
 682                        if (user_service->close_pending &&
 683                                down_interruptible(&user_service->close_event))
 684                                status = VCHIQ_RETRY;
 685                }
 686                else
 687                        ret = -EINVAL;
 688        } break;
 689
 690        case VCHIQ_IOC_REMOVE_SERVICE: {
 691                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
 692
 693                service = find_service_for_instance(instance, handle);
 694                if (service != NULL) {
 695                        USER_SERVICE_T *user_service =
 696                                (USER_SERVICE_T *)service->base.userdata;
 697                        /* close_pending is false on first entry, and when the
 698                           wait in vchiq_close_service has been interrupted. */
 699                        if (!user_service->close_pending) {
 700                                status = vchiq_remove_service(service->handle);
 701                                if (status != VCHIQ_SUCCESS)
 702                                        break;
 703                        }
 704
 705                        /* close_pending is true once the underlying service
 706                           has been closed until the client library calls the
 707                           CLOSE_DELIVERED ioctl, signalling close_event. */
 708                        if (user_service->close_pending &&
 709                                down_interruptible(&user_service->close_event))
 710                                status = VCHIQ_RETRY;
 711                }
 712                else
 713                        ret = -EINVAL;
 714        } break;
 715
 716        case VCHIQ_IOC_USE_SERVICE:
 717        case VCHIQ_IOC_RELEASE_SERVICE: {
 718                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
 719
 720                service = find_service_for_instance(instance, handle);
 721                if (service != NULL) {
 722                        status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
 723                                vchiq_use_service_internal(service) :
 724                                vchiq_release_service_internal(service);
 725                        if (status != VCHIQ_SUCCESS) {
 726                                vchiq_log_error(vchiq_susp_log_level,
 727                                        "%s: cmd %s returned error %d for "
 728                                        "service %c%c%c%c:%03d",
 729                                        __func__,
 730                                        (cmd == VCHIQ_IOC_USE_SERVICE) ?
 731                                                "VCHIQ_IOC_USE_SERVICE" :
 732                                                "VCHIQ_IOC_RELEASE_SERVICE",
 733                                        status,
 734                                        VCHIQ_FOURCC_AS_4CHARS(
 735                                                service->base.fourcc),
 736                                        service->client_id);
 737                                ret = -EINVAL;
 738                        }
 739                } else
 740                        ret = -EINVAL;
 741        } break;
 742
 743        case VCHIQ_IOC_QUEUE_MESSAGE: {
 744                VCHIQ_QUEUE_MESSAGE_T args;
 745
 746                if (copy_from_user
 747                         (&args, (const void __user *)arg,
 748                          sizeof(args)) != 0) {
 749                        ret = -EFAULT;
 750                        break;
 751                }
 752
 753                service = find_service_for_instance(instance, args.handle);
 754
 755                if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
 756                        /* Copy elements into kernel space */
 757                        struct vchiq_element elements[MAX_ELEMENTS];
 758
 759                        if (copy_from_user(elements, args.elements,
 760                                args.count * sizeof(struct vchiq_element)) == 0)
 761                                status = vchiq_ioc_queue_message
 762                                        (args.handle,
 763                                        elements, args.count);
 764                        else
 765                                ret = -EFAULT;
 766                } else {
 767                        ret = -EINVAL;
 768                }
 769        } break;
 770
 771        case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
 772        case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
 773                VCHIQ_QUEUE_BULK_TRANSFER_T args;
 774                struct bulk_waiter_node *waiter = NULL;
 775
 776                VCHIQ_BULK_DIR_T dir =
 777                        (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
 778                        VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
 779
 780                if (copy_from_user
 781                        (&args, (const void __user *)arg,
 782                        sizeof(args)) != 0) {
 783                        ret = -EFAULT;
 784                        break;
 785                }
 786
 787                service = find_service_for_instance(instance, args.handle);
 788                if (!service) {
 789                        ret = -EINVAL;
 790                        break;
 791                }
 792
 793                if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
 794                        waiter = kzalloc(sizeof(struct bulk_waiter_node),
 795                                GFP_KERNEL);
 796                        if (!waiter) {
 797                                ret = -ENOMEM;
 798                                break;
 799                        }
 800                        args.userdata = &waiter->bulk_waiter;
 801                } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
 802                        struct list_head *pos;
 803
 804                        mutex_lock(&instance->bulk_waiter_list_mutex);
 805                        list_for_each(pos, &instance->bulk_waiter_list) {
 806                                if (list_entry(pos, struct bulk_waiter_node,
 807                                        list)->pid == current->pid) {
 808                                        waiter = list_entry(pos,
 809                                                struct bulk_waiter_node,
 810                                                list);
 811                                        list_del(pos);
 812                                        break;
 813                                }
 814
 815                        }
 816                        mutex_unlock(&instance->bulk_waiter_list_mutex);
 817                        if (!waiter) {
 818                                vchiq_log_error(vchiq_arm_log_level,
 819                                        "no bulk_waiter found for pid %d",
 820                                        current->pid);
 821                                ret = -ESRCH;
 822                                break;
 823                        }
 824                        vchiq_log_info(vchiq_arm_log_level,
 825                                "found bulk_waiter %pK for pid %d", waiter,
 826                                current->pid);
 827                        args.userdata = &waiter->bulk_waiter;
 828                }
 829                status = vchiq_bulk_transfer
 830                        (args.handle,
 831                         VCHI_MEM_HANDLE_INVALID,
 832                         args.data, args.size,
 833                         args.userdata, args.mode,
 834                         dir);
 835                if (!waiter)
 836                        break;
 837                if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
 838                        !waiter->bulk_waiter.bulk) {
 839                        if (waiter->bulk_waiter.bulk) {
 840                                /* Cancel the signal when the transfer
 841                                ** completes. */
 842                                spin_lock(&bulk_waiter_spinlock);
 843                                waiter->bulk_waiter.bulk->userdata = NULL;
 844                                spin_unlock(&bulk_waiter_spinlock);
 845                        }
 846                        kfree(waiter);
 847                } else {
 848                        const VCHIQ_BULK_MODE_T mode_waiting =
 849                                VCHIQ_BULK_MODE_WAITING;
 850                        waiter->pid = current->pid;
 851                        mutex_lock(&instance->bulk_waiter_list_mutex);
 852                        list_add(&waiter->list, &instance->bulk_waiter_list);
 853                        mutex_unlock(&instance->bulk_waiter_list_mutex);
 854                        vchiq_log_info(vchiq_arm_log_level,
 855                                "saved bulk_waiter %pK for pid %d",
 856                                waiter, current->pid);
 857
 858                        if (copy_to_user((void __user *)
 859                                &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
 860                                        arg)->mode),
 861                                (const void *)&mode_waiting,
 862                                sizeof(mode_waiting)) != 0)
 863                                ret = -EFAULT;
 864                }
 865        } break;
 866
 867        case VCHIQ_IOC_AWAIT_COMPLETION: {
 868                VCHIQ_AWAIT_COMPLETION_T args;
 869
 870                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
 871                if (!instance->connected) {
 872                        ret = -ENOTCONN;
 873                        break;
 874                }
 875
 876                if (copy_from_user(&args, (const void __user *)arg,
 877                        sizeof(args)) != 0) {
 878                        ret = -EFAULT;
 879                        break;
 880                }
 881
 882                mutex_lock(&instance->completion_mutex);
 883
 884                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
 885                while ((instance->completion_remove ==
 886                        instance->completion_insert)
 887                        && !instance->closing) {
 888                        int rc;
 889
 890                        DEBUG_TRACE(AWAIT_COMPLETION_LINE);
 891                        mutex_unlock(&instance->completion_mutex);
 892                        rc = down_interruptible(&instance->insert_event);
 893                        mutex_lock(&instance->completion_mutex);
 894                        if (rc != 0) {
 895                                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
 896                                vchiq_log_info(vchiq_arm_log_level,
 897                                        "AWAIT_COMPLETION interrupted");
 898                                ret = -EINTR;
 899                                break;
 900                        }
 901                }
 902                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
 903
 904                if (ret == 0) {
 905                        int msgbufcount = args.msgbufcount;
 906                        int remove = instance->completion_remove;
 907
 908                        for (ret = 0; ret < args.count; ret++) {
 909                                VCHIQ_COMPLETION_DATA_T *completion;
 910                                VCHIQ_SERVICE_T *service;
 911                                USER_SERVICE_T *user_service;
 912                                VCHIQ_HEADER_T *header;
 913
 914                                if (remove == instance->completion_insert)
 915                                        break;
 916
 917                                completion = &instance->completions[
 918                                        remove & (MAX_COMPLETIONS - 1)];
 919
 920                                /*
 921                                 * A read memory barrier is needed to stop
 922                                 * prefetch of a stale completion record
 923                                 */
 924                                rmb();
 925
 926                                service = completion->service_userdata;
 927                                user_service = service->base.userdata;
 928                                completion->service_userdata =
 929                                        user_service->userdata;
 930
 931                                header = completion->header;
 932                                if (header) {
 933                                        void __user *msgbuf;
 934                                        int msglen;
 935
 936                                        msglen = header->size +
 937                                                sizeof(VCHIQ_HEADER_T);
 938                                        /* This must be a VCHIQ-style service */
 939                                        if (args.msgbufsize < msglen) {
 940                                                vchiq_log_error(
 941                                                        vchiq_arm_log_level,
 942                                                        "header %pK: msgbufsize %x < msglen %x",
 943                                                        header, args.msgbufsize,
 944                                                        msglen);
 945                                                WARN(1, "invalid message "
 946                                                        "size\n");
 947                                                if (ret == 0)
 948                                                        ret = -EMSGSIZE;
 949                                                break;
 950                                        }
 951                                        if (msgbufcount <= 0)
 952                                                /* Stall here for lack of a
 953                                                ** buffer for the message. */
 954                                                break;
 955                                        /* Get the pointer from user space */
 956                                        msgbufcount--;
 957                                        if (copy_from_user(&msgbuf,
 958                                                (const void __user *)
 959                                                &args.msgbufs[msgbufcount],
 960                                                sizeof(msgbuf)) != 0) {
 961                                                if (ret == 0)
 962                                                        ret = -EFAULT;
 963                                                break;
 964                                        }
 965
 966                                        /* Copy the message to user space */
 967                                        if (copy_to_user(msgbuf, header,
 968                                                msglen) != 0) {
 969                                                if (ret == 0)
 970                                                        ret = -EFAULT;
 971                                                break;
 972                                        }
 973
 974                                        /* Now it has been copied, the message
 975                                        ** can be released. */
 976                                        vchiq_release_message(service->handle,
 977                                                header);
 978
 979                                        /* The completion must point to the
 980                                        ** msgbuf. */
 981                                        completion->header = msgbuf;
 982                                }
 983
 984                                if ((completion->reason ==
 985                                        VCHIQ_SERVICE_CLOSED) &&
 986                                        !instance->use_close_delivered)
 987                                        unlock_service(service);
 988
 989                                if (copy_to_user((void __user *)(
 990                                        (size_t)args.buf +
 991                                        ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
 992                                        completion,
 993                                        sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
 994                                                if (ret == 0)
 995                                                        ret = -EFAULT;
 996                                        break;
 997                                }
 998
 999                                /*
1000                                 * Ensure that the above copy has completed
1001                                 * before advancing the remove pointer.
1002                                 */
1003                                mb();
1004                                remove++;
1005                                instance->completion_remove = remove;
1006                        }
1007
1008                        if (msgbufcount != args.msgbufcount) {
1009                                if (copy_to_user((void __user *)
1010                                        &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
1011                                                msgbufcount,
1012                                        &msgbufcount,
1013                                        sizeof(msgbufcount)) != 0) {
1014                                        ret = -EFAULT;
1015                                }
1016                        }
1017                }
1018
1019                if (ret != 0)
1020                        up(&instance->remove_event);
1021                mutex_unlock(&instance->completion_mutex);
1022                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1023        } break;
1024
1025        case VCHIQ_IOC_DEQUEUE_MESSAGE: {
1026                VCHIQ_DEQUEUE_MESSAGE_T args;
1027                USER_SERVICE_T *user_service;
1028                VCHIQ_HEADER_T *header;
1029
1030                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1031                if (copy_from_user
1032                         (&args, (const void __user *)arg,
1033                          sizeof(args)) != 0) {
1034                        ret = -EFAULT;
1035                        break;
1036                }
1037                service = find_service_for_instance(instance, args.handle);
1038                if (!service) {
1039                        ret = -EINVAL;
1040                        break;
1041                }
1042                user_service = (USER_SERVICE_T *)service->base.userdata;
1043                if (user_service->is_vchi == 0) {
1044                        ret = -EINVAL;
1045                        break;
1046                }
1047
1048                spin_lock(&msg_queue_spinlock);
1049                if (user_service->msg_remove == user_service->msg_insert) {
1050                        if (!args.blocking) {
1051                                spin_unlock(&msg_queue_spinlock);
1052                                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1053                                ret = -EWOULDBLOCK;
1054                                break;
1055                        }
1056                        user_service->dequeue_pending = 1;
1057                        do {
1058                                spin_unlock(&msg_queue_spinlock);
1059                                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1060                                if (down_interruptible(
1061                                        &user_service->insert_event) != 0) {
1062                                        vchiq_log_info(vchiq_arm_log_level,
1063                                                "DEQUEUE_MESSAGE interrupted");
1064                                        ret = -EINTR;
1065                                        break;
1066                                }
1067                                spin_lock(&msg_queue_spinlock);
1068                        } while (user_service->msg_remove ==
1069                                user_service->msg_insert);
1070
1071                        if (ret)
1072                                break;
1073                }
1074
1075                BUG_ON((int)(user_service->msg_insert -
1076                        user_service->msg_remove) < 0);
1077
1078                header = user_service->msg_queue[user_service->msg_remove &
1079                        (MSG_QUEUE_SIZE - 1)];
1080                user_service->msg_remove++;
1081                spin_unlock(&msg_queue_spinlock);
1082
1083                up(&user_service->remove_event);
1084                if (header == NULL)
1085                        ret = -ENOTCONN;
1086                else if (header->size <= args.bufsize) {
1087                        /* Copy to user space if msgbuf is not NULL */
1088                        if ((args.buf == NULL) ||
1089                                (copy_to_user((void __user *)args.buf,
1090                                header->data,
1091                                header->size) == 0)) {
1092                                ret = header->size;
1093                                vchiq_release_message(
1094                                        service->handle,
1095                                        header);
1096                        } else
1097                                ret = -EFAULT;
1098                } else {
1099                        vchiq_log_error(vchiq_arm_log_level,
1100                                "header %pK: bufsize %x < size %x",
1101                                header, args.bufsize, header->size);
1102                        WARN(1, "invalid size\n");
1103                        ret = -EMSGSIZE;
1104                }
1105                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1106        } break;
1107
1108        case VCHIQ_IOC_GET_CLIENT_ID: {
1109                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1110
1111                ret = vchiq_get_client_id(handle);
1112        } break;
1113
1114        case VCHIQ_IOC_GET_CONFIG: {
1115                VCHIQ_GET_CONFIG_T args;
1116                VCHIQ_CONFIG_T config;
1117
1118                if (copy_from_user(&args, (const void __user *)arg,
1119                        sizeof(args)) != 0) {
1120                        ret = -EFAULT;
1121                        break;
1122                }
1123                if (args.config_size > sizeof(config)) {
1124                        ret = -EINVAL;
1125                        break;
1126                }
1127                status = vchiq_get_config(instance, args.config_size, &config);
1128                if (status == VCHIQ_SUCCESS) {
1129                        if (copy_to_user((void __user *)args.pconfig,
1130                                    &config, args.config_size) != 0) {
1131                                ret = -EFAULT;
1132                                break;
1133                        }
1134                }
1135        } break;
1136
1137        case VCHIQ_IOC_SET_SERVICE_OPTION: {
1138                VCHIQ_SET_SERVICE_OPTION_T args;
1139
1140                if (copy_from_user(
1141                        &args, (const void __user *)arg,
1142                        sizeof(args)) != 0) {
1143                        ret = -EFAULT;
1144                        break;
1145                }
1146
1147                service = find_service_for_instance(instance, args.handle);
1148                if (!service) {
1149                        ret = -EINVAL;
1150                        break;
1151                }
1152
1153                status = vchiq_set_service_option(
1154                                args.handle, args.option, args.value);
1155        } break;
1156
1157        case VCHIQ_IOC_LIB_VERSION: {
1158                unsigned int lib_version = (unsigned int)arg;
1159
1160                if (lib_version < VCHIQ_VERSION_MIN)
1161                        ret = -EINVAL;
1162                else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1163                        instance->use_close_delivered = 1;
1164        } break;
1165
1166        case VCHIQ_IOC_CLOSE_DELIVERED: {
1167                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1168
1169                service = find_closed_service_for_instance(instance, handle);
1170                if (service != NULL) {
1171                        USER_SERVICE_T *user_service =
1172                                (USER_SERVICE_T *)service->base.userdata;
1173                        close_delivered(user_service);
1174                }
1175                else
1176                        ret = -EINVAL;
1177        } break;
1178
1179        default:
1180                ret = -ENOTTY;
1181                break;
1182        }
1183
1184        if (service)
1185                unlock_service(service);
1186
1187        if (ret == 0) {
1188                if (status == VCHIQ_ERROR)
1189                        ret = -EIO;
1190                else if (status == VCHIQ_RETRY)
1191                        ret = -EINTR;
1192        }
1193
1194        if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1195                (ret != -EWOULDBLOCK))
1196                vchiq_log_info(vchiq_arm_log_level,
1197                        "  ioctl instance %lx, cmd %s -> status %d, %ld",
1198                        (unsigned long)instance,
1199                        (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1200                                ioctl_names[_IOC_NR(cmd)] :
1201                                "<invalid>",
1202                        status, ret);
1203        else
1204                vchiq_log_trace(vchiq_arm_log_level,
1205                        "  ioctl instance %lx, cmd %s -> status %d, %ld",
1206                        (unsigned long)instance,
1207                        (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1208                                ioctl_names[_IOC_NR(cmd)] :
1209                                "<invalid>",
1210                        status, ret);
1211
1212        return ret;
1213}
1214
1215#if defined(CONFIG_COMPAT)
1216
/*
 * 32-bit layout of VCHIQ_SERVICE_PARAMS_T: user-space pointers are
 * carried as compat_uptr_t so a 32-bit process can use the compat
 * ioctl path on a 64-bit kernel.
 */
struct vchiq_service_params32 {
	int fourcc;             /* service id, four characters packed in an int */
	compat_uptr_t callback; /* 32-bit user pointer (widened via compat_ptr) */
	compat_uptr_t userdata; /* 32-bit user pointer, opaque to the kernel */
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};
1224
/*
 * 32-bit counterpart of VCHIQ_CREATE_SERVICE_T, used to translate the
 * CREATE_SERVICE ioctl issued by 32-bit user space.
 */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1234
1235static long
1236vchiq_compat_ioctl_create_service(
1237        struct file *file,
1238        unsigned int cmd,
1239        unsigned long arg)
1240{
1241        VCHIQ_CREATE_SERVICE_T __user *args;
1242        struct vchiq_create_service32 __user *ptrargs32 =
1243                (struct vchiq_create_service32 __user *)arg;
1244        struct vchiq_create_service32 args32;
1245        long ret;
1246
1247        args = compat_alloc_user_space(sizeof(*args));
1248        if (!args)
1249                return -EFAULT;
1250
1251        if (copy_from_user(&args32,
1252                           (struct vchiq_create_service32 __user *)arg,
1253                           sizeof(args32)))
1254                return -EFAULT;
1255
1256        if (put_user(args32.params.fourcc, &args->params.fourcc) ||
1257            put_user(compat_ptr(args32.params.callback),
1258                     &args->params.callback) ||
1259            put_user(compat_ptr(args32.params.userdata),
1260                     &args->params.userdata) ||
1261            put_user(args32.params.version, &args->params.version) ||
1262            put_user(args32.params.version_min,
1263                     &args->params.version_min) ||
1264            put_user(args32.is_open, &args->is_open) ||
1265            put_user(args32.is_vchi, &args->is_vchi) ||
1266            put_user(args32.handle, &args->handle))
1267                return -EFAULT;
1268
1269        ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);
1270
1271        if (ret < 0)
1272                return ret;
1273
1274        if (get_user(args32.handle, &args->handle))
1275                return -EFAULT;
1276
1277        if (copy_to_user(&ptrargs32->handle,
1278                         &args32.handle,
1279                         sizeof(args32.handle)))
1280                return -EFAULT;
1281
1282        return 0;
1283}
1284
/* 32-bit layout of struct vchiq_element: the data pointer is compat-sized. */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

/* 32-bit counterpart of VCHIQ_QUEUE_MESSAGE_T. */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements; /* 32-bit user pointer to vchiq_element32[count] */
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1298
1299static long
1300vchiq_compat_ioctl_queue_message(struct file *file,
1301                                 unsigned int cmd,
1302                                 unsigned long arg)
1303{
1304        VCHIQ_QUEUE_MESSAGE_T *args;
1305        struct vchiq_element *elements;
1306        struct vchiq_queue_message32 args32;
1307        unsigned int count;
1308
1309        if (copy_from_user(&args32,
1310                           (struct vchiq_queue_message32 __user *)arg,
1311                           sizeof(args32)))
1312                return -EFAULT;
1313
1314        args = compat_alloc_user_space(sizeof(*args) +
1315                                       (sizeof(*elements) * MAX_ELEMENTS));
1316
1317        if (!args)
1318                return -EFAULT;
1319
1320        if (put_user(args32.handle, &args->handle) ||
1321            put_user(args32.count, &args->count) ||
1322            put_user(compat_ptr(args32.elements), &args->elements))
1323                return -EFAULT;
1324
1325        if (args32.count > MAX_ELEMENTS)
1326                return -EINVAL;
1327
1328        if (args32.elements && args32.count) {
1329                struct vchiq_element32 tempelement32[MAX_ELEMENTS];
1330
1331                elements = (struct vchiq_element __user *)(args + 1);
1332
1333                if (copy_from_user(&tempelement32,
1334                                   compat_ptr(args32.elements),
1335                                   sizeof(tempelement32)))
1336                        return -EFAULT;
1337
1338                for (count = 0; count < args32.count; count++) {
1339                        if (put_user(compat_ptr(tempelement32[count].data),
1340                                     &elements[count].data) ||
1341                            put_user(tempelement32[count].size,
1342                                     &elements[count].size))
1343                                return -EFAULT;
1344                }
1345
1346                if (put_user(elements, &args->elements))
1347                        return -EFAULT;
1348        }
1349
1350        return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
1351}
1352
/* 32-bit counterpart of VCHIQ_QUEUE_BULK_TRANSFER_T. */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;     /* 32-bit user pointer to the bulk buffer */
	unsigned int size;
	compat_uptr_t userdata; /* 32-bit user pointer, opaque to the kernel */
	VCHIQ_BULK_MODE_T mode; /* IN/OUT: native handler may rewrite to
				 * VCHIQ_BULK_MODE_WAITING on retry */
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1365
1366static long
1367vchiq_compat_ioctl_queue_bulk(struct file *file,
1368                              unsigned int cmd,
1369                              unsigned long arg)
1370{
1371        VCHIQ_QUEUE_BULK_TRANSFER_T *args;
1372        struct vchiq_queue_bulk_transfer32 args32;
1373        struct vchiq_queue_bulk_transfer32 *ptrargs32 =
1374                (struct vchiq_queue_bulk_transfer32 *)arg;
1375        long ret;
1376
1377        args = compat_alloc_user_space(sizeof(*args));
1378        if (!args)
1379                return -EFAULT;
1380
1381        if (copy_from_user(&args32,
1382                           (struct vchiq_queue_bulk_transfer32 __user *)arg,
1383                           sizeof(args32)))
1384                return -EFAULT;
1385
1386        if (put_user(args32.handle, &args->handle) ||
1387            put_user(compat_ptr(args32.data), &args->data) ||
1388            put_user(args32.size, &args->size) ||
1389            put_user(compat_ptr(args32.userdata), &args->userdata) ||
1390            put_user(args32.mode, &args->mode))
1391                return -EFAULT;
1392
1393        if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
1394                cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
1395        else
1396                cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;
1397
1398        ret = vchiq_ioctl(file, cmd, (unsigned long)args);
1399
1400        if (ret < 0)
1401                return ret;
1402
1403        if (get_user(args32.mode, &args->mode))
1404                return -EFAULT;
1405
1406        if (copy_to_user(&ptrargs32->mode,
1407                         &args32.mode,
1408                         sizeof(args32.mode)))
1409                return -EFAULT;
1410
1411        return 0;
1412}
1413
/* 32-bit layout of VCHIQ_COMPLETION_DATA_T; all pointers are compat-sized. */
struct vchiq_completion_data32 {
	VCHIQ_REASON_T reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};

/* 32-bit counterpart of VCHIQ_AWAIT_COMPLETION_T. */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;      /* 32-bit user pointer to completion records */
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;  /* 32-bit user pointer to message buffers */
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1431
1432static long
1433vchiq_compat_ioctl_await_completion(struct file *file,
1434                                    unsigned int cmd,
1435                                    unsigned long arg)
1436{
1437        VCHIQ_AWAIT_COMPLETION_T *args;
1438        VCHIQ_COMPLETION_DATA_T *completion;
1439        VCHIQ_COMPLETION_DATA_T completiontemp;
1440        struct vchiq_await_completion32 args32;
1441        struct vchiq_completion_data32 completion32;
1442        unsigned int *msgbufcount32;
1443        compat_uptr_t msgbuf32;
1444        void *msgbuf;
1445        void **msgbufptr;
1446        long ret;
1447
1448        args = compat_alloc_user_space(sizeof(*args) +
1449                                       sizeof(*completion) +
1450                                       sizeof(*msgbufptr));
1451        if (!args)
1452                return -EFAULT;
1453
1454        completion = (VCHIQ_COMPLETION_DATA_T *)(args + 1);
1455        msgbufptr = (void __user **)(completion + 1);
1456
1457        if (copy_from_user(&args32,
1458                           (struct vchiq_completion_data32 *)arg,
1459                           sizeof(args32)))
1460                return -EFAULT;
1461
1462        if (put_user(args32.count, &args->count) ||
1463            put_user(compat_ptr(args32.buf), &args->buf) ||
1464            put_user(args32.msgbufsize, &args->msgbufsize) ||
1465            put_user(args32.msgbufcount, &args->msgbufcount) ||
1466            put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
1467                return -EFAULT;
1468
1469        /* These are simple cases, so just fall into the native handler */
1470        if (!args32.count || !args32.buf || !args32.msgbufcount)
1471                return vchiq_ioctl(file,
1472                                   VCHIQ_IOC_AWAIT_COMPLETION,
1473                                   (unsigned long)args);
1474
1475        /*
1476         * These are the more complex cases.  Typical applications of this
1477         * ioctl will use a very large count, with a very large msgbufcount.
1478         * Since the native ioctl can asynchronously fill in the returned
1479         * buffers and the application can in theory begin processing messages
1480         * even before the ioctl returns, a bit of a trick is used here.
1481         *
1482         * By forcing both count and msgbufcount to be 1, it forces the native
1483         * ioctl to only claim at most 1 message is available.   This tricks
1484         * the calling application into thinking only 1 message was actually
1485         * available in the queue so like all good applications it will retry
1486         * waiting until all the required messages are received.
1487         *
1488         * This trick has been tested and proven to work with vchiq_test,
1489         * Minecraft_PI, the "hello pi" examples, and various other
1490         * applications that are included in Raspbian.
1491         */
1492
1493        if (copy_from_user(&msgbuf32,
1494                           compat_ptr(args32.msgbufs) +
1495                           (sizeof(compat_uptr_t) *
1496                           (args32.msgbufcount - 1)),
1497                           sizeof(msgbuf32)))
1498                return -EFAULT;
1499
1500        msgbuf = compat_ptr(msgbuf32);
1501
1502        if (copy_to_user(msgbufptr,
1503                         &msgbuf,
1504                         sizeof(msgbuf)))
1505                return -EFAULT;
1506
1507        if (copy_to_user(&args->msgbufs,
1508                         &msgbufptr,
1509                         sizeof(msgbufptr)))
1510                return -EFAULT;
1511
1512        if (put_user(1U, &args->count) ||
1513            put_user(completion, &args->buf) ||
1514            put_user(1U, &args->msgbufcount))
1515                return -EFAULT;
1516
1517        ret = vchiq_ioctl(file,
1518                          VCHIQ_IOC_AWAIT_COMPLETION,
1519                          (unsigned long)args);
1520
1521        /*
1522         * An return value of 0 here means that no messages where available
1523         * in the message queue.  In this case the native ioctl does not
1524         * return any data to the application at all.  Not even to update
1525         * msgbufcount.  This functionality needs to be kept here for
1526         * compatibility.
1527         *
1528         * Of course, < 0 means that an error occurred and no data is being
1529         * returned.
1530         *
1531         * Since count and msgbufcount was forced to 1, that means
1532         * the only other possible return value is 1. Meaning that 1 message
1533         * was available, so that multiple message case does not need to be
1534         * handled here.
1535         */
1536        if (ret <= 0)
1537                return ret;
1538
1539        if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
1540                return -EFAULT;
1541
1542        completion32.reason = completiontemp.reason;
1543        completion32.header = ptr_to_compat(completiontemp.header);
1544        completion32.service_userdata =
1545                ptr_to_compat(completiontemp.service_userdata);
1546        completion32.bulk_userdata =
1547                ptr_to_compat(completiontemp.bulk_userdata);
1548
1549        if (copy_to_user(compat_ptr(args32.buf),
1550                         &completion32,
1551                         sizeof(completion32)))
1552                return -EFAULT;
1553
1554        args32.msgbufcount--;
1555
1556        msgbufcount32 =
1557                &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
1558
1559        if (copy_to_user(msgbufcount32,
1560                         &args32.msgbufcount,
1561                         sizeof(args32.msgbufcount)))
1562                return -EFAULT;
1563
1564        return 1;
1565}
1566
/* 32-bit ABI layout of VCHIQ_DEQUEUE_MESSAGE_T: the user-space buffer
 * pointer is carried as a compat_uptr_t so the struct size matches what a
 * 32-bit application passes on a 64-bit kernel. */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

/* Same ioctl number as VCHIQ_IOC_DEQUEUE_MESSAGE but with the 32-bit layout */
#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1576
1577static long
1578vchiq_compat_ioctl_dequeue_message(struct file *file,
1579                                   unsigned int cmd,
1580                                   unsigned long arg)
1581{
1582        VCHIQ_DEQUEUE_MESSAGE_T *args;
1583        struct vchiq_dequeue_message32 args32;
1584
1585        args = compat_alloc_user_space(sizeof(*args));
1586        if (!args)
1587                return -EFAULT;
1588
1589        if (copy_from_user(&args32,
1590                           (struct vchiq_dequeue_message32 *)arg,
1591                           sizeof(args32)))
1592                return -EFAULT;
1593
1594        if (put_user(args32.handle, &args->handle) ||
1595            put_user(args32.blocking, &args->blocking) ||
1596            put_user(args32.bufsize, &args->bufsize) ||
1597            put_user(compat_ptr(args32.buf), &args->buf))
1598                return -EFAULT;
1599
1600        return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
1601                           (unsigned long)args);
1602}
1603
/* 32-bit ABI layout of VCHIQ_GET_CONFIG_T: the config output pointer is a
 * compat_uptr_t so the struct size matches a 32-bit userspace. */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

/* Same ioctl number as VCHIQ_IOC_GET_CONFIG but with the 32-bit layout */
#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1611
1612static long
1613vchiq_compat_ioctl_get_config(struct file *file,
1614                              unsigned int cmd,
1615                              unsigned long arg)
1616{
1617        VCHIQ_GET_CONFIG_T *args;
1618        struct vchiq_get_config32 args32;
1619
1620        args = compat_alloc_user_space(sizeof(*args));
1621        if (!args)
1622                return -EFAULT;
1623
1624        if (copy_from_user(&args32,
1625                           (struct vchiq_get_config32 *)arg,
1626                           sizeof(args32)))
1627                return -EFAULT;
1628
1629        if (put_user(args32.config_size, &args->config_size) ||
1630            put_user(compat_ptr(args32.pconfig), &args->pconfig))
1631                return -EFAULT;
1632
1633        return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
1634}
1635
1636static long
1637vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1638{
1639        switch (cmd) {
1640        case VCHIQ_IOC_CREATE_SERVICE32:
1641                return vchiq_compat_ioctl_create_service(file, cmd, arg);
1642        case VCHIQ_IOC_QUEUE_MESSAGE32:
1643                return vchiq_compat_ioctl_queue_message(file, cmd, arg);
1644        case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1645        case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1646                return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
1647        case VCHIQ_IOC_AWAIT_COMPLETION32:
1648                return vchiq_compat_ioctl_await_completion(file, cmd, arg);
1649        case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1650                return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
1651        case VCHIQ_IOC_GET_CONFIG32:
1652                return vchiq_compat_ioctl_get_config(file, cmd, arg);
1653        default:
1654                return vchiq_ioctl(file, cmd, arg);
1655        }
1656}
1657
1658#endif
1659
1660/****************************************************************************
1661*
1662*   vchiq_open
1663*
1664***************************************************************************/
1665
1666static int
1667vchiq_open(struct inode *inode, struct file *file)
1668{
1669        int dev = iminor(inode) & 0x0f;
1670
1671        vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1672        switch (dev) {
1673        case VCHIQ_MINOR: {
1674                int ret;
1675                VCHIQ_STATE_T *state = vchiq_get_state();
1676                VCHIQ_INSTANCE_T instance;
1677
1678                if (!state) {
1679                        vchiq_log_error(vchiq_arm_log_level,
1680                                "vchiq has no connection to VideoCore");
1681                        return -ENOTCONN;
1682                }
1683
1684                instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1685                if (!instance)
1686                        return -ENOMEM;
1687
1688                instance->state = state;
1689                instance->pid = current->tgid;
1690
1691                ret = vchiq_debugfs_add_instance(instance);
1692                if (ret != 0) {
1693                        kfree(instance);
1694                        return ret;
1695                }
1696
1697                sema_init(&instance->insert_event, 0);
1698                sema_init(&instance->remove_event, 0);
1699                mutex_init(&instance->completion_mutex);
1700                mutex_init(&instance->bulk_waiter_list_mutex);
1701                INIT_LIST_HEAD(&instance->bulk_waiter_list);
1702
1703                file->private_data = instance;
1704        } break;
1705
1706        default:
1707                vchiq_log_error(vchiq_arm_log_level,
1708                        "Unknown minor device: %d", dev);
1709                return -ENXIO;
1710        }
1711
1712        return 0;
1713}
1714
1715/****************************************************************************
1716*
1717*   vchiq_release
1718*
1719***************************************************************************/
1720
/*
 * Release handler for /dev/vchiq: tear down everything owned by this
 * open instance - wake and stop the completion thread, terminate every
 * service created through this instance and wait for each to reach the
 * FREE state, drain any queued messages and pending completions, free
 * outstanding bulk waiters, and finally free the instance itself.
 *
 * Returns 0 on success, -EPERM if there is no VideoCore connection,
 * -ENXIO for an unexpected minor number.
 */
static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;

	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
				USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) != NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Blocks until the service reaches the FREE state */
			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Return every still-queued message to VideoCore.
			 * The spinlock is dropped around each
			 * vchiq_release_message() call, which may sleep. */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
						(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;

			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED)
			{
				USER_SERVICE_T *user_service =
					service->base.userdata;

				/* Wake any blocked user-thread */
				if (instance->use_close_delivered)
					up(&user_service->close_event);
				unlock_service(service);
			}
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		/* Free any bulk waiters left behind by threads that died
		 * mid-transfer (see the bulk_waiter_list management in the
		 * ioctl paths). */
		{
			struct list_head *pos, *next;

			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;

				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
				kfree(waiter);
			}
		}

		vchiq_debugfs_remove_instance(instance);

		kfree(instance);
		file->private_data = NULL;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		ret = -ENXIO;
	}

out:
	return ret;
}
1862
1863/****************************************************************************
1864*
1865*   vchiq_dump
1866*
1867***************************************************************************/
1868
1869void
1870vchiq_dump(void *dump_context, const char *str, int len)
1871{
1872        DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1873
1874        if (context->actual < context->space) {
1875                int copy_bytes;
1876
1877                if (context->offset > 0) {
1878                        int skip_bytes = min(len, (int)context->offset);
1879
1880                        str += skip_bytes;
1881                        len -= skip_bytes;
1882                        context->offset -= skip_bytes;
1883                        if (context->offset > 0)
1884                                return;
1885                }
1886                copy_bytes = min(len, (int)(context->space - context->actual));
1887                if (copy_bytes == 0)
1888                        return;
1889                if (copy_to_user(context->buf + context->actual, str,
1890                        copy_bytes))
1891                        context->actual = -EFAULT;
1892                context->actual += copy_bytes;
1893                len -= copy_bytes;
1894
1895                /* If tne terminating NUL is included in the length, then it
1896                ** marks the end of a line and should be replaced with a
1897                ** carriage return. */
1898                if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1899                        char cr = '\n';
1900
1901                        if (copy_to_user(context->buf + context->actual - 1,
1902                                &cr, 1))
1903                                context->actual = -EFAULT;
1904                }
1905        }
1906}
1907
1908/****************************************************************************
1909*
1910*   vchiq_dump_platform_instance_state
1911*
1912***************************************************************************/
1913
1914void
1915vchiq_dump_platform_instances(void *dump_context)
1916{
1917        VCHIQ_STATE_T *state = vchiq_get_state();
1918        char buf[80];
1919        int len;
1920        int i;
1921
1922        /* There is no list of instances, so instead scan all services,
1923                marking those that have been dumped. */
1924
1925        for (i = 0; i < state->unused_service; i++) {
1926                VCHIQ_SERVICE_T *service = state->services[i];
1927                VCHIQ_INSTANCE_T instance;
1928
1929                if (service && (service->base.callback == service_callback)) {
1930                        instance = service->instance;
1931                        if (instance)
1932                                instance->mark = 0;
1933                }
1934        }
1935
1936        for (i = 0; i < state->unused_service; i++) {
1937                VCHIQ_SERVICE_T *service = state->services[i];
1938                VCHIQ_INSTANCE_T instance;
1939
1940                if (service && (service->base.callback == service_callback)) {
1941                        instance = service->instance;
1942                        if (instance && !instance->mark) {
1943                                len = snprintf(buf, sizeof(buf),
1944                                        "Instance %pK: pid %d,%s completions %d/%d",
1945                                        instance, instance->pid,
1946                                        instance->connected ? " connected, " :
1947                                                "",
1948                                        instance->completion_insert -
1949                                                instance->completion_remove,
1950                                        MAX_COMPLETIONS);
1951
1952                                vchiq_dump(dump_context, buf, len + 1);
1953
1954                                instance->mark = 1;
1955                        }
1956                }
1957        }
1958}
1959
1960/****************************************************************************
1961*
1962*   vchiq_dump_platform_service_state
1963*
1964***************************************************************************/
1965
1966void
1967vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1968{
1969        USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
1970        char buf[80];
1971        int len;
1972
1973        len = snprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1974
1975        if ((service->base.callback == service_callback) &&
1976                user_service->is_vchi) {
1977                len += snprintf(buf + len, sizeof(buf) - len,
1978                        ", %d/%d messages",
1979                        user_service->msg_insert - user_service->msg_remove,
1980                        MSG_QUEUE_SIZE);
1981
1982                if (user_service->dequeue_pending)
1983                        len += snprintf(buf + len, sizeof(buf) - len,
1984                                " (dequeue pending)");
1985        }
1986
1987        vchiq_dump(dump_context, buf, len + 1);
1988}
1989
1990/****************************************************************************
1991*
1992*   vchiq_read
1993*
1994***************************************************************************/
1995
1996static ssize_t
1997vchiq_read(struct file *file, char __user *buf,
1998        size_t count, loff_t *ppos)
1999{
2000        DUMP_CONTEXT_T context;
2001
2002        context.buf = buf;
2003        context.actual = 0;
2004        context.space = count;
2005        context.offset = *ppos;
2006
2007        vchiq_dump_state(&context, &g_state);
2008
2009        *ppos += context.actual;
2010
2011        return context.actual;
2012}
2013
2014VCHIQ_STATE_T *
2015vchiq_get_state(void)
2016{
2017
2018        if (g_state.remote == NULL)
2019                printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2020        else if (g_state.remote->initialised != 1)
2021                printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2022                        __func__, g_state.remote->initialised);
2023
2024        return ((g_state.remote != NULL) &&
2025                (g_state.remote->initialised == 1)) ? &g_state : NULL;
2026}
2027
/* File operations for the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	/* 32-bit userspace on a 64-bit kernel goes through the
	 * layout-translating compat handlers above. */
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
2039
2040/*
2041 * Autosuspend related functionality
2042 */
2043
2044int
2045vchiq_videocore_wanted(VCHIQ_STATE_T *state)
2046{
2047        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2048
2049        if (!arm_state)
2050                /* autosuspend not supported - always return wanted */
2051                return 1;
2052        else if (arm_state->blocked_count)
2053                return 1;
2054        else if (!arm_state->videocore_use_count)
2055                /* usage count zero - check for override unless we're forcing */
2056                if (arm_state->resume_blocked)
2057                        return 0;
2058                else
2059                        return vchiq_platform_videocore_wanted(state);
2060        else
2061                /* non-zero usage count - videocore still required */
2062                return 1;
2063}
2064
2065static VCHIQ_STATUS_T
2066vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
2067        VCHIQ_HEADER_T *header,
2068        VCHIQ_SERVICE_HANDLE_T service_user,
2069        void *bulk_user)
2070{
2071        vchiq_log_error(vchiq_susp_log_level,
2072                "%s callback reason %d", __func__, reason);
2073        return 0;
2074}
2075
/*
 * Kernel thread that proxies keepalive requests to the videocore "KEEP"
 * service.  Contexts that cannot call vchiq_use_service() /
 * vchiq_release_service() directly instead bump ka_use_count /
 * ka_release_count and signal ka_evt; this thread wakes up and replays
 * those counts as real service use/release calls.
 *
 * NOTE(review): the main loop has no exit condition - errors inside it
 * are logged and the loop continues; only a setup failure reaches the
 * shutdown/exit paths.  Returns 0 in those failure cases.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	VCHIQ_STATUS_T status;
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T ka_handle;

	/* Parameters for the keepalive service connection */
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* Sleep until someone posts new use/release requests */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)
				!= 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
2156
2157VCHIQ_STATUS_T
2158vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
2159{
2160        if (arm_state) {
2161                rwlock_init(&arm_state->susp_res_lock);
2162
2163                init_completion(&arm_state->ka_evt);
2164                atomic_set(&arm_state->ka_use_count, 0);
2165                atomic_set(&arm_state->ka_use_ack_count, 0);
2166                atomic_set(&arm_state->ka_release_count, 0);
2167
2168                init_completion(&arm_state->vc_suspend_complete);
2169
2170                init_completion(&arm_state->vc_resume_complete);
2171                /* Initialise to 'done' state.  We only want to block on resume
2172                 * completion while videocore is suspended. */
2173                set_resume_state(arm_state, VC_RESUME_RESUMED);
2174
2175                init_completion(&arm_state->resume_blocker);
2176                /* Initialise to 'done' state.  We only want to block on this
2177                 * completion while resume is blocked */
2178                complete_all(&arm_state->resume_blocker);
2179
2180                init_completion(&arm_state->blocked_blocker);
2181                /* Initialise to 'done' state.  We only want to block on this
2182                 * completion while things are waiting on the resume blocker */
2183                complete_all(&arm_state->blocked_blocker);
2184
2185                arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
2186                arm_state->suspend_timer_running = 0;
2187                arm_state->state = state;
2188                timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
2189                            0);
2190
2191                arm_state->first_connect = 0;
2192
2193        }
2194        return VCHIQ_SUCCESS;
2195}
2196
2197/*
2198** Functions to modify the state variables;
2199**      set_suspend_state
2200**      set_resume_state
2201**
2202** There are more state variables than we might like, so ensure they remain in
2203** step.  Suspend and resume state are maintained separately, since most of
2204** these state machines can operate independently.  However, there are a few
2205** states where state transitions in one state machine cause a reset to the
2206** other state machine.  In addition, there are some completion events which
2207** need to occur on state machine reset and end-state(s), so these are also
2208** dealt with in these functions.
2209**
2210** In all states we set the state variable according to the input, but in some
2211** cases we perform additional steps outlined below;
2212**
2213** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
2214**                      The suspend completion is completed after any suspend
2215**                      attempt.  When we reset the state machine we also reset
2216**                      the completion.  This reset occurs when videocore is
2217**                      resumed, and also if we initiate suspend after a suspend
2218**                      failure.
2219**
2220** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
2221**                      suspend - ie from this point on we must try to suspend
2222**                      before resuming can occur.  We therefore also reset the
2223**                      resume state machine to VC_RESUME_IDLE in this state.
2224**
2225** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
2226**                      complete_all on the suspend completion to notify
2227**                      anything waiting for suspend to happen.
2228**
2229** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
2230**                      initiate resume, so no need to alter resume state.
2231**                      We call complete_all on the suspend completion to notify
2232**                      of suspend rejection.
2233**
2234** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
2235**                      suspend completion and reset the resume state machine.
2236**
2237** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
**                      resume completion is in its 'done' state whenever
**                      videocore is running.  Therefore, the VC_RESUME_IDLE
2240**                      state implies that videocore is suspended.
2241**                      Hence, any thread which needs to wait until videocore is
2242**                      running can wait on this completion - it will only block
2243**                      if videocore is suspended.
2244**
2245** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
2246**                      Call complete_all on the resume completion to unblock
2247**                      any threads waiting for resume.  Also reset the suspend
**                      state machine to its idle state.
2249**
2250** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
2251*/
2252
2253void
2254set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
2255        enum vc_suspend_status new_state)
2256{
2257        /* set the state in all cases */
2258        arm_state->vc_suspend_state = new_state;
2259
2260        /* state specific additional actions */
2261        switch (new_state) {
2262        case VC_SUSPEND_FORCE_CANCELED:
2263                complete_all(&arm_state->vc_suspend_complete);
2264                break;
2265        case VC_SUSPEND_REJECTED:
2266                complete_all(&arm_state->vc_suspend_complete);
2267                break;
2268        case VC_SUSPEND_FAILED:
2269                complete_all(&arm_state->vc_suspend_complete);
2270                arm_state->vc_resume_state = VC_RESUME_RESUMED;
2271                complete_all(&arm_state->vc_resume_complete);
2272                break;
2273        case VC_SUSPEND_IDLE:
2274                reinit_completion(&arm_state->vc_suspend_complete);
2275                break;
2276        case VC_SUSPEND_REQUESTED:
2277                break;
2278        case VC_SUSPEND_IN_PROGRESS:
2279                set_resume_state(arm_state, VC_RESUME_IDLE);
2280                break;
2281        case VC_SUSPEND_SUSPENDED:
2282                complete_all(&arm_state->vc_suspend_complete);
2283                break;
2284        default:
2285                BUG();
2286                break;
2287        }
2288}
2289
2290void
2291set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
2292        enum vc_resume_status new_state)
2293{
2294        /* set the state in all cases */
2295        arm_state->vc_resume_state = new_state;
2296
2297        /* state specific additional actions */
2298        switch (new_state) {
2299        case VC_RESUME_FAILED:
2300                break;
2301        case VC_RESUME_IDLE:
2302                reinit_completion(&arm_state->vc_resume_complete);
2303                break;
2304        case VC_RESUME_REQUESTED:
2305                break;
2306        case VC_RESUME_IN_PROGRESS:
2307                break;
2308        case VC_RESUME_RESUMED:
2309                complete_all(&arm_state->vc_resume_complete);
2310                set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2311                break;
2312        default:
2313                BUG();
2314                break;
2315        }
2316}
2317
2318/* should be called with the write lock held */
2319inline void
2320start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
2321{
2322        del_timer(&arm_state->suspend_timer);
2323        arm_state->suspend_timer.expires = jiffies +
2324                msecs_to_jiffies(arm_state->
2325                        suspend_timer_timeout);
2326        add_timer(&arm_state->suspend_timer);
2327        arm_state->suspend_timer_running = 1;
2328}
2329
/* Cancel a pending videocore suspend timer, if one is running.
 * Should be called with the write lock held. */
static inline void
stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
        /* The running flag (also updated under the lock) avoids a
         * del_timer() call when the timer was never started */
        if (arm_state->suspend_timer_running) {
                del_timer(&arm_state->suspend_timer);
                arm_state->suspend_timer_running = 0;
        }
}
2339
2340static inline int
2341need_resume(VCHIQ_STATE_T *state)
2342{
2343        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2344
2345        return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
2346                        (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
2347                        vchiq_videocore_wanted(state);
2348}
2349
/* Prevent videocore resume until unblock_resume() is called, waiting first
 * for any previously-blocked clients and any in-flight resume to finish.
 * Called with the susp_res_lock write lock held; the lock is dropped and
 * re-acquired around each wait, and is held again on return (on every
 * path, including errors).  Returns VCHIQ_SUCCESS or VCHIQ_ERROR. */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
        int status = VCHIQ_SUCCESS;
        const unsigned long timeout_val =
                                msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
        int resume_count = 0;

        /* Allow any threads which were blocked by the last force suspend to
         * complete if they haven't already.  Only give this one shot; if
         * blocked_count is incremented after blocked_blocker is completed
         * (which only happens when blocked_count hits 0) then those threads
         * will have to wait until next time around */
        if (arm_state->blocked_count) {
                reinit_completion(&arm_state->blocked_blocker);
                /* Must not sleep holding the lock */
                write_unlock_bh(&arm_state->susp_res_lock);
                vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
                        "blocked clients", __func__);
                if (wait_for_completion_interruptible_timeout(
                                &arm_state->blocked_blocker, timeout_val)
                                        <= 0) {
                        vchiq_log_error(vchiq_susp_log_level, "%s wait for "
                                "previously blocked clients failed", __func__);
                        status = VCHIQ_ERROR;
                        /* Re-acquire before returning - caller expects it */
                        write_lock_bh(&arm_state->susp_res_lock);
                        goto out;
                }
                vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
                        "clients resumed", __func__);
                write_lock_bh(&arm_state->susp_res_lock);
        }

        /* We need to wait for resume to complete if it's in process */
        while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
                        arm_state->vc_resume_state > VC_RESUME_IDLE) {
                /* Give up after two full timeout periods of waiting */
                if (resume_count > 1) {
                        status = VCHIQ_ERROR;
                        vchiq_log_error(vchiq_susp_log_level, "%s waited too "
                                "many times for resume", __func__);
                        goto out;
                }
                write_unlock_bh(&arm_state->susp_res_lock);
                vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
                        __func__);
                if (wait_for_completion_interruptible_timeout(
                                &arm_state->vc_resume_complete, timeout_val)
                                        <= 0) {
                        vchiq_log_error(vchiq_susp_log_level, "%s wait for "
                                "resume failed (%s)", __func__,
                                resume_state_names[arm_state->vc_resume_state +
                                                        VC_RESUME_NUM_OFFSET]);
                        status = VCHIQ_ERROR;
                        write_lock_bh(&arm_state->susp_res_lock);
                        goto out;
                }
                vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
                /* Re-check the state under the lock before looping */
                write_lock_bh(&arm_state->susp_res_lock);
                resume_count++;
        }
        /* From here on, 'use' callers will block on resume_blocker until
         * unblock_resume() */
        reinit_completion(&arm_state->resume_blocker);
        arm_state->resume_blocked = 1;

out:
        return status;
}
2415
/* Undo block_resume(): wake every thread waiting on the resume blocker and
 * clear the blocked flag.  Called with the susp_res_lock write lock held
 * (see vchiq_arm_force_suspend() and vchiq_arm_allow_resume()). */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
        complete_all(&arm_state->resume_blocker);
        arm_state->resume_blocked = 0;
}
2422
/* Initiate suspend via slot handler. Should be called with the write lock
 * held.
 * Returns VCHIQ_ERROR if there is no arm state, otherwise VCHIQ_SUCCESS -
 * even when suspend was already requested or in progress. */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
        VCHIQ_STATUS_T status = VCHIQ_ERROR;
        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

        if (!arm_state)
                goto out;

        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
        status = VCHIQ_SUCCESS;

        /* NOTE: the case order below is deliberate - 'default' falls
         * through to REJECTED/FAILED, which fall through to IDLE, so any
         * unexpected or error state is cleaned up and then re-requested. */
        switch (arm_state->vc_suspend_state) {
        case VC_SUSPEND_REQUESTED:
                vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
                        "requested", __func__);
                break;
        case VC_SUSPEND_IN_PROGRESS:
                vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
                        "progress", __func__);
                break;

        default:
                /* We don't expect to be in other states, so log but continue
                 * anyway */
                vchiq_log_error(vchiq_susp_log_level,
                        "%s unexpected suspend state %s", __func__,
                        suspend_state_names[arm_state->vc_suspend_state +
                                                VC_SUSPEND_NUM_OFFSET]);
                /* fall through */
        case VC_SUSPEND_REJECTED:
        case VC_SUSPEND_FAILED:
                /* Ensure any idle state actions have been run */
                set_suspend_state(arm_state, VC_SUSPEND_IDLE);
                /* fall through */
        case VC_SUSPEND_IDLE:
                vchiq_log_info(vchiq_susp_log_level,
                        "%s: suspending", __func__);
                set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
                /* kick the slot handler thread to initiate suspend */
                request_poll(state, NULL, 0);
                break;
        }

out:
        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
        return status;
}
2473
2474void
2475vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2476{
2477        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2478        int susp = 0;
2479
2480        if (!arm_state)
2481                goto out;
2482
2483        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2484
2485        write_lock_bh(&arm_state->susp_res_lock);
2486        if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2487                        arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2488                set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2489                susp = 1;
2490        }
2491        write_unlock_bh(&arm_state->susp_res_lock);
2492
2493        if (susp)
2494                vchiq_platform_suspend(state);
2495
2496out:
2497        vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2498        return;
2499}
2500
/* Log which entity appears to be holding the videocore awake when a forced
 * suspend times out: either reports that the global use count is already
 * zero, or names the first in-use service found. Diagnostic only. */
static void
output_timeout_error(VCHIQ_STATE_T *state)
{
        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
        /* NOTE(review): 50 bytes may truncate the per-service message below;
         * snprintf guarantees termination so this is cosmetic only */
        char err[50] = "";
        int vc_use_count = arm_state->videocore_use_count;
        int active_services = state->unused_service;
        int i;

        if (!arm_state->videocore_use_count) {
                snprintf(err, sizeof(err), " Videocore usecount is 0");
                goto output_msg;
        }
        /* Report only the first busy service; "(+ more)" flags when its
         * count alone doesn't account for the whole videocore use count */
        for (i = 0; i < active_services; i++) {
                VCHIQ_SERVICE_T *service_ptr = state->services[i];

                if (service_ptr && service_ptr->service_use_count &&
                        (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
                        snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
                                "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
                                        service_ptr->base.fourcc),
                                 service_ptr->client_id,
                                 service_ptr->service_use_count,
                                 service_ptr->service_use_count ==
                                         vc_use_count ? "" : " (+ more)");
                        break;
                }
        }

output_msg:
        vchiq_log_error(vchiq_susp_log_level,
                "timed out waiting for vc suspend (%d).%s",
                 arm_state->autosuspend_override, err);

}
2536
/* Try to get videocore into suspended state, regardless of autosuspend state.
** We don't actually force suspend, since videocore may get into a bad state
** if we force suspend at a bad time.  Instead, we wait for autosuspend to
** determine a good point to suspend.  If this doesn't happen within 100ms we
** report failure.
**
** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
*/
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
        VCHIQ_STATUS_T status = VCHIQ_ERROR;
        long rc = 0;
        int repeat = -1;

        if (!arm_state)
                goto out;

        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

        write_lock_bh(&arm_state->susp_res_lock);

        /* block_resume() drops and re-acquires the lock internally; on
         * return the lock is held again and resume is blocked on success */
        status = block_resume(arm_state);
        if (status != VCHIQ_SUCCESS)
                goto unlock;
        if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
                /* Already suspended - just block resume and exit */
                vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
                        __func__);
                status = VCHIQ_SUCCESS;
                goto unlock;
        } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
                /* initiate suspend immediately in the case that we're waiting
                 * for the timeout */
                stop_suspend_timer(arm_state);
                if (!vchiq_videocore_wanted(state)) {
                        vchiq_log_info(vchiq_susp_log_level, "%s videocore "
                                "idle, initiating suspend", __func__);
                        status = vchiq_arm_vcsuspend(state);
                } else if (arm_state->autosuspend_override <
                                                FORCE_SUSPEND_FAIL_MAX) {
                        /* Someone still wants videocore - give autosuspend a
                         * chance to find a good suspend point */
                        vchiq_log_info(vchiq_susp_log_level, "%s letting "
                                "videocore go idle", __func__);
                        status = VCHIQ_SUCCESS;
                } else {
                        /* Too many previous timeouts - suspend regardless */
                        vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
                                "many times - attempting suspend", __func__);
                        status = vchiq_arm_vcsuspend(state);
                }
        } else {
                vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
                        "in progress - wait for completion", __func__);
                status = VCHIQ_SUCCESS;
        }

        /* Wait for suspend to happen due to system idle (not forced..) */
        if (status != VCHIQ_SUCCESS)
                goto unblock_resume;

        do {
                /* Must not sleep holding the lock */
                write_unlock_bh(&arm_state->susp_res_lock);

                rc = wait_for_completion_interruptible_timeout(
                                &arm_state->vc_suspend_complete,
                                msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

                write_lock_bh(&arm_state->susp_res_lock);
                if (rc < 0) {
                        vchiq_log_warning(vchiq_susp_log_level, "%s "
                                "interrupted waiting for suspend", __func__);
                        status = VCHIQ_ERROR;
                        goto unblock_resume;
                } else if (rc == 0) {
                        if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
                                /* Repeat timeout once if in progress */
                                if (repeat < 0) {
                                        repeat = 1;
                                        continue;
                                }
                        }
                        /* Record the failure so repeated timeouts eventually
                         * force a suspend attempt (see above) */
                        arm_state->autosuspend_override++;
                        output_timeout_error(state);

                        status = VCHIQ_RETRY;
                        goto unblock_resume;
                }
        } while (0 < (repeat--));

        /* Check and report state in case we need to abort ARM suspend */
        if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
                status = VCHIQ_RETRY;
                vchiq_log_error(vchiq_susp_log_level,
                        "%s videocore suspend failed (state %s)", __func__,
                        suspend_state_names[arm_state->vc_suspend_state +
                                                VC_SUSPEND_NUM_OFFSET]);
                /* Reset the state only if it's still in an error state.
                 * Something could have already initiated another suspend. */
                if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
                        set_suspend_state(arm_state, VC_SUSPEND_IDLE);

                goto unblock_resume;
        }

        /* successfully suspended - unlock and exit */
        goto unlock;

unblock_resume:
        /* all error states need to unblock resume before exit */
        unblock_resume(arm_state);

unlock:
        write_unlock_bh(&arm_state->susp_res_lock);

out:
        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
        return status;
}
2656
2657void
2658vchiq_check_suspend(VCHIQ_STATE_T *state)
2659{
2660        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2661
2662        if (!arm_state)
2663                goto out;
2664
2665        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2666
2667        write_lock_bh(&arm_state->susp_res_lock);
2668        if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2669                        arm_state->first_connect &&
2670                        !vchiq_videocore_wanted(state)) {
2671                vchiq_arm_vcsuspend(state);
2672        }
2673        write_unlock_bh(&arm_state->susp_res_lock);
2674
2675out:
2676        vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2677        return;
2678}
2679
/* Allow videocore to resume again after a force suspend: unblock resume,
 * request a resume if one is needed, and wait for it to complete.
 * Returns 0 if videocore ends up running, -1 if it remains suspended or
 * the wait was interrupted. */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
        int resume = 0;
        int ret = -1;

        if (!arm_state)
                goto out;

        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

        write_lock_bh(&arm_state->susp_res_lock);
        unblock_resume(arm_state);
        /* vchiq_check_resume() requires the write lock held */
        resume = vchiq_check_resume(state);
        write_unlock_bh(&arm_state->susp_res_lock);

        if (resume) {
                /* Wait without the lock; the slot handler performs the
                 * actual resume and signals vc_resume_complete */
                if (wait_for_completion_interruptible(
                        &arm_state->vc_resume_complete) < 0) {
                        vchiq_log_error(vchiq_susp_log_level,
                                "%s interrupted", __func__);
                        /* failed, cannot accurately derive suspend
                         * state, so exit early. */
                        goto out;
                }
        }

        /* Re-read the state under the (read) lock to report the outcome */
        read_lock_bh(&arm_state->susp_res_lock);
        if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
                vchiq_log_info(vchiq_susp_log_level,
                                "%s: Videocore remains suspended", __func__);
        } else {
                vchiq_log_info(vchiq_susp_log_level,
                                "%s: Videocore resumed", __func__);
                ret = 0;
        }
        read_unlock_bh(&arm_state->susp_res_lock);
out:
        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
        return ret;
}
2722
2723/* This function should be called with the write lock held */
2724int
2725vchiq_check_resume(VCHIQ_STATE_T *state)
2726{
2727        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2728        int resume = 0;
2729
2730        if (!arm_state)
2731                goto out;
2732
2733        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2734
2735        if (need_resume(state)) {
2736                set_resume_state(arm_state, VC_RESUME_REQUESTED);
2737                request_poll(state, NULL, 0);
2738                resume = 1;
2739        }
2740
2741out:
2742        vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2743        return resume;
2744}
2745
2746VCHIQ_STATUS_T
2747vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2748                enum USE_TYPE_E use_type)
2749{
2750        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2751        VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2752        char entity[16];
2753        int *entity_uc;
2754        int local_uc, local_entity_uc;
2755
2756        if (!arm_state)
2757                goto out;
2758
2759        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2760
2761        if (use_type == USE_TYPE_VCHIQ) {
2762                sprintf(entity, "VCHIQ:   ");
2763                entity_uc = &arm_state->peer_use_count;
2764        } else if (service) {
2765                sprintf(entity, "%c%c%c%c:%03d",
2766                        VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2767                        service->client_id);
2768                entity_uc = &service->service_use_count;
2769        } else {
2770                vchiq_log_error(vchiq_susp_log_level, "%s null service "
2771                                "ptr", __func__);
2772                ret = VCHIQ_ERROR;
2773                goto out;
2774        }
2775
2776        write_lock_bh(&arm_state->susp_res_lock);
2777        while (arm_state->resume_blocked) {
2778                /* If we call 'use' while force suspend is waiting for suspend,
2779                 * then we're about to block the thread which the force is
2780                 * waiting to complete, so we're bound to just time out. In this
2781                 * case, set the suspend state such that the wait will be
2782                 * canceled, so we can complete as quickly as possible. */
2783                if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2784                                VC_SUSPEND_IDLE) {
2785                        set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2786                        break;
2787                }
2788                /* If suspend is already in progress then we need to block */
2789                if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2790                        /* Indicate that there are threads waiting on the resume
2791                         * blocker.  These need to be allowed to complete before
2792                         * a _second_ call to force suspend can complete,
2793                         * otherwise low priority threads might never actually
2794                         * continue */
2795                        arm_state->blocked_count++;
2796                        write_unlock_bh(&arm_state->susp_res_lock);
2797                        vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2798                                "blocked - waiting...", __func__, entity);
2799                        if (wait_for_completion_killable(
2800                                        &arm_state->resume_blocker) != 0) {
2801                                vchiq_log_error(vchiq_susp_log_level, "%s %s "
2802                                        "wait for resume blocker interrupted",
2803                                        __func__, entity);
2804                                ret = VCHIQ_ERROR;
2805                                write_lock_bh(&arm_state->susp_res_lock);
2806                                arm_state->blocked_count--;
2807                                write_unlock_bh(&arm_state->susp_res_lock);
2808                                goto out;
2809                        }
2810                        vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2811                                "unblocked", __func__, entity);
2812                        write_lock_bh(&arm_state->susp_res_lock);
2813                        if (--arm_state->blocked_count == 0)
2814                                complete_all(&arm_state->blocked_blocker);
2815                }
2816        }
2817
2818        stop_suspend_timer(arm_state);
2819
2820        local_uc = ++arm_state->videocore_use_count;
2821        local_entity_uc = ++(*entity_uc);
2822
2823        /* If there's a pending request which hasn't yet been serviced then
2824         * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
2825         * vc_resume_complete will block until we either resume or fail to
2826         * suspend */
2827        if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2828                set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2829
2830        if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2831                set_resume_state(arm_state, VC_RESUME_REQUESTED);
2832                vchiq_log_info(vchiq_susp_log_level,
2833                        "%s %s count %d, state count %d",
2834                        __func__, entity, local_entity_uc, local_uc);
2835                request_poll(state, NULL, 0);
2836        } else
2837                vchiq_log_trace(vchiq_susp_log_level,
2838                        "%s %s count %d, state count %d",
2839                        __func__, entity, *entity_uc, local_uc);
2840
2841        write_unlock_bh(&arm_state->susp_res_lock);
2842
2843        /* Completion is in a done state when we're not suspended, so this won't
2844         * block for the non-suspended case. */
2845        if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2846                vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2847                        __func__, entity);
2848                if (wait_for_completion_killable(
2849                                &arm_state->vc_resume_complete) != 0) {
2850                        vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2851                                "resume interrupted", __func__, entity);
2852                        ret = VCHIQ_ERROR;
2853                        goto out;
2854                }
2855                vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2856                        entity);
2857        }
2858
2859        if (ret == VCHIQ_SUCCESS) {
2860                VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2861                long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2862
2863                while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2864                        /* Send the use notify to videocore */
2865                        status = vchiq_send_remote_use_active(state);
2866                        if (status == VCHIQ_SUCCESS)
2867                                ack_cnt--;
2868                        else
2869                                atomic_add(ack_cnt,
2870                                        &arm_state->ka_use_ack_count);
2871                }
2872        }
2873
2874out:
2875        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2876        return ret;
2877}
2878
2879VCHIQ_STATUS_T
2880vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
2881{
2882        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2883        VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2884        char entity[16];
2885        int *entity_uc;
2886        int local_uc, local_entity_uc;
2887
2888        if (!arm_state)
2889                goto out;
2890
2891        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2892
2893        if (service) {
2894                sprintf(entity, "%c%c%c%c:%03d",
2895                        VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2896                        service->client_id);
2897                entity_uc = &service->service_use_count;
2898        } else {
2899                sprintf(entity, "PEER:   ");
2900                entity_uc = &arm_state->peer_use_count;
2901        }
2902
2903        write_lock_bh(&arm_state->susp_res_lock);
2904        if (!arm_state->videocore_use_count || !(*entity_uc)) {
2905                /* Don't use BUG_ON - don't allow user thread to crash kernel */
2906                WARN_ON(!arm_state->videocore_use_count);
2907                WARN_ON(!(*entity_uc));
2908                ret = VCHIQ_ERROR;
2909                goto unlock;
2910        }
2911        local_uc = --arm_state->videocore_use_count;
2912        local_entity_uc = --(*entity_uc);
2913
2914        if (!vchiq_videocore_wanted(state)) {
2915                if (vchiq_platform_use_suspend_timer() &&
2916                                !arm_state->resume_blocked) {
2917                        /* Only use the timer if we're not trying to force
2918                         * suspend (=> resume_blocked) */
2919                        start_suspend_timer(arm_state);
2920                } else {
2921                        vchiq_log_info(vchiq_susp_log_level,
2922                                "%s %s count %d, state count %d - suspending",
2923                                __func__, entity, *entity_uc,
2924                                arm_state->videocore_use_count);
2925                        vchiq_arm_vcsuspend(state);
2926                }
2927        } else
2928                vchiq_log_trace(vchiq_susp_log_level,
2929                        "%s %s count %d, state count %d",
2930                        __func__, entity, *entity_uc,
2931                        arm_state->videocore_use_count);
2932
2933unlock:
2934        write_unlock_bh(&arm_state->susp_res_lock);
2935
2936out:
2937        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2938        return ret;
2939}
2940
/* Handler for a 'use' notification from the videocore: account for the
 * remote use and signal ka_evt (presumably woken by the keepalive thread -
 * not visible in this file section). */
void
vchiq_on_remote_use(VCHIQ_STATE_T *state)
{
        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
        atomic_inc(&arm_state->ka_use_count);
        complete(&arm_state->ka_evt);
}
2950
/* Handler for a 'release' notification from the videocore: account for the
 * remote release and signal ka_evt (presumably woken by the keepalive
 * thread - not visible in this file section). */
void
vchiq_on_remote_release(VCHIQ_STATE_T *state)
{
        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
        atomic_inc(&arm_state->ka_release_count);
        complete(&arm_state->ka_evt);
}
2960
/* Take a videocore use-count reference on behalf of a service. */
VCHIQ_STATUS_T
vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
{
        return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2966
/* Drop a videocore use-count reference held on behalf of a service. */
VCHIQ_STATUS_T
vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
{
        return vchiq_release_internal(service->state, service);
}
2972
/* Accessor for an instance's embedded debugfs node. */
VCHIQ_DEBUGFS_NODE_T *
vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
{
        return &instance->debugfs_node;
}
2978
2979int
2980vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2981{
2982        VCHIQ_SERVICE_T *service;
2983        int use_count = 0, i;
2984
2985        i = 0;
2986        while ((service = next_service_by_instance(instance->state,
2987                instance, &i)) != NULL) {
2988                use_count += service->service_use_count;
2989                unlock_service(service);
2990        }
2991        return use_count;
2992}
2993
/* Accessor for the pid recorded on the instance. */
int
vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
{
        return instance->pid;
}
2999
/* Accessor for the instance's trace flag. */
int
vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
{
        return instance->trace;
}
3005
3006void
3007vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
3008{
3009        VCHIQ_SERVICE_T *service;
3010        int i;
3011
3012        i = 0;
3013        while ((service = next_service_by_instance(instance->state,
3014                instance, &i)) != NULL) {
3015                service->trace = trace;
3016                unlock_service(service);
3017        }
3018        instance->trace = (trace != 0);
3019}
3020
3021static void suspend_timer_callback(struct timer_list *t)
3022{
3023        VCHIQ_ARM_STATE_T *arm_state = from_timer(arm_state, t, suspend_timer);
3024        VCHIQ_STATE_T *state = arm_state->state;
3025
3026        vchiq_log_info(vchiq_susp_log_level,
3027                "%s - suspend timer expired - check suspend", __func__);
3028        vchiq_check_suspend(state);
3029}
3030
3031VCHIQ_STATUS_T
3032vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
3033{
3034        VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3035        VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3036
3037        if (service) {
3038                ret = vchiq_use_internal(service->state, service,
3039                                USE_TYPE_SERVICE_NO_RESUME);
3040                unlock_service(service);
3041        }
3042        return ret;
3043}
3044
3045VCHIQ_STATUS_T
3046vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
3047{
3048        VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3049        VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3050
3051        if (service) {
3052                ret = vchiq_use_internal(service->state, service,
3053                                USE_TYPE_SERVICE);
3054                unlock_service(service);
3055        }
3056        return ret;
3057}
3058
3059VCHIQ_STATUS_T
3060vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
3061{
3062        VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3063        VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3064
3065        if (service) {
3066                ret = vchiq_release_internal(service->state, service);
3067                unlock_service(service);
3068        }
3069        return ret;
3070}
3071
3072void
3073vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
3074{
3075        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3076        int i, j = 0;
3077        /* Only dump 64 services */
3078        static const int local_max_services = 64;
3079        /* If there's more than 64 services, only dump ones with
3080         * non-zero counts */
3081        int only_nonzero = 0;
3082        static const char *nz = "<-- preventing suspend";
3083
3084        enum vc_suspend_status vc_suspend_state;
3085        enum vc_resume_status  vc_resume_state;
3086        int peer_count;
3087        int vc_use_count;
3088        int active_services;
3089        struct service_data_struct {
3090                int fourcc;
3091                int clientid;
3092                int use_count;
3093        } service_data[local_max_services];
3094
3095        if (!arm_state)
3096                return;
3097
3098        read_lock_bh(&arm_state->susp_res_lock);
3099        vc_suspend_state = arm_state->vc_suspend_state;
3100        vc_resume_state  = arm_state->vc_resume_state;
3101        peer_count = arm_state->peer_use_count;
3102        vc_use_count = arm_state->videocore_use_count;
3103        active_services = state->unused_service;
3104        if (active_services > local_max_services)
3105                only_nonzero = 1;
3106
3107        for (i = 0; (i < active_services) && (j < local_max_services); i++) {
3108                VCHIQ_SERVICE_T *service_ptr = state->services[i];
3109
3110                if (!service_ptr)
3111                        continue;
3112
3113                if (only_nonzero && !service_ptr->service_use_count)
3114                        continue;
3115
3116                if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
3117                        continue;
3118
3119                service_data[j].fourcc = service_ptr->base.fourcc;
3120                service_data[j].clientid = service_ptr->client_id;
3121                service_data[j++].use_count = service_ptr->service_use_count;
3122        }
3123
3124        read_unlock_bh(&arm_state->susp_res_lock);
3125
3126        vchiq_log_warning(vchiq_susp_log_level,
3127                "-- Videcore suspend state: %s --",
3128                suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
3129        vchiq_log_warning(vchiq_susp_log_level,
3130                "-- Videcore resume state: %s --",
3131                resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
3132
3133        if (only_nonzero)
3134                vchiq_log_warning(vchiq_susp_log_level, "Too many active "
3135                        "services (%d).  Only dumping up to first %d services "
3136                        "with non-zero use-count", active_services,
3137                        local_max_services);
3138
3139        for (i = 0; i < j; i++) {
3140                vchiq_log_warning(vchiq_susp_log_level,
3141                        "----- %c%c%c%c:%d service count %d %s",
3142                        VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
3143                        service_data[i].clientid,
3144                        service_data[i].use_count,
3145                        service_data[i].use_count ? nz : "");
3146        }
3147        vchiq_log_warning(vchiq_susp_log_level,
3148                "----- VCHIQ use count count %d", peer_count);
3149        vchiq_log_warning(vchiq_susp_log_level,
3150                "--- Overall vchiq instance use count %d", vc_use_count);
3151
3152        vchiq_dump_platform_use_state(state);
3153}
3154
3155VCHIQ_STATUS_T
3156vchiq_check_service(VCHIQ_SERVICE_T *service)
3157{
3158        VCHIQ_ARM_STATE_T *arm_state;
3159        VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3160
3161        if (!service || !service->state)
3162                goto out;
3163
3164        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3165
3166        arm_state = vchiq_platform_get_arm_state(service->state);
3167
3168        read_lock_bh(&arm_state->susp_res_lock);
3169        if (service->service_use_count)
3170                ret = VCHIQ_SUCCESS;
3171        read_unlock_bh(&arm_state->susp_res_lock);
3172
3173        if (ret == VCHIQ_ERROR) {
3174                vchiq_log_error(vchiq_susp_log_level,
3175                        "%s ERROR - %c%c%c%c:%d service count %d, "
3176                        "state count %d, videocore suspend state %s", __func__,
3177                        VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3178                        service->client_id, service->service_use_count,
3179                        arm_state->videocore_use_count,
3180                        suspend_state_names[arm_state->vc_suspend_state +
3181                                                VC_SUSPEND_NUM_OFFSET]);
3182                vchiq_dump_service_use_state(service->state);
3183        }
3184out:
3185        return ret;
3186}
3187
3188/* stub functions */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	/* Intentionally a no-op on this platform; the cast silences the
	 * unused-parameter warning. */
	(void)state;
}
3193
3194void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
3195        VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
3196{
3197        VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3198
3199        vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
3200                get_conn_state_name(oldstate), get_conn_state_name(newstate));
3201        if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
3202                write_lock_bh(&arm_state->susp_res_lock);
3203                if (!arm_state->first_connect) {
3204                        char threadname[16];
3205
3206                        arm_state->first_connect = 1;
3207                        write_unlock_bh(&arm_state->susp_res_lock);
3208                        snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
3209                                state->id);
3210                        arm_state->ka_thread = kthread_create(
3211                                &vchiq_keepalive_thread_func,
3212                                (void *)state,
3213                                threadname);
3214                        if (IS_ERR(arm_state->ka_thread)) {
3215                                vchiq_log_error(vchiq_susp_log_level,
3216                                        "vchiq: FATAL: couldn't create thread %s",
3217                                        threadname);
3218                        } else {
3219                                wake_up_process(arm_state->ka_thread);
3220                        }
3221                } else
3222                        write_unlock_bh(&arm_state->susp_res_lock);
3223        }
3224}
3225
3226static int vchiq_probe(struct platform_device *pdev)
3227{
3228        struct device_node *fw_node;
3229        struct rpi_firmware *fw;
3230        int err;
3231
3232        fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
3233        if (!fw_node) {
3234                dev_err(&pdev->dev, "Missing firmware node\n");
3235                return -ENOENT;
3236        }
3237
3238        fw = rpi_firmware_get(fw_node);
3239        of_node_put(fw_node);
3240        if (!fw)
3241                return -EPROBE_DEFER;
3242
3243        platform_set_drvdata(pdev, fw);
3244
3245        err = vchiq_platform_init(pdev, &g_state);
3246        if (err != 0)
3247                goto failed_platform_init;
3248
3249        err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
3250        if (err != 0) {
3251                vchiq_log_error(vchiq_arm_log_level,
3252                        "Unable to allocate device number");
3253                goto failed_platform_init;
3254        }
3255        cdev_init(&vchiq_cdev, &vchiq_fops);
3256        vchiq_cdev.owner = THIS_MODULE;
3257        err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
3258        if (err != 0) {
3259                vchiq_log_error(vchiq_arm_log_level,
3260                        "Unable to register device");
3261                goto failed_cdev_add;
3262        }
3263
3264        /* create sysfs entries */
3265        vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3266        err = PTR_ERR(vchiq_class);
3267        if (IS_ERR(vchiq_class))
3268                goto failed_class_create;
3269
3270        vchiq_dev = device_create(vchiq_class, NULL,
3271                vchiq_devid, NULL, "vchiq");
3272        err = PTR_ERR(vchiq_dev);
3273        if (IS_ERR(vchiq_dev))
3274                goto failed_device_create;
3275
3276        /* create debugfs entries */
3277        err = vchiq_debugfs_init();
3278        if (err != 0)
3279                goto failed_debugfs_init;
3280
3281        vchiq_log_info(vchiq_arm_log_level,
3282                "vchiq: initialised - version %d (min %d), device %d.%d",
3283                VCHIQ_VERSION, VCHIQ_VERSION_MIN,
3284                MAJOR(vchiq_devid), MINOR(vchiq_devid));
3285
3286        return 0;
3287
3288failed_debugfs_init:
3289        device_destroy(vchiq_class, vchiq_devid);
3290failed_device_create:
3291        class_destroy(vchiq_class);
3292failed_class_create:
3293        cdev_del(&vchiq_cdev);
3294failed_cdev_add:
3295        unregister_chrdev_region(vchiq_devid, 1);
3296failed_platform_init:
3297        vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
3298        return err;
3299}
3300
static int vchiq_remove(struct platform_device *pdev)
{
	/* Tear down in the reverse order of vchiq_probe(): debugfs,
	 * sysfs device, class, cdev, then the chrdev region. */
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	class_destroy(vchiq_class);
	cdev_del(&vchiq_cdev);
	unregister_chrdev_region(vchiq_devid, 1);

	return 0;
}
3311
/* Device-tree match table: binds this driver to the BCM2835 vchiq node. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
3317
/* Platform driver registration; module init/exit are generated by
 * module_platform_driver(). */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
module_platform_driver(vchiq_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
3331