/* linux/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c */
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
   4 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/sched/signal.h>
  10#include <linux/types.h>
  11#include <linux/errno.h>
  12#include <linux/cdev.h>
  13#include <linux/fs.h>
  14#include <linux/device.h>
  15#include <linux/mm.h>
  16#include <linux/highmem.h>
  17#include <linux/pagemap.h>
  18#include <linux/bug.h>
  19#include <linux/completion.h>
  20#include <linux/list.h>
  21#include <linux/of.h>
  22#include <linux/platform_device.h>
  23#include <linux/compat.h>
  24#include <linux/dma-mapping.h>
  25#include <soc/bcm2835/raspberrypi-firmware.h>
  26
  27#include "vchiq_core.h"
  28#include "vchiq_ioctl.h"
  29#include "vchiq_arm.h"
  30#include "vchiq_debugfs.h"
  31
  32#define DEVICE_NAME "vchiq"
  33
  34/* Override the default prefix, which would be vchiq_arm (from the filename) */
  35#undef MODULE_PARAM_PREFIX
  36#define MODULE_PARAM_PREFIX DEVICE_NAME "."
  37
  38/* Some per-instance constants */
  39#define MAX_COMPLETIONS 128
  40#define MAX_SERVICES 64
  41#define MAX_ELEMENTS 8
  42#define MSG_QUEUE_SIZE 128
  43
  44#define KEEPALIVE_VER 1
  45#define KEEPALIVE_VER_MIN KEEPALIVE_VER
  46
  47/* Run time control of log level, based on KERN_XXX level. */
/* Runtime-adjustable verbosity for the ARM-side glue code. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
/* Suspend/resume tracing defaults to errors only. */
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
  50
  51#define SUSPEND_TIMER_TIMEOUT_MS 100
  52#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
  53
#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
/*
 * Printable names for the VideoCore suspend states.  Indexed by
 * (state + VC_SUSPEND_NUM_OFFSET) so the negative "failure" states land
 * at the start of the array -- presumably mirroring the enum declared in
 * vchiq_arm.h; TODO confirm the ordering against that enum.
 */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
/*
 * Printable names for the VideoCore resume states.  Indexed by
 * (state + VC_RESUME_NUM_OFFSET); presumably mirrors the enum declared
 * in vchiq_arm.h -- TODO confirm the ordering against that enum.
 */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
  72/* The number of times we allow force suspend to timeout before actually
  73** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
  74** correctly - we don't want to prevent ARM suspend indefinitely in this case.
  75*/
  76#define FORCE_SUSPEND_FAIL_MAX 8
  77
  78/* The time in ms allowed for videocore to go idle when force suspend has been
  79 * requested */
  80#define FORCE_SUSPEND_TIMEOUT_MS 200
  81
  82static void suspend_timer_callback(struct timer_list *t);
  83
/*
 * Per-service state for services owned by a user-side client
 * (created through the /dev/vchiq character device).
 */
struct user_service {
	struct vchiq_service *service;	/* the underlying core service */
	void *userdata;			/* opaque pointer supplied by the client */
	VCHIQ_INSTANCE_T instance;	/* owning instance */
	char is_vchi;			/* non-zero: headers are buffered in msg_queue */
	char dequeue_pending;		/* a DEQUEUE_MESSAGE caller is waiting */
	char close_pending;		/* CLOSED delivery outstanding (see close_delivered) */
	int message_available_pos;	/* completion-queue position of the last
					 * MESSAGE_AVAILABLE queued for this service */
	int msg_insert;			/* msg_queue write index (free-running) */
	int msg_remove;			/* msg_queue read index (free-running) */
	struct completion insert_event;	/* signalled when a header is queued */
	struct completion remove_event;	/* signalled when a header is removed */
	struct completion close_event;	/* signalled when CLOSED is delivered */
	/* Ring of pending message headers; indices are masked with
	 * MSG_QUEUE_SIZE - 1, so MSG_QUEUE_SIZE must be a power of two. */
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
};
  99
/*
 * A bulk_waiter saved on an instance's bulk_waiter_list when a blocking
 * bulk transfer is interrupted (VCHIQ_RETRY), keyed by the owning pid so
 * the retrying thread can reclaim it (see vchiq_blocking_bulk_transfer).
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* thread that started the transfer */
	struct list_head list;		/* link in instance->bulk_waiter_list */
};
 105
/*
 * Per-client instance state (one per open file handle or kernel client).
 */
struct vchiq_instance_struct {
	struct vchiq_state *state;	/* shared connection state */
	/* Ring of completion records delivered to the client; indices are
	 * free-running and masked with MAX_COMPLETIONS - 1, so
	 * MAX_COMPLETIONS must be a power of two. */
	struct vchiq_completion_data completions[MAX_COMPLETIONS];
	int completion_insert;		/* write index (callback side) */
	int completion_remove;		/* read index (client side) */
	struct completion insert_event;	/* signalled after each insert */
	struct completion remove_event;	/* signalled after each remove */
	struct mutex completion_mutex;

	int connected;			/* set once vchiq_connect succeeds */
	int closing;			/* set on shutdown; callbacks become no-ops */
	int pid;
	int mark;
	int use_close_delivered;	/* client acks CLOSED via CLOSE_DELIVERED */
	int trace;

	/* Waiters left behind by interrupted blocking bulk transfers,
	 * keyed by pid; protected by bulk_waiter_list_mutex. */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	struct vchiq_debugfs_node debugfs_node;
};
 127
/*
 * Cursor used while copying a formatted state dump to user space.
 * NOTE(review): field semantics inferred from names -- the dump routines
 * are outside this chunk; confirm against their implementation.
 */
struct dump_context {
	char __user *buf;	/* destination user buffer */
	size_t actual;		/* bytes produced so far */
	size_t space;		/* remaining capacity of buf */
	loff_t offset;		/* offset at which dumping starts */
};
 134
/* Character device bookkeeping for /dev/vchiq. */
static struct cdev    vchiq_cdev;
static dev_t          vchiq_devid;
/* The single global connection state shared with the VideoCore. */
static struct vchiq_state g_state;
static struct class  *vchiq_class;
/* Serialises access to the per-service msg_queue indices. */
static DEFINE_SPINLOCK(msg_queue_spinlock);
/* Child platform devices; presumably registered at probe time (the
 * probe routine is outside this chunk -- confirm). */
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

/* Per-SoC cache line size, selected per device variant. */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
 150
/*
 * Human-readable names for the VCHIQ_IOC_* commands, indexed by
 * _IOC_NR(cmd); used only for trace logging in vchiq_ioctl().
 */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* Keep the name table in lock-step with the ioctl command numbers. */
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));
 174
 175static VCHIQ_STATUS_T
 176vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
 177        unsigned int size, VCHIQ_BULK_DIR_T dir);
 178
 179#define VCHIQ_INIT_RETRIES 10
 180VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
 181{
 182        VCHIQ_STATUS_T status = VCHIQ_ERROR;
 183        struct vchiq_state *state;
 184        VCHIQ_INSTANCE_T instance = NULL;
 185        int i;
 186
 187        vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
 188
 189        /* VideoCore may not be ready due to boot up timing.
 190         * It may never be ready if kernel and firmware are mismatched,so don't
 191         * block forever.
 192         */
 193        for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
 194                state = vchiq_get_state();
 195                if (state)
 196                        break;
 197                usleep_range(500, 600);
 198        }
 199        if (i == VCHIQ_INIT_RETRIES) {
 200                vchiq_log_error(vchiq_core_log_level,
 201                        "%s: videocore not initialized\n", __func__);
 202                goto failed;
 203        } else if (i > 0) {
 204                vchiq_log_warning(vchiq_core_log_level,
 205                        "%s: videocore initialized after %d retries\n",
 206                        __func__, i);
 207        }
 208
 209        instance = kzalloc(sizeof(*instance), GFP_KERNEL);
 210        if (!instance) {
 211                vchiq_log_error(vchiq_core_log_level,
 212                        "%s: error allocating vchiq instance\n", __func__);
 213                goto failed;
 214        }
 215
 216        instance->connected = 0;
 217        instance->state = state;
 218        mutex_init(&instance->bulk_waiter_list_mutex);
 219        INIT_LIST_HEAD(&instance->bulk_waiter_list);
 220
 221        *instance_out = instance;
 222
 223        status = VCHIQ_SUCCESS;
 224
 225failed:
 226        vchiq_log_trace(vchiq_core_log_level,
 227                "%s(%p): returning %d", __func__, instance, status);
 228
 229        return status;
 230}
 231EXPORT_SYMBOL(vchiq_initialise);
 232
 233VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
 234{
 235        VCHIQ_STATUS_T status;
 236        struct vchiq_state *state = instance->state;
 237
 238        vchiq_log_trace(vchiq_core_log_level,
 239                "%s(%p) called", __func__, instance);
 240
 241        if (mutex_lock_killable(&state->mutex))
 242                return VCHIQ_RETRY;
 243
 244        /* Remove all services */
 245        status = vchiq_shutdown_internal(state, instance);
 246
 247        mutex_unlock(&state->mutex);
 248
 249        vchiq_log_trace(vchiq_core_log_level,
 250                "%s(%p): returning %d", __func__, instance, status);
 251
 252        if (status == VCHIQ_SUCCESS) {
 253                struct bulk_waiter_node *waiter, *next;
 254
 255                list_for_each_entry_safe(waiter, next,
 256                                         &instance->bulk_waiter_list, list) {
 257                        list_del(&waiter->list);
 258                        vchiq_log_info(vchiq_arm_log_level,
 259                                        "bulk_waiter - cleaned up %pK for pid %d",
 260                                        waiter, waiter->pid);
 261                        kfree(waiter);
 262                }
 263                kfree(instance);
 264        }
 265
 266        return status;
 267}
 268EXPORT_SYMBOL(vchiq_shutdown);
 269
/* Returns non-zero once vchiq_connect() has succeeded on this instance. */
static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
{
	return instance->connected;
}
 274
 275VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
 276{
 277        VCHIQ_STATUS_T status;
 278        struct vchiq_state *state = instance->state;
 279
 280        vchiq_log_trace(vchiq_core_log_level,
 281                "%s(%p) called", __func__, instance);
 282
 283        if (mutex_lock_killable(&state->mutex)) {
 284                vchiq_log_trace(vchiq_core_log_level,
 285                        "%s: call to mutex_lock failed", __func__);
 286                status = VCHIQ_RETRY;
 287                goto failed;
 288        }
 289        status = vchiq_connect_internal(state, instance);
 290
 291        if (status == VCHIQ_SUCCESS)
 292                instance->connected = 1;
 293
 294        mutex_unlock(&state->mutex);
 295
 296failed:
 297        vchiq_log_trace(vchiq_core_log_level,
 298                "%s(%p): returning %d", __func__, instance, status);
 299
 300        return status;
 301}
 302EXPORT_SYMBOL(vchiq_connect);
 303
 304VCHIQ_STATUS_T vchiq_add_service(
 305        VCHIQ_INSTANCE_T              instance,
 306        const struct vchiq_service_params *params,
 307        VCHIQ_SERVICE_HANDLE_T       *phandle)
 308{
 309        VCHIQ_STATUS_T status;
 310        struct vchiq_state *state = instance->state;
 311        struct vchiq_service *service = NULL;
 312        int srvstate;
 313
 314        vchiq_log_trace(vchiq_core_log_level,
 315                "%s(%p) called", __func__, instance);
 316
 317        *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
 318
 319        srvstate = vchiq_is_connected(instance)
 320                ? VCHIQ_SRVSTATE_LISTENING
 321                : VCHIQ_SRVSTATE_HIDDEN;
 322
 323        service = vchiq_add_service_internal(
 324                state,
 325                params,
 326                srvstate,
 327                instance,
 328                NULL);
 329
 330        if (service) {
 331                *phandle = service->handle;
 332                status = VCHIQ_SUCCESS;
 333        } else
 334                status = VCHIQ_ERROR;
 335
 336        vchiq_log_trace(vchiq_core_log_level,
 337                "%s(%p): returning %d", __func__, instance, status);
 338
 339        return status;
 340}
 341EXPORT_SYMBOL(vchiq_add_service);
 342
 343VCHIQ_STATUS_T vchiq_open_service(
 344        VCHIQ_INSTANCE_T              instance,
 345        const struct vchiq_service_params *params,
 346        VCHIQ_SERVICE_HANDLE_T       *phandle)
 347{
 348        VCHIQ_STATUS_T   status = VCHIQ_ERROR;
 349        struct vchiq_state   *state = instance->state;
 350        struct vchiq_service *service = NULL;
 351
 352        vchiq_log_trace(vchiq_core_log_level,
 353                "%s(%p) called", __func__, instance);
 354
 355        *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
 356
 357        if (!vchiq_is_connected(instance))
 358                goto failed;
 359
 360        service = vchiq_add_service_internal(state,
 361                params,
 362                VCHIQ_SRVSTATE_OPENING,
 363                instance,
 364                NULL);
 365
 366        if (service) {
 367                *phandle = service->handle;
 368                status = vchiq_open_service_internal(service, current->pid);
 369                if (status != VCHIQ_SUCCESS) {
 370                        vchiq_remove_service(service->handle);
 371                        *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
 372                }
 373        }
 374
 375failed:
 376        vchiq_log_trace(vchiq_core_log_level,
 377                "%s(%p): returning %d", __func__, instance, status);
 378
 379        return status;
 380}
 381EXPORT_SYMBOL(vchiq_open_service);
 382
 383VCHIQ_STATUS_T
 384vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
 385        unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
 386{
 387        VCHIQ_STATUS_T status;
 388
 389        switch (mode) {
 390        case VCHIQ_BULK_MODE_NOCALLBACK:
 391        case VCHIQ_BULK_MODE_CALLBACK:
 392                status = vchiq_bulk_transfer(handle, (void *)data, size,
 393                                             userdata, mode,
 394                                             VCHIQ_BULK_TRANSMIT);
 395                break;
 396        case VCHIQ_BULK_MODE_BLOCKING:
 397                status = vchiq_blocking_bulk_transfer(handle,
 398                        (void *)data, size, VCHIQ_BULK_TRANSMIT);
 399                break;
 400        default:
 401                return VCHIQ_ERROR;
 402        }
 403
 404        return status;
 405}
 406EXPORT_SYMBOL(vchiq_bulk_transmit);
 407
 408VCHIQ_STATUS_T
 409vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
 410        unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
 411{
 412        VCHIQ_STATUS_T status;
 413
 414        switch (mode) {
 415        case VCHIQ_BULK_MODE_NOCALLBACK:
 416        case VCHIQ_BULK_MODE_CALLBACK:
 417                status = vchiq_bulk_transfer(handle, data, size, userdata,
 418                                             mode, VCHIQ_BULK_RECEIVE);
 419                break;
 420        case VCHIQ_BULK_MODE_BLOCKING:
 421                status = vchiq_blocking_bulk_transfer(handle,
 422                        (void *)data, size, VCHIQ_BULK_RECEIVE);
 423                break;
 424        default:
 425                return VCHIQ_ERROR;
 426        }
 427
 428        return status;
 429}
 430EXPORT_SYMBOL(vchiq_bulk_receive);
 431
 432static VCHIQ_STATUS_T
 433vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
 434        unsigned int size, VCHIQ_BULK_DIR_T dir)
 435{
 436        VCHIQ_INSTANCE_T instance;
 437        struct vchiq_service *service;
 438        VCHIQ_STATUS_T status;
 439        struct bulk_waiter_node *waiter = NULL;
 440
 441        service = find_service_by_handle(handle);
 442        if (!service)
 443                return VCHIQ_ERROR;
 444
 445        instance = service->instance;
 446
 447        unlock_service(service);
 448
 449        mutex_lock(&instance->bulk_waiter_list_mutex);
 450        list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
 451                if (waiter->pid == current->pid) {
 452                        list_del(&waiter->list);
 453                        break;
 454                }
 455        }
 456        mutex_unlock(&instance->bulk_waiter_list_mutex);
 457
 458        if (waiter) {
 459                struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
 460
 461                if (bulk) {
 462                        /* This thread has an outstanding bulk transfer. */
 463                        if ((bulk->data != data) ||
 464                                (bulk->size != size)) {
 465                                /* This is not a retry of the previous one.
 466                                 * Cancel the signal when the transfer
 467                                 * completes.
 468                                 */
 469                                spin_lock(&bulk_waiter_spinlock);
 470                                bulk->userdata = NULL;
 471                                spin_unlock(&bulk_waiter_spinlock);
 472                        }
 473                }
 474        }
 475
 476        if (!waiter) {
 477                waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
 478                if (!waiter) {
 479                        vchiq_log_error(vchiq_core_log_level,
 480                                "%s - out of memory", __func__);
 481                        return VCHIQ_ERROR;
 482                }
 483        }
 484
 485        status = vchiq_bulk_transfer(handle, data, size, &waiter->bulk_waiter,
 486                                     VCHIQ_BULK_MODE_BLOCKING, dir);
 487        if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
 488                !waiter->bulk_waiter.bulk) {
 489                struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
 490
 491                if (bulk) {
 492                        /* Cancel the signal when the transfer
 493                         * completes.
 494                         */
 495                        spin_lock(&bulk_waiter_spinlock);
 496                        bulk->userdata = NULL;
 497                        spin_unlock(&bulk_waiter_spinlock);
 498                }
 499                kfree(waiter);
 500        } else {
 501                waiter->pid = current->pid;
 502                mutex_lock(&instance->bulk_waiter_list_mutex);
 503                list_add(&waiter->list, &instance->bulk_waiter_list);
 504                mutex_unlock(&instance->bulk_waiter_list_mutex);
 505                vchiq_log_info(vchiq_arm_log_level,
 506                                "saved bulk_waiter %pK for pid %d",
 507                                waiter, current->pid);
 508        }
 509
 510        return status;
 511}
/****************************************************************************
*
*   add_completion
*
***************************************************************************/

/*
 * Append a completion record to the instance's ring for delivery to the
 * user thread blocked in AWAIT_COMPLETION.
 *
 * Runs on the service-callback path.  If the ring is full, blocks
 * interruptibly on remove_event until the client consumes a slot.
 *
 * Returns VCHIQ_SUCCESS when queued (or when the instance is closing,
 * in which case the record is dropped), VCHIQ_RETRY if interrupted by a
 * signal while waiting for space.
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Indices are free-running; masking wraps into the ring
	 * (MAX_COMPLETIONS must be a power of two). */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
 578
/****************************************************************************
*
*   service_callback
*
***************************************************************************/

/*
 * Core callback for services owned by user-side clients.
 *
 * For vchi-style services with a message header, the header is buffered
 * in the per-service msg_queue; if the queue is full, an extra
 * MESSAGE_AVAILABLE completion may be inserted and the callback waits
 * (interruptibly) for the client to drain a message.  When a waiting
 * DEQUEUE_MESSAGE or an outstanding MESSAGE_AVAILABLE makes a new
 * completion redundant, it is skipped.  All other events fall through
 * to add_completion().
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if interrupted by a signal, or
 * VCHIQ_ERROR if the instance began closing mid-wait.
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
		 VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a user_service record
	** containing the original callback and the user state structure, which
	** contains a circular buffer for completion records.
	*/
	struct user_service *user_service;
	struct vchiq_service *service;
	VCHIQ_INSTANCE_T instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Events are dropped once the instance has started closing. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* Queue full: drop the lock before sleeping. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header is now owned by msg_queue; don't also pass it
		 * to add_completion below. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
 691
/****************************************************************************
*
*   user_service_free
*
***************************************************************************/

/* Destructor hook for a user_service allocation; just frees it. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
 702
 703/****************************************************************************
 704*
 705*   close_delivered
 706*
 707***************************************************************************/
 708static void close_delivered(struct user_service *user_service)
 709{
 710        vchiq_log_info(vchiq_arm_log_level,
 711                "%s(handle=%x)",
 712                __func__, user_service->service->handle);
 713
 714        if (user_service->close_pending) {
 715                /* Allow the underlying service to be culled */
 716                unlock_service(user_service->service);
 717
 718                /* Wake the user-thread blocked in close_ or remove_service */
 719                complete(&user_service->close_event);
 720
 721                user_service->close_pending = 0;
 722        }
 723}
 724
/*
 * Iteration state carried across calls to vchiq_ioc_copy_element_data()
 * while a scatter/gather array of user elements is copied into a message.
 */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* current element in the array */
	size_t element_offset;		/* bytes of *element copied so far */
	unsigned long elements_to_go;	/* elements remaining, incl. current */
};
 730
 731static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
 732                                           size_t offset, size_t maxsize)
 733{
 734        struct vchiq_io_copy_callback_context *cc = context;
 735        size_t total_bytes_copied = 0;
 736        size_t bytes_this_round;
 737
 738        while (total_bytes_copied < maxsize) {
 739                if (!cc->elements_to_go)
 740                        return total_bytes_copied;
 741
 742                if (!cc->element->size) {
 743                        cc->elements_to_go--;
 744                        cc->element++;
 745                        cc->element_offset = 0;
 746                        continue;
 747                }
 748
 749                bytes_this_round = min(cc->element->size - cc->element_offset,
 750                                       maxsize - total_bytes_copied);
 751
 752                if (copy_from_user(dest + total_bytes_copied,
 753                                  cc->element->data + cc->element_offset,
 754                                  bytes_this_round))
 755                        return -EFAULT;
 756
 757                cc->element_offset += bytes_this_round;
 758                total_bytes_copied += bytes_this_round;
 759
 760                if (cc->element_offset == cc->element->size) {
 761                        cc->elements_to_go--;
 762                        cc->element++;
 763                        cc->element_offset = 0;
 764                }
 765        }
 766
 767        return maxsize;
 768}
 769
/**************************************************************************
 *
 *   vchiq_ioc_queue_message
 *
 **************************************************************************/

/*
 * Queue a message assembled from a user-space element array.
 *
 * Validates that every element with a non-zero size carries a data
 * pointer, totals the element sizes, then delegates the actual copying
 * to vchiq_queue_message() via the vchiq_ioc_copy_element_data()
 * callback.
 *
 * NOTE(review): on bad input this returns -EFAULT through a
 * VCHIQ_STATUS_T return type; confirm the QUEUE_MESSAGE ioctl handler
 * treats every non-VCHIQ_SUCCESS value as an error.
 */
static VCHIQ_STATUS_T
vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
			struct vchiq_element *elements,
			unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	unsigned long i;
	size_t total_size = 0;

	context.element = elements;
	context.element_offset = 0;
	context.elements_to_go = count;

	for (i = 0; i < count; i++) {
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
				   &context, total_size);
}
 798
 799/****************************************************************************
 800*
 801*   vchiq_ioctl
 802*
 803***************************************************************************/
 804static long
 805vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 806{
 807        VCHIQ_INSTANCE_T instance = file->private_data;
 808        VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
 809        struct vchiq_service *service = NULL;
 810        long ret = 0;
 811        int i, rc;
 812
 813        DEBUG_INITIALISE(g_state.local)
 814
 815        vchiq_log_trace(vchiq_arm_log_level,
 816                "%s - instance %pK, cmd %s, arg %lx",
 817                __func__, instance,
 818                ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
 819                (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
 820                ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
 821
 822        switch (cmd) {
 823        case VCHIQ_IOC_SHUTDOWN:
 824                if (!instance->connected)
 825                        break;
 826
 827                /* Remove all services */
 828                i = 0;
 829                while ((service = next_service_by_instance(instance->state,
 830                        instance, &i)) != NULL) {
 831                        status = vchiq_remove_service(service->handle);
 832                        unlock_service(service);
 833                        if (status != VCHIQ_SUCCESS)
 834                                break;
 835                }
 836                service = NULL;
 837
 838                if (status == VCHIQ_SUCCESS) {
 839                        /* Wake the completion thread and ask it to exit */
 840                        instance->closing = 1;
 841                        complete(&instance->insert_event);
 842                }
 843
 844                break;
 845
 846        case VCHIQ_IOC_CONNECT:
 847                if (instance->connected) {
 848                        ret = -EINVAL;
 849                        break;
 850                }
 851                rc = mutex_lock_killable(&instance->state->mutex);
 852                if (rc) {
 853                        vchiq_log_error(vchiq_arm_log_level,
 854                                "vchiq: connect: could not lock mutex for "
 855                                "state %d: %d",
 856                                instance->state->id, rc);
 857                        ret = -EINTR;
 858                        break;
 859                }
 860                status = vchiq_connect_internal(instance->state, instance);
 861                mutex_unlock(&instance->state->mutex);
 862
 863                if (status == VCHIQ_SUCCESS)
 864                        instance->connected = 1;
 865                else
 866                        vchiq_log_error(vchiq_arm_log_level,
 867                                "vchiq: could not connect: %d", status);
 868                break;
 869
 870        case VCHIQ_IOC_CREATE_SERVICE: {
 871                struct vchiq_create_service args;
 872                struct user_service *user_service = NULL;
 873                void *userdata;
 874                int srvstate;
 875
 876                if (copy_from_user(&args, (const void __user *)arg,
 877                                   sizeof(args))) {
 878                        ret = -EFAULT;
 879                        break;
 880                }
 881
 882                user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
 883                if (!user_service) {
 884                        ret = -ENOMEM;
 885                        break;
 886                }
 887
 888                if (args.is_open) {
 889                        if (!instance->connected) {
 890                                ret = -ENOTCONN;
 891                                kfree(user_service);
 892                                break;
 893                        }
 894                        srvstate = VCHIQ_SRVSTATE_OPENING;
 895                } else {
 896                        srvstate =
 897                                 instance->connected ?
 898                                 VCHIQ_SRVSTATE_LISTENING :
 899                                 VCHIQ_SRVSTATE_HIDDEN;
 900                }
 901
 902                userdata = args.params.userdata;
 903                args.params.callback = service_callback;
 904                args.params.userdata = user_service;
 905                service = vchiq_add_service_internal(
 906                                instance->state,
 907                                &args.params, srvstate,
 908                                instance, user_service_free);
 909
 910                if (service != NULL) {
 911                        user_service->service = service;
 912                        user_service->userdata = userdata;
 913                        user_service->instance = instance;
 914                        user_service->is_vchi = (args.is_vchi != 0);
 915                        user_service->dequeue_pending = 0;
 916                        user_service->close_pending = 0;
 917                        user_service->message_available_pos =
 918                                instance->completion_remove - 1;
 919                        user_service->msg_insert = 0;
 920                        user_service->msg_remove = 0;
 921                        init_completion(&user_service->insert_event);
 922                        init_completion(&user_service->remove_event);
 923                        init_completion(&user_service->close_event);
 924
 925                        if (args.is_open) {
 926                                status = vchiq_open_service_internal
 927                                        (service, instance->pid);
 928                                if (status != VCHIQ_SUCCESS) {
 929                                        vchiq_remove_service(service->handle);
 930                                        service = NULL;
 931                                        ret = (status == VCHIQ_RETRY) ?
 932                                                -EINTR : -EIO;
 933                                        break;
 934                                }
 935                        }
 936
 937                        if (copy_to_user((void __user *)
 938                                &(((struct vchiq_create_service __user *)
 939                                        arg)->handle),
 940                                (const void *)&service->handle,
 941                                sizeof(service->handle))) {
 942                                ret = -EFAULT;
 943                                vchiq_remove_service(service->handle);
 944                        }
 945
 946                        service = NULL;
 947                } else {
 948                        ret = -EEXIST;
 949                        kfree(user_service);
 950                }
 951        } break;
 952
 953        case VCHIQ_IOC_CLOSE_SERVICE:
 954        case VCHIQ_IOC_REMOVE_SERVICE: {
 955                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
 956                struct user_service *user_service;
 957
 958                service = find_service_for_instance(instance, handle);
 959                if (!service) {
 960                        ret = -EINVAL;
 961                        break;
 962                }
 963
 964                user_service = service->base.userdata;
 965
 966                /* close_pending is false on first entry, and when the
 967                   wait in vchiq_close_service has been interrupted. */
 968                if (!user_service->close_pending) {
 969                        status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
 970                                 vchiq_close_service(service->handle) :
 971                                 vchiq_remove_service(service->handle);
 972                        if (status != VCHIQ_SUCCESS)
 973                                break;
 974                }
 975
 976                /* close_pending is true once the underlying service
 977                   has been closed until the client library calls the
 978                   CLOSE_DELIVERED ioctl, signalling close_event. */
 979                if (user_service->close_pending &&
 980                        wait_for_completion_interruptible(
 981                                &user_service->close_event))
 982                        status = VCHIQ_RETRY;
 983                break;
 984        }
 985
 986        case VCHIQ_IOC_USE_SERVICE:
 987        case VCHIQ_IOC_RELEASE_SERVICE: {
 988                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
 989
 990                service = find_service_for_instance(instance, handle);
 991                if (service != NULL) {
 992                        status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
 993                                vchiq_use_service_internal(service) :
 994                                vchiq_release_service_internal(service);
 995                        if (status != VCHIQ_SUCCESS) {
 996                                vchiq_log_error(vchiq_susp_log_level,
 997                                        "%s: cmd %s returned error %d for "
 998                                        "service %c%c%c%c:%03d",
 999                                        __func__,
1000                                        (cmd == VCHIQ_IOC_USE_SERVICE) ?
1001                                                "VCHIQ_IOC_USE_SERVICE" :
1002                                                "VCHIQ_IOC_RELEASE_SERVICE",
1003                                        status,
1004                                        VCHIQ_FOURCC_AS_4CHARS(
1005                                                service->base.fourcc),
1006                                        service->client_id);
1007                                ret = -EINVAL;
1008                        }
1009                } else
1010                        ret = -EINVAL;
1011        } break;
1012
1013        case VCHIQ_IOC_QUEUE_MESSAGE: {
1014                struct vchiq_queue_message args;
1015
1016                if (copy_from_user(&args, (const void __user *)arg,
1017                                   sizeof(args))) {
1018                        ret = -EFAULT;
1019                        break;
1020                }
1021
1022                service = find_service_for_instance(instance, args.handle);
1023
1024                if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
1025                        /* Copy elements into kernel space */
1026                        struct vchiq_element elements[MAX_ELEMENTS];
1027
1028                        if (copy_from_user(elements, args.elements,
1029                                args.count * sizeof(struct vchiq_element)) == 0)
1030                                status = vchiq_ioc_queue_message
1031                                        (args.handle,
1032                                        elements, args.count);
1033                        else
1034                                ret = -EFAULT;
1035                } else {
1036                        ret = -EINVAL;
1037                }
1038        } break;
1039
1040        case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
1041        case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
1042                struct vchiq_queue_bulk_transfer args;
1043                struct bulk_waiter_node *waiter = NULL;
1044
1045                VCHIQ_BULK_DIR_T dir =
1046                        (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
1047                        VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1048
1049                if (copy_from_user(&args, (const void __user *)arg,
1050                                   sizeof(args))) {
1051                        ret = -EFAULT;
1052                        break;
1053                }
1054
1055                service = find_service_for_instance(instance, args.handle);
1056                if (!service) {
1057                        ret = -EINVAL;
1058                        break;
1059                }
1060
1061                if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
1062                        waiter = kzalloc(sizeof(struct bulk_waiter_node),
1063                                GFP_KERNEL);
1064                        if (!waiter) {
1065                                ret = -ENOMEM;
1066                                break;
1067                        }
1068
1069                        args.userdata = &waiter->bulk_waiter;
1070                } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
1071                        mutex_lock(&instance->bulk_waiter_list_mutex);
1072                        list_for_each_entry(waiter, &instance->bulk_waiter_list,
1073                                            list) {
1074                                if (waiter->pid == current->pid) {
1075                                        list_del(&waiter->list);
1076                                        break;
1077                                }
1078                        }
1079                        mutex_unlock(&instance->bulk_waiter_list_mutex);
1080                        if (!waiter) {
1081                                vchiq_log_error(vchiq_arm_log_level,
1082                                        "no bulk_waiter found for pid %d",
1083                                        current->pid);
1084                                ret = -ESRCH;
1085                                break;
1086                        }
1087                        vchiq_log_info(vchiq_arm_log_level,
1088                                "found bulk_waiter %pK for pid %d", waiter,
1089                                current->pid);
1090                        args.userdata = &waiter->bulk_waiter;
1091                }
1092
1093                status = vchiq_bulk_transfer(args.handle, args.data, args.size,
1094                                             args.userdata, args.mode, dir);
1095
1096                if (!waiter)
1097                        break;
1098
1099                if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
1100                        !waiter->bulk_waiter.bulk) {
1101                        if (waiter->bulk_waiter.bulk) {
1102                                /* Cancel the signal when the transfer
1103                                ** completes. */
1104                                spin_lock(&bulk_waiter_spinlock);
1105                                waiter->bulk_waiter.bulk->userdata = NULL;
1106                                spin_unlock(&bulk_waiter_spinlock);
1107                        }
1108                        kfree(waiter);
1109                } else {
1110                        const VCHIQ_BULK_MODE_T mode_waiting =
1111                                VCHIQ_BULK_MODE_WAITING;
1112                        waiter->pid = current->pid;
1113                        mutex_lock(&instance->bulk_waiter_list_mutex);
1114                        list_add(&waiter->list, &instance->bulk_waiter_list);
1115                        mutex_unlock(&instance->bulk_waiter_list_mutex);
1116                        vchiq_log_info(vchiq_arm_log_level,
1117                                "saved bulk_waiter %pK for pid %d",
1118                                waiter, current->pid);
1119
1120                        if (copy_to_user((void __user *)
1121                                &(((struct vchiq_queue_bulk_transfer __user *)
1122                                        arg)->mode),
1123                                (const void *)&mode_waiting,
1124                                sizeof(mode_waiting)))
1125                                ret = -EFAULT;
1126                }
1127        } break;
1128
1129        case VCHIQ_IOC_AWAIT_COMPLETION: {
1130                struct vchiq_await_completion args;
1131
1132                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1133                if (!instance->connected) {
1134                        ret = -ENOTCONN;
1135                        break;
1136                }
1137
1138                if (copy_from_user(&args, (const void __user *)arg,
1139                        sizeof(args))) {
1140                        ret = -EFAULT;
1141                        break;
1142                }
1143
1144                mutex_lock(&instance->completion_mutex);
1145
1146                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1147                while ((instance->completion_remove ==
1148                        instance->completion_insert)
1149                        && !instance->closing) {
1150                        int rc;
1151
1152                        DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1153                        mutex_unlock(&instance->completion_mutex);
1154                        rc = wait_for_completion_interruptible(
1155                                                &instance->insert_event);
1156                        mutex_lock(&instance->completion_mutex);
1157                        if (rc) {
1158                                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1159                                vchiq_log_info(vchiq_arm_log_level,
1160                                        "AWAIT_COMPLETION interrupted");
1161                                ret = -EINTR;
1162                                break;
1163                        }
1164                }
1165                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1166
1167                if (ret == 0) {
1168                        int msgbufcount = args.msgbufcount;
1169                        int remove = instance->completion_remove;
1170
1171                        for (ret = 0; ret < args.count; ret++) {
1172                                struct vchiq_completion_data *completion;
1173                                struct vchiq_service *service;
1174                                struct user_service *user_service;
1175                                struct vchiq_header *header;
1176
1177                                if (remove == instance->completion_insert)
1178                                        break;
1179
1180                                completion = &instance->completions[
1181                                        remove & (MAX_COMPLETIONS - 1)];
1182
1183                                /*
1184                                 * A read memory barrier is needed to stop
1185                                 * prefetch of a stale completion record
1186                                 */
1187                                rmb();
1188
1189                                service = completion->service_userdata;
1190                                user_service = service->base.userdata;
1191                                completion->service_userdata =
1192                                        user_service->userdata;
1193
1194                                header = completion->header;
1195                                if (header) {
1196                                        void __user *msgbuf;
1197                                        int msglen;
1198
1199                                        msglen = header->size +
1200                                                sizeof(struct vchiq_header);
1201                                        /* This must be a VCHIQ-style service */
1202                                        if (args.msgbufsize < msglen) {
1203                                                vchiq_log_error(
1204                                                        vchiq_arm_log_level,
1205                                                        "header %pK: msgbufsize %x < msglen %x",
1206                                                        header, args.msgbufsize,
1207                                                        msglen);
1208                                                WARN(1, "invalid message "
1209                                                        "size\n");
1210                                                if (ret == 0)
1211                                                        ret = -EMSGSIZE;
1212                                                break;
1213                                        }
1214                                        if (msgbufcount <= 0)
1215                                                /* Stall here for lack of a
1216                                                ** buffer for the message. */
1217                                                break;
1218                                        /* Get the pointer from user space */
1219                                        msgbufcount--;
1220                                        if (copy_from_user(&msgbuf,
1221                                                (const void __user *)
1222                                                &args.msgbufs[msgbufcount],
1223                                                sizeof(msgbuf))) {
1224                                                if (ret == 0)
1225                                                        ret = -EFAULT;
1226                                                break;
1227                                        }
1228
1229                                        /* Copy the message to user space */
1230                                        if (copy_to_user(msgbuf, header,
1231                                                msglen)) {
1232                                                if (ret == 0)
1233                                                        ret = -EFAULT;
1234                                                break;
1235                                        }
1236
1237                                        /* Now it has been copied, the message
1238                                        ** can be released. */
1239                                        vchiq_release_message(service->handle,
1240                                                header);
1241
1242                                        /* The completion must point to the
1243                                        ** msgbuf. */
1244                                        completion->header = msgbuf;
1245                                }
1246
1247                                if ((completion->reason ==
1248                                        VCHIQ_SERVICE_CLOSED) &&
1249                                        !instance->use_close_delivered)
1250                                        unlock_service(service);
1251
1252                                if (copy_to_user((void __user *)(
1253                                        (size_t)args.buf + ret *
1254                                        sizeof(struct vchiq_completion_data)),
1255                                        completion,
1256                                        sizeof(struct vchiq_completion_data))) {
1257                                                if (ret == 0)
1258                                                        ret = -EFAULT;
1259                                        break;
1260                                }
1261
1262                                /*
1263                                 * Ensure that the above copy has completed
1264                                 * before advancing the remove pointer.
1265                                 */
1266                                mb();
1267                                remove++;
1268                                instance->completion_remove = remove;
1269                        }
1270
1271                        if (msgbufcount != args.msgbufcount) {
1272                                if (copy_to_user((void __user *)
1273                                        &((struct vchiq_await_completion *)arg)
1274                                                ->msgbufcount,
1275                                        &msgbufcount,
1276                                        sizeof(msgbufcount))) {
1277                                        ret = -EFAULT;
1278                                }
1279                        }
1280                }
1281
1282                if (ret)
1283                        complete(&instance->remove_event);
1284                mutex_unlock(&instance->completion_mutex);
1285                DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1286        } break;
1287
1288        case VCHIQ_IOC_DEQUEUE_MESSAGE: {
1289                struct vchiq_dequeue_message args;
1290                struct user_service *user_service;
1291                struct vchiq_header *header;
1292
1293                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1294                if (copy_from_user(&args, (const void __user *)arg,
1295                                   sizeof(args))) {
1296                        ret = -EFAULT;
1297                        break;
1298                }
1299                service = find_service_for_instance(instance, args.handle);
1300                if (!service) {
1301                        ret = -EINVAL;
1302                        break;
1303                }
1304                user_service = (struct user_service *)service->base.userdata;
1305                if (user_service->is_vchi == 0) {
1306                        ret = -EINVAL;
1307                        break;
1308                }
1309
1310                spin_lock(&msg_queue_spinlock);
1311                if (user_service->msg_remove == user_service->msg_insert) {
1312                        if (!args.blocking) {
1313                                spin_unlock(&msg_queue_spinlock);
1314                                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1315                                ret = -EWOULDBLOCK;
1316                                break;
1317                        }
1318                        user_service->dequeue_pending = 1;
1319                        do {
1320                                spin_unlock(&msg_queue_spinlock);
1321                                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1322                                if (wait_for_completion_interruptible(
1323                                        &user_service->insert_event)) {
1324                                        vchiq_log_info(vchiq_arm_log_level,
1325                                                "DEQUEUE_MESSAGE interrupted");
1326                                        ret = -EINTR;
1327                                        break;
1328                                }
1329                                spin_lock(&msg_queue_spinlock);
1330                        } while (user_service->msg_remove ==
1331                                user_service->msg_insert);
1332
1333                        if (ret)
1334                                break;
1335                }
1336
1337                BUG_ON((int)(user_service->msg_insert -
1338                        user_service->msg_remove) < 0);
1339
1340                header = user_service->msg_queue[user_service->msg_remove &
1341                        (MSG_QUEUE_SIZE - 1)];
1342                user_service->msg_remove++;
1343                spin_unlock(&msg_queue_spinlock);
1344
1345                complete(&user_service->remove_event);
1346                if (header == NULL)
1347                        ret = -ENOTCONN;
1348                else if (header->size <= args.bufsize) {
1349                        /* Copy to user space if msgbuf is not NULL */
1350                        if ((args.buf == NULL) ||
1351                                (copy_to_user((void __user *)args.buf,
1352                                header->data,
1353                                header->size) == 0)) {
1354                                ret = header->size;
1355                                vchiq_release_message(
1356                                        service->handle,
1357                                        header);
1358                        } else
1359                                ret = -EFAULT;
1360                } else {
1361                        vchiq_log_error(vchiq_arm_log_level,
1362                                "header %pK: bufsize %x < size %x",
1363                                header, args.bufsize, header->size);
1364                        WARN(1, "invalid size\n");
1365                        ret = -EMSGSIZE;
1366                }
1367                DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1368        } break;
1369
1370        case VCHIQ_IOC_GET_CLIENT_ID: {
1371                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1372
1373                ret = vchiq_get_client_id(handle);
1374        } break;
1375
1376        case VCHIQ_IOC_GET_CONFIG: {
1377                struct vchiq_get_config args;
1378                struct vchiq_config config;
1379
1380                if (copy_from_user(&args, (const void __user *)arg,
1381                                   sizeof(args))) {
1382                        ret = -EFAULT;
1383                        break;
1384                }
1385                if (args.config_size > sizeof(config)) {
1386                        ret = -EINVAL;
1387                        break;
1388                }
1389
1390                vchiq_get_config(&config);
1391                if (copy_to_user(args.pconfig, &config, args.config_size)) {
1392                        ret = -EFAULT;
1393                        break;
1394                }
1395        } break;
1396
1397        case VCHIQ_IOC_SET_SERVICE_OPTION: {
1398                struct vchiq_set_service_option args;
1399
1400                if (copy_from_user(&args, (const void __user *)arg,
1401                                   sizeof(args))) {
1402                        ret = -EFAULT;
1403                        break;
1404                }
1405
1406                service = find_service_for_instance(instance, args.handle);
1407                if (!service) {
1408                        ret = -EINVAL;
1409                        break;
1410                }
1411
1412                status = vchiq_set_service_option(
1413                                args.handle, args.option, args.value);
1414        } break;
1415
1416        case VCHIQ_IOC_LIB_VERSION: {
1417                unsigned int lib_version = (unsigned int)arg;
1418
1419                if (lib_version < VCHIQ_VERSION_MIN)
1420                        ret = -EINVAL;
1421                else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1422                        instance->use_close_delivered = 1;
1423        } break;
1424
1425        case VCHIQ_IOC_CLOSE_DELIVERED: {
1426                VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1427
1428                service = find_closed_service_for_instance(instance, handle);
1429                if (service != NULL) {
1430                        struct user_service *user_service =
1431                                (struct user_service *)service->base.userdata;
1432                        close_delivered(user_service);
1433                } else
1434                        ret = -EINVAL;
1435        } break;
1436
1437        default:
1438                ret = -ENOTTY;
1439                break;
1440        }
1441
1442        if (service)
1443                unlock_service(service);
1444
1445        if (ret == 0) {
1446                if (status == VCHIQ_ERROR)
1447                        ret = -EIO;
1448                else if (status == VCHIQ_RETRY)
1449                        ret = -EINTR;
1450        }
1451
1452        if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1453                (ret != -EWOULDBLOCK))
1454                vchiq_log_info(vchiq_arm_log_level,
1455                        "  ioctl instance %pK, cmd %s -> status %d, %ld",
1456                        instance,
1457                        (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1458                                ioctl_names[_IOC_NR(cmd)] :
1459                                "<invalid>",
1460                        status, ret);
1461        else
1462                vchiq_log_trace(vchiq_arm_log_level,
1463                        "  ioctl instance %pK, cmd %s -> status %d, %ld",
1464                        instance,
1465                        (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1466                                ioctl_names[_IOC_NR(cmd)] :
1467                                "<invalid>",
1468                        status, ret);
1469
1470        return ret;
1471}
1472
1473#if defined(CONFIG_COMPAT)
1474
/* 32-bit ABI mirror of struct vchiq_service_params: user pointers are
 * carried as compat_uptr_t so a 64-bit kernel can decode arguments from a
 * 32-bit process.  Field order must match the native structure. */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;	/* 32-bit user pointer */
	compat_uptr_t userdata;	/* 32-bit user pointer */
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};
1482
/* 32-bit layout of struct vchiq_create_service for the compat ioctl path. */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

/* Same magic/number as VCHIQ_IOC_CREATE_SERVICE, but sized for the
 * 32-bit argument structure so _IOC_SIZE matches what 32-bit callers pass. */
#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1492
/*
 * 32-bit compat handler for VCHIQ_IOC_CREATE_SERVICE.
 *
 * Copies the 32-bit argument struct in from user space, rebuilds a native
 * struct vchiq_create_service in compat_alloc_user_space() memory (still
 * user-addressable, hence the put_user() calls), invokes the native ioctl
 * handler, then copies only the OUT field (handle) back to the caller's
 * 32-bit struct.
 */
static long
vchiq_compat_ioctl_create_service(
        struct file *file,
        unsigned int cmd,
        unsigned long arg)
{
        struct vchiq_create_service __user *args;
        struct vchiq_create_service32 __user *ptrargs32 =
                (struct vchiq_create_service32 __user *)arg;
        struct vchiq_create_service32 args32;
        long ret;

        args = compat_alloc_user_space(sizeof(*args));
        if (!args)
                return -EFAULT;

        if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
                return -EFAULT;

        /* Widen the compat pointers while rebuilding the native struct. */
        if (put_user(args32.params.fourcc, &args->params.fourcc) ||
            put_user(compat_ptr(args32.params.callback),
                     &args->params.callback) ||
            put_user(compat_ptr(args32.params.userdata),
                     &args->params.userdata) ||
            put_user(args32.params.version, &args->params.version) ||
            put_user(args32.params.version_min,
                     &args->params.version_min) ||
            put_user(args32.is_open, &args->is_open) ||
            put_user(args32.is_vchi, &args->is_vchi) ||
            put_user(args32.handle, &args->handle))
                return -EFAULT;

        ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);

        if (ret < 0)
                return ret;

        /* Copy the OUT handle back into the caller's 32-bit struct. */
        if (get_user(args32.handle, &args->handle))
                return -EFAULT;

        if (copy_to_user(&ptrargs32->handle,
                         &args32.handle,
                         sizeof(args32.handle)))
                return -EFAULT;

        return 0;
}
1540
/* 32-bit ABI image of struct vchiq_element (one message fragment). */
struct vchiq_element32 {
        compat_uptr_t data; /* 32-bit user pointer to the fragment payload */
        unsigned int size;
};
1545
/* 32-bit ABI image of struct vchiq_queue_message (VCHIQ_IOC_QUEUE_MESSAGE). */
struct vchiq_queue_message32 {
        unsigned int handle;
        unsigned int count; /* number of vchiq_element32 entries at 'elements' */
        compat_uptr_t elements; /* 32-bit user pointer to the element array */
};
1551
1552#define VCHIQ_IOC_QUEUE_MESSAGE32 \
1553        _IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1554
1555static long
1556vchiq_compat_ioctl_queue_message(struct file *file,
1557                                 unsigned int cmd,
1558                                 unsigned long arg)
1559{
1560        struct vchiq_queue_message __user *args;
1561        struct vchiq_element __user *elements;
1562        struct vchiq_queue_message32 args32;
1563        unsigned int count;
1564
1565        if (copy_from_user(&args32,
1566                           (struct vchiq_queue_message32 __user *)arg,
1567                           sizeof(args32)))
1568                return -EFAULT;
1569
1570        args = compat_alloc_user_space(sizeof(*args) +
1571                                       (sizeof(*elements) * MAX_ELEMENTS));
1572
1573        if (!args)
1574                return -EFAULT;
1575
1576        if (put_user(args32.handle, &args->handle) ||
1577            put_user(args32.count, &args->count) ||
1578            put_user(compat_ptr(args32.elements), &args->elements))
1579                return -EFAULT;
1580
1581        if (args32.count > MAX_ELEMENTS)
1582                return -EINVAL;
1583
1584        if (args32.elements && args32.count) {
1585                struct vchiq_element32 tempelement32[MAX_ELEMENTS];
1586
1587                elements = (struct vchiq_element __user *)(args + 1);
1588
1589                if (copy_from_user(&tempelement32,
1590                                   compat_ptr(args32.elements),
1591                                   sizeof(tempelement32)))
1592                        return -EFAULT;
1593
1594                for (count = 0; count < args32.count; count++) {
1595                        if (put_user(compat_ptr(tempelement32[count].data),
1596                                     &elements[count].data) ||
1597                            put_user(tempelement32[count].size,
1598                                     &elements[count].size))
1599                                return -EFAULT;
1600                }
1601
1602                if (put_user(elements, &args->elements))
1603                        return -EFAULT;
1604        }
1605
1606        return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
1607}
1608
/* 32-bit ABI image of struct vchiq_queue_bulk_transfer (bulk tx/rx ioctls). */
struct vchiq_queue_bulk_transfer32 {
        unsigned int handle;
        compat_uptr_t data; /* 32-bit user pointer to the bulk buffer */
        unsigned int size;
        compat_uptr_t userdata; /* 32-bit user cookie returned on completion */
        VCHIQ_BULK_MODE_T mode; /* IN/OUT - may be updated by the native ioctl */
};
1616
1617#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
1618        _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
1619#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
1620        _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1621
1622static long
1623vchiq_compat_ioctl_queue_bulk(struct file *file,
1624                              unsigned int cmd,
1625                              unsigned long arg)
1626{
1627        struct vchiq_queue_bulk_transfer __user *args;
1628        struct vchiq_queue_bulk_transfer32 args32;
1629        struct vchiq_queue_bulk_transfer32 __user *ptrargs32 =
1630                (struct vchiq_queue_bulk_transfer32 __user *)arg;
1631        long ret;
1632
1633        args = compat_alloc_user_space(sizeof(*args));
1634        if (!args)
1635                return -EFAULT;
1636
1637        if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1638                return -EFAULT;
1639
1640        if (put_user(args32.handle, &args->handle) ||
1641            put_user(compat_ptr(args32.data), &args->data) ||
1642            put_user(args32.size, &args->size) ||
1643            put_user(compat_ptr(args32.userdata), &args->userdata) ||
1644            put_user(args32.mode, &args->mode))
1645                return -EFAULT;
1646
1647        if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
1648                cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
1649        else
1650                cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;
1651
1652        ret = vchiq_ioctl(file, cmd, (unsigned long)args);
1653
1654        if (ret < 0)
1655                return ret;
1656
1657        if (get_user(args32.mode, &args->mode))
1658                return -EFAULT;
1659
1660        if (copy_to_user(&ptrargs32->mode,
1661                         &args32.mode,
1662                         sizeof(args32.mode)))
1663                return -EFAULT;
1664
1665        return 0;
1666}
1667
/* 32-bit ABI image of struct vchiq_completion_data (completion records). */
struct vchiq_completion_data32 {
        VCHIQ_REASON_T reason;
        compat_uptr_t header; /* 32-bit user pointer to the message header */
        compat_uptr_t service_userdata;
        compat_uptr_t bulk_userdata;
};
1674
/* 32-bit ABI image of struct vchiq_await_completion (VCHIQ_IOC_AWAIT_COMPLETION). */
struct vchiq_await_completion32 {
        unsigned int count; /* capacity of 'buf' in completion records */
        compat_uptr_t buf; /* 32-bit user pointer to completion array */
        unsigned int msgbufsize;
        unsigned int msgbufcount; /* IN/OUT */
        compat_uptr_t msgbufs; /* 32-bit user pointer to array of msg buffers */
};
1682
1683#define VCHIQ_IOC_AWAIT_COMPLETION32 \
1684        _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1685
1686static long
1687vchiq_compat_ioctl_await_completion(struct file *file,
1688                                    unsigned int cmd,
1689                                    unsigned long arg)
1690{
1691        struct vchiq_await_completion __user *args;
1692        struct vchiq_completion_data __user *completion;
1693        struct vchiq_completion_data completiontemp;
1694        struct vchiq_await_completion32 args32;
1695        struct vchiq_completion_data32 completion32;
1696        unsigned int __user *msgbufcount32;
1697        unsigned int msgbufcount_native;
1698        compat_uptr_t msgbuf32;
1699        void __user *msgbuf;
1700        void * __user *msgbufptr;
1701        long ret;
1702
1703        args = compat_alloc_user_space(sizeof(*args) +
1704                                       sizeof(*completion) +
1705                                       sizeof(*msgbufptr));
1706        if (!args)
1707                return -EFAULT;
1708
1709        completion = (struct vchiq_completion_data __user *)(args + 1);
1710        msgbufptr = (void * __user *)(completion + 1);
1711
1712        if (copy_from_user(&args32,
1713                           (struct vchiq_completion_data32 __user *)arg,
1714                           sizeof(args32)))
1715                return -EFAULT;
1716
1717        if (put_user(args32.count, &args->count) ||
1718            put_user(compat_ptr(args32.buf), &args->buf) ||
1719            put_user(args32.msgbufsize, &args->msgbufsize) ||
1720            put_user(args32.msgbufcount, &args->msgbufcount) ||
1721            put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
1722                return -EFAULT;
1723
1724        /* These are simple cases, so just fall into the native handler */
1725        if (!args32.count || !args32.buf || !args32.msgbufcount)
1726                return vchiq_ioctl(file,
1727                                   VCHIQ_IOC_AWAIT_COMPLETION,
1728                                   (unsigned long)args);
1729
1730        /*
1731         * These are the more complex cases.  Typical applications of this
1732         * ioctl will use a very large count, with a very large msgbufcount.
1733         * Since the native ioctl can asynchronously fill in the returned
1734         * buffers and the application can in theory begin processing messages
1735         * even before the ioctl returns, a bit of a trick is used here.
1736         *
1737         * By forcing both count and msgbufcount to be 1, it forces the native
1738         * ioctl to only claim at most 1 message is available.   This tricks
1739         * the calling application into thinking only 1 message was actually
1740         * available in the queue so like all good applications it will retry
1741         * waiting until all the required messages are received.
1742         *
1743         * This trick has been tested and proven to work with vchiq_test,
1744         * Minecraft_PI, the "hello pi" examples, and various other
1745         * applications that are included in Raspbian.
1746         */
1747
1748        if (copy_from_user(&msgbuf32,
1749                           compat_ptr(args32.msgbufs) +
1750                           (sizeof(compat_uptr_t) *
1751                           (args32.msgbufcount - 1)),
1752                           sizeof(msgbuf32)))
1753                return -EFAULT;
1754
1755        msgbuf = compat_ptr(msgbuf32);
1756
1757        if (copy_to_user(msgbufptr,
1758                         &msgbuf,
1759                         sizeof(msgbuf)))
1760                return -EFAULT;
1761
1762        if (copy_to_user(&args->msgbufs,
1763                         &msgbufptr,
1764                         sizeof(msgbufptr)))
1765                return -EFAULT;
1766
1767        if (put_user(1U, &args->count) ||
1768            put_user(completion, &args->buf) ||
1769            put_user(1U, &args->msgbufcount))
1770                return -EFAULT;
1771
1772        ret = vchiq_ioctl(file,
1773                          VCHIQ_IOC_AWAIT_COMPLETION,
1774                          (unsigned long)args);
1775
1776        /*
1777         * An return value of 0 here means that no messages where available
1778         * in the message queue.  In this case the native ioctl does not
1779         * return any data to the application at all.  Not even to update
1780         * msgbufcount.  This functionality needs to be kept here for
1781         * compatibility.
1782         *
1783         * Of course, < 0 means that an error occurred and no data is being
1784         * returned.
1785         *
1786         * Since count and msgbufcount was forced to 1, that means
1787         * the only other possible return value is 1. Meaning that 1 message
1788         * was available, so that multiple message case does not need to be
1789         * handled here.
1790         */
1791        if (ret <= 0)
1792                return ret;
1793
1794        if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
1795                return -EFAULT;
1796
1797        completion32.reason = completiontemp.reason;
1798        completion32.header = ptr_to_compat(completiontemp.header);
1799        completion32.service_userdata =
1800                ptr_to_compat(completiontemp.service_userdata);
1801        completion32.bulk_userdata =
1802                ptr_to_compat(completiontemp.bulk_userdata);
1803
1804        if (copy_to_user(compat_ptr(args32.buf),
1805                         &completion32,
1806                         sizeof(completion32)))
1807                return -EFAULT;
1808
1809        if (get_user(msgbufcount_native, &args->msgbufcount))
1810                return -EFAULT;
1811
1812        if (!msgbufcount_native)
1813                args32.msgbufcount--;
1814
1815        msgbufcount32 =
1816                &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
1817
1818        if (copy_to_user(msgbufcount32,
1819                         &args32.msgbufcount,
1820                         sizeof(args32.msgbufcount)))
1821                return -EFAULT;
1822
1823        return 1;
1824}
1825
/* 32-bit ABI image of struct vchiq_dequeue_message (VCHIQ_IOC_DEQUEUE_MESSAGE). */
struct vchiq_dequeue_message32 {
        unsigned int handle;
        int blocking;
        unsigned int bufsize;
        compat_uptr_t buf; /* 32-bit user pointer to the receive buffer */
};
1832
1833#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
1834        _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1835
1836static long
1837vchiq_compat_ioctl_dequeue_message(struct file *file,
1838                                   unsigned int cmd,
1839                                   unsigned long arg)
1840{
1841        struct vchiq_dequeue_message __user *args;
1842        struct vchiq_dequeue_message32 args32;
1843
1844        args = compat_alloc_user_space(sizeof(*args));
1845        if (!args)
1846                return -EFAULT;
1847
1848        if (copy_from_user(&args32,
1849                           (struct vchiq_dequeue_message32 __user *)arg,
1850                           sizeof(args32)))
1851                return -EFAULT;
1852
1853        if (put_user(args32.handle, &args->handle) ||
1854            put_user(args32.blocking, &args->blocking) ||
1855            put_user(args32.bufsize, &args->bufsize) ||
1856            put_user(compat_ptr(args32.buf), &args->buf))
1857                return -EFAULT;
1858
1859        return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
1860                           (unsigned long)args);
1861}
1862
/* 32-bit ABI image of struct vchiq_get_config (VCHIQ_IOC_GET_CONFIG). */
struct vchiq_get_config32 {
        unsigned int config_size;
        compat_uptr_t pconfig; /* 32-bit user pointer to the config output */
};
1867
1868#define VCHIQ_IOC_GET_CONFIG32 \
1869        _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1870
1871static long
1872vchiq_compat_ioctl_get_config(struct file *file,
1873                              unsigned int cmd,
1874                              unsigned long arg)
1875{
1876        struct vchiq_get_config __user *args;
1877        struct vchiq_get_config32 args32;
1878
1879        args = compat_alloc_user_space(sizeof(*args));
1880        if (!args)
1881                return -EFAULT;
1882
1883        if (copy_from_user(&args32,
1884                           (struct vchiq_get_config32 __user *)arg,
1885                           sizeof(args32)))
1886                return -EFAULT;
1887
1888        if (put_user(args32.config_size, &args->config_size) ||
1889            put_user(compat_ptr(args32.pconfig), &args->pconfig))
1890                return -EFAULT;
1891
1892        return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
1893}
1894
1895static long
1896vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1897{
1898        switch (cmd) {
1899        case VCHIQ_IOC_CREATE_SERVICE32:
1900                return vchiq_compat_ioctl_create_service(file, cmd, arg);
1901        case VCHIQ_IOC_QUEUE_MESSAGE32:
1902                return vchiq_compat_ioctl_queue_message(file, cmd, arg);
1903        case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1904        case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1905                return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
1906        case VCHIQ_IOC_AWAIT_COMPLETION32:
1907                return vchiq_compat_ioctl_await_completion(file, cmd, arg);
1908        case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1909                return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
1910        case VCHIQ_IOC_GET_CONFIG32:
1911                return vchiq_compat_ioctl_get_config(file, cmd, arg);
1912        default:
1913                return vchiq_ioctl(file, cmd, arg);
1914        }
1915}
1916
1917#endif
1918
1919static int vchiq_open(struct inode *inode, struct file *file)
1920{
1921        struct vchiq_state *state = vchiq_get_state();
1922        VCHIQ_INSTANCE_T instance;
1923
1924        vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1925
1926        if (!state) {
1927                vchiq_log_error(vchiq_arm_log_level,
1928                                "vchiq has no connection to VideoCore");
1929                return -ENOTCONN;
1930        }
1931
1932        instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1933        if (!instance)
1934                return -ENOMEM;
1935
1936        instance->state = state;
1937        instance->pid = current->tgid;
1938
1939        vchiq_debugfs_add_instance(instance);
1940
1941        init_completion(&instance->insert_event);
1942        init_completion(&instance->remove_event);
1943        mutex_init(&instance->completion_mutex);
1944        mutex_init(&instance->bulk_waiter_list_mutex);
1945        INIT_LIST_HEAD(&instance->bulk_waiter_list);
1946
1947        file->private_data = instance;
1948
1949        return 0;
1950}
1951
/*
 * Release (close) handler for /dev/vchiq.
 *
 * Tears down everything owned by this file handle in a strict order:
 * wake and stop the completion delivery path, terminate every service
 * created by this instance and wait for each to reach FREE, drain any
 * messages still queued on them, release pending closed-service
 * completions, drop the PEER use count, free outstanding bulk waiters,
 * then free the instance itself.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
        VCHIQ_INSTANCE_T instance = file->private_data;
        struct vchiq_state *state = vchiq_get_state();
        struct vchiq_service *service;
        int ret = 0;
        int i;

        vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
                       (unsigned long)instance);

        if (!state) {
                ret = -EPERM;
                goto out;
        }

        /* Ensure videocore is awake to allow termination. */
        vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

        mutex_lock(&instance->completion_mutex);

        /* Wake the completion thread and ask it to exit */
        instance->closing = 1;
        complete(&instance->insert_event);

        mutex_unlock(&instance->completion_mutex);

        /* Wake the slot handler if the completion queue is full. */
        complete(&instance->remove_event);

        /* Mark all services for termination... */
        i = 0;
        while ((service = next_service_by_instance(state, instance, &i))) {
                struct user_service *user_service = service->base.userdata;

                /* Wake the slot handler if the msg queue is full. */
                complete(&user_service->remove_event);

                vchiq_terminate_service_internal(service);
                unlock_service(service);
        }

        /* ...and wait for them to die */
        i = 0;
        while ((service = next_service_by_instance(state, instance, &i))) {
                struct user_service *user_service = service->base.userdata;

                wait_for_completion(&service->remove_event);

                BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

                spin_lock(&msg_queue_spinlock);

                /* Release any messages still queued on this service. */
                while (user_service->msg_remove != user_service->msg_insert) {
                        struct vchiq_header *header;
                        int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

                        header = user_service->msg_queue[m];
                        user_service->msg_remove++;
                        /* Drop the spinlock around vchiq_release_message(),
                         * which may sleep. */
                        spin_unlock(&msg_queue_spinlock);

                        if (header)
                                vchiq_release_message(service->handle, header);
                        spin_lock(&msg_queue_spinlock);
                }

                spin_unlock(&msg_queue_spinlock);

                unlock_service(service);
        }

        /* Release any closed services */
        while (instance->completion_remove !=
                instance->completion_insert) {
                struct vchiq_completion_data *completion;
                struct vchiq_service *service;

                completion = &instance->completions[
                        instance->completion_remove & (MAX_COMPLETIONS - 1)];
                service = completion->service_userdata;
                if (completion->reason == VCHIQ_SERVICE_CLOSED) {
                        struct user_service *user_service =
                                                        service->base.userdata;

                        /* Wake any blocked user-thread */
                        if (instance->use_close_delivered)
                                complete(&user_service->close_event);
                        unlock_service(service);
                }
                instance->completion_remove++;
        }

        /* Release the PEER service count. */
        vchiq_release_internal(instance->state, NULL);

        {
                struct bulk_waiter_node *waiter, *next;

                list_for_each_entry_safe(waiter, next,
                                         &instance->bulk_waiter_list, list) {
                        list_del(&waiter->list);
                        vchiq_log_info(vchiq_arm_log_level,
                                "bulk_waiter - cleaned up %pK for pid %d",
                                waiter, waiter->pid);
                        kfree(waiter);
                }
        }

        vchiq_debugfs_remove_instance(instance);

        kfree(instance);
        file->private_data = NULL;

out:
        return ret;
}
2068
2069/****************************************************************************
2070*
2071*   vchiq_dump
2072*
2073***************************************************************************/
2074
2075void
2076vchiq_dump(void *dump_context, const char *str, int len)
2077{
2078        struct dump_context *context = (struct dump_context *)dump_context;
2079
2080        if (context->actual < context->space) {
2081                int copy_bytes;
2082
2083                if (context->offset > 0) {
2084                        int skip_bytes = min(len, (int)context->offset);
2085
2086                        str += skip_bytes;
2087                        len -= skip_bytes;
2088                        context->offset -= skip_bytes;
2089                        if (context->offset > 0)
2090                                return;
2091                }
2092                copy_bytes = min(len, (int)(context->space - context->actual));
2093                if (copy_bytes == 0)
2094                        return;
2095                if (copy_to_user(context->buf + context->actual, str,
2096                        copy_bytes))
2097                        context->actual = -EFAULT;
2098                context->actual += copy_bytes;
2099                len -= copy_bytes;
2100
2101                /* If tne terminating NUL is included in the length, then it
2102                ** marks the end of a line and should be replaced with a
2103                ** carriage return. */
2104                if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
2105                        char cr = '\n';
2106
2107                        if (copy_to_user(context->buf + context->actual - 1,
2108                                &cr, 1))
2109                                context->actual = -EFAULT;
2110                }
2111        }
2112}
2113
2114/****************************************************************************
2115*
2116*   vchiq_dump_platform_instance_state
2117*
2118***************************************************************************/
2119
2120void
2121vchiq_dump_platform_instances(void *dump_context)
2122{
2123        struct vchiq_state *state = vchiq_get_state();
2124        char buf[80];
2125        int len;
2126        int i;
2127
2128        /* There is no list of instances, so instead scan all services,
2129                marking those that have been dumped. */
2130
2131        for (i = 0; i < state->unused_service; i++) {
2132                struct vchiq_service *service = state->services[i];
2133                VCHIQ_INSTANCE_T instance;
2134
2135                if (service && (service->base.callback == service_callback)) {
2136                        instance = service->instance;
2137                        if (instance)
2138                                instance->mark = 0;
2139                }
2140        }
2141
2142        for (i = 0; i < state->unused_service; i++) {
2143                struct vchiq_service *service = state->services[i];
2144                VCHIQ_INSTANCE_T instance;
2145
2146                if (service && (service->base.callback == service_callback)) {
2147                        instance = service->instance;
2148                        if (instance && !instance->mark) {
2149                                len = snprintf(buf, sizeof(buf),
2150                                        "Instance %pK: pid %d,%s completions %d/%d",
2151                                        instance, instance->pid,
2152                                        instance->connected ? " connected, " :
2153                                                "",
2154                                        instance->completion_insert -
2155                                                instance->completion_remove,
2156                                        MAX_COMPLETIONS);
2157
2158                                vchiq_dump(dump_context, buf, len + 1);
2159
2160                                instance->mark = 1;
2161                        }
2162                }
2163        }
2164}
2165
2166/****************************************************************************
2167*
2168*   vchiq_dump_platform_service_state
2169*
2170***************************************************************************/
2171
2172void
2173vchiq_dump_platform_service_state(void *dump_context,
2174                                  struct vchiq_service *service)
2175{
2176        struct user_service *user_service =
2177                        (struct user_service *)service->base.userdata;
2178        char buf[80];
2179        int len;
2180
2181        len = snprintf(buf, sizeof(buf), "  instance %pK", service->instance);
2182
2183        if ((service->base.callback == service_callback) &&
2184                user_service->is_vchi) {
2185                len += snprintf(buf + len, sizeof(buf) - len,
2186                        ", %d/%d messages",
2187                        user_service->msg_insert - user_service->msg_remove,
2188                        MSG_QUEUE_SIZE);
2189
2190                if (user_service->dequeue_pending)
2191                        len += snprintf(buf + len, sizeof(buf) - len,
2192                                " (dequeue pending)");
2193        }
2194
2195        vchiq_dump(dump_context, buf, len + 1);
2196}
2197
2198/****************************************************************************
2199*
2200*   vchiq_read
2201*
2202***************************************************************************/
2203
2204static ssize_t
2205vchiq_read(struct file *file, char __user *buf,
2206        size_t count, loff_t *ppos)
2207{
2208        struct dump_context context;
2209
2210        context.buf = buf;
2211        context.actual = 0;
2212        context.space = count;
2213        context.offset = *ppos;
2214
2215        vchiq_dump_state(&context, &g_state);
2216
2217        *ppos += context.actual;
2218
2219        return context.actual;
2220}
2221
2222struct vchiq_state *
2223vchiq_get_state(void)
2224{
2225
2226        if (g_state.remote == NULL)
2227                printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2228        else if (g_state.remote->initialised != 1)
2229                printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2230                        __func__, g_state.remote->initialised);
2231
2232        return ((g_state.remote != NULL) &&
2233                (g_state.remote->initialised == 1)) ? &g_state : NULL;
2234}
2235
/* File operations for the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = vchiq_compat_ioctl,
#endif
        .open = vchiq_open,
        .release = vchiq_release,
        .read = vchiq_read /* dumps driver state, for debugging */
};
2247
2248/*
2249 * Autosuspend related functionality
2250 */
2251
2252int
2253vchiq_videocore_wanted(struct vchiq_state *state)
2254{
2255        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2256
2257        if (!arm_state)
2258                /* autosuspend not supported - always return wanted */
2259                return 1;
2260        else if (arm_state->blocked_count)
2261                return 1;
2262        else if (!arm_state->videocore_use_count)
2263                /* usage count zero - check for override unless we're forcing */
2264                if (arm_state->resume_blocked)
2265                        return 0;
2266                else
2267                        return vchiq_platform_videocore_wanted(state);
2268        else
2269                /* non-zero usage count - videocore still required */
2270                return 1;
2271}
2272
/*
 * Service callback for the keep-alive "KEEP" service.  No callbacks are
 * expected on this service, so any invocation is logged as an error and
 * otherwise ignored.
 */
static VCHIQ_STATUS_T
vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
        struct vchiq_header *header,
        VCHIQ_SERVICE_HANDLE_T service_user,
        void *bulk_user)
{
        vchiq_log_error(vchiq_susp_log_level,
                "%s callback reason %d", __func__, reason);
        return 0;
}
2283
/*
 * Kernel thread that converts deferred use/release requests into actual
 * vchiq_use_service()/vchiq_release_service() calls on a private
 * keep-alive ("KEEP") service.
 *
 * Other contexts bump ka_use_count / ka_release_count and signal ka_evt;
 * this thread wakes, atomically drains both counters and replays them
 * against the service.  It loops forever; the shutdown label is only
 * reached on setup failure.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
        struct vchiq_state *state = (struct vchiq_state *)v;
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

        VCHIQ_STATUS_T status;
        VCHIQ_INSTANCE_T instance;
        VCHIQ_SERVICE_HANDLE_T ka_handle;

        struct vchiq_service_params params = {
                .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
                .callback    = vchiq_keepalive_vchiq_callback,
                .version     = KEEPALIVE_VER,
                .version_min = KEEPALIVE_VER_MIN
        };

        status = vchiq_initialise(&instance);
        if (status != VCHIQ_SUCCESS) {
                vchiq_log_error(vchiq_susp_log_level,
                        "%s vchiq_initialise failed %d", __func__, status);
                goto exit;
        }

        status = vchiq_connect(instance);
        if (status != VCHIQ_SUCCESS) {
                vchiq_log_error(vchiq_susp_log_level,
                        "%s vchiq_connect failed %d", __func__, status);
                goto shutdown;
        }

        status = vchiq_add_service(instance, &params, &ka_handle);
        if (status != VCHIQ_SUCCESS) {
                vchiq_log_error(vchiq_susp_log_level,
                        "%s vchiq_open_service failed %d", __func__, status);
                goto shutdown;
        }

        while (1) {
                long rc = 0, uc = 0;

                /* Sleep until someone queues keep-alive work. */
                if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
                        vchiq_log_error(vchiq_susp_log_level,
                                "%s interrupted", __func__);
                        flush_signals(current);
                        continue;
                }

                /* read and clear counters.  Do release_count then use_count to
                 * prevent getting more releases than uses */
                rc = atomic_xchg(&arm_state->ka_release_count, 0);
                uc = atomic_xchg(&arm_state->ka_use_count, 0);

                /* Call use/release service the requisite number of times.
                 * Process use before release so use counts don't go negative */
                while (uc--) {
                        atomic_inc(&arm_state->ka_use_ack_count);
                        status = vchiq_use_service(ka_handle);
                        if (status != VCHIQ_SUCCESS) {
                                vchiq_log_error(vchiq_susp_log_level,
                                        "%s vchiq_use_service error %d",
                                        __func__, status);
                        }
                }
                while (rc--) {
                        status = vchiq_release_service(ka_handle);
                        if (status != VCHIQ_SUCCESS) {
                                vchiq_log_error(vchiq_susp_log_level,
                                        "%s vchiq_release_service error %d",
                                        __func__, status);
                        }
                }
        }

shutdown:
        vchiq_shutdown(instance);
exit:
        return 0;
}
2363
2364VCHIQ_STATUS_T
2365vchiq_arm_init_state(struct vchiq_state *state,
2366                     struct vchiq_arm_state *arm_state)
2367{
2368        if (arm_state) {
2369                rwlock_init(&arm_state->susp_res_lock);
2370
2371                init_completion(&arm_state->ka_evt);
2372                atomic_set(&arm_state->ka_use_count, 0);
2373                atomic_set(&arm_state->ka_use_ack_count, 0);
2374                atomic_set(&arm_state->ka_release_count, 0);
2375
2376                init_completion(&arm_state->vc_suspend_complete);
2377
2378                init_completion(&arm_state->vc_resume_complete);
2379                /* Initialise to 'done' state.  We only want to block on resume
2380                 * completion while videocore is suspended. */
2381                set_resume_state(arm_state, VC_RESUME_RESUMED);
2382
2383                init_completion(&arm_state->resume_blocker);
2384                /* Initialise to 'done' state.  We only want to block on this
2385                 * completion while resume is blocked */
2386                complete_all(&arm_state->resume_blocker);
2387
2388                init_completion(&arm_state->blocked_blocker);
2389                /* Initialise to 'done' state.  We only want to block on this
2390                 * completion while things are waiting on the resume blocker */
2391                complete_all(&arm_state->blocked_blocker);
2392
2393                arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
2394                arm_state->suspend_timer_running = 0;
2395                arm_state->state = state;
2396                timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
2397                            0);
2398
2399                arm_state->first_connect = 0;
2400
2401        }
2402        return VCHIQ_SUCCESS;
2403}
2404
2405/*
2406** Functions to modify the state variables;
2407**      set_suspend_state
2408**      set_resume_state
2409**
2410** There are more state variables than we might like, so ensure they remain in
2411** step.  Suspend and resume state are maintained separately, since most of
2412** these state machines can operate independently.  However, there are a few
2413** states where state transitions in one state machine cause a reset to the
2414** other state machine.  In addition, there are some completion events which
2415** need to occur on state machine reset and end-state(s), so these are also
2416** dealt with in these functions.
2417**
2418** In all states we set the state variable according to the input, but in some
2419** cases we perform additional steps outlined below;
2420**
2421** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
2422**                      The suspend completion is completed after any suspend
2423**                      attempt.  When we reset the state machine we also reset
2424**                      the completion.  This reset occurs when videocore is
2425**                      resumed, and also if we initiate suspend after a suspend
2426**                      failure.
2427**
2428** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
2429**                      suspend - ie from this point on we must try to suspend
2430**                      before resuming can occur.  We therefore also reset the
2431**                      resume state machine to VC_RESUME_IDLE in this state.
2432**
2433** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
2434**                      complete_all on the suspend completion to notify
2435**                      anything waiting for suspend to happen.
2436**
2437** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
2438**                      initiate resume, so no need to alter resume state.
2439**                      We call complete_all on the suspend completion to notify
2440**                      of suspend rejection.
2441**
2442** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
2443**                      suspend completion and reset the resume state machine.
2444**
** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
**                      resume completion is in its 'done' state whenever
**                      videocore is running.  Therefore, the VC_RESUME_IDLE
**                      state implies that videocore is suspended.
**                      Hence, any thread which needs to wait until videocore is
**                      running can wait on this completion - it will only block
**                      if videocore is suspended.
**
** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
**                      Call complete_all on the resume completion to unblock
**                      any threads waiting for resume.  Also reset the suspend
**                      state machine to its idle state.
2457**
2458** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
2459*/
2460
2461void
2462set_suspend_state(struct vchiq_arm_state *arm_state,
2463                  enum vc_suspend_status new_state)
2464{
2465        /* set the state in all cases */
2466        arm_state->vc_suspend_state = new_state;
2467
2468        /* state specific additional actions */
2469        switch (new_state) {
2470        case VC_SUSPEND_FORCE_CANCELED:
2471                complete_all(&arm_state->vc_suspend_complete);
2472                break;
2473        case VC_SUSPEND_REJECTED:
2474                complete_all(&arm_state->vc_suspend_complete);
2475                break;
2476        case VC_SUSPEND_FAILED:
2477                complete_all(&arm_state->vc_suspend_complete);
2478                arm_state->vc_resume_state = VC_RESUME_RESUMED;
2479                complete_all(&arm_state->vc_resume_complete);
2480                break;
2481        case VC_SUSPEND_IDLE:
2482                reinit_completion(&arm_state->vc_suspend_complete);
2483                break;
2484        case VC_SUSPEND_REQUESTED:
2485                break;
2486        case VC_SUSPEND_IN_PROGRESS:
2487                set_resume_state(arm_state, VC_RESUME_IDLE);
2488                break;
2489        case VC_SUSPEND_SUSPENDED:
2490                complete_all(&arm_state->vc_suspend_complete);
2491                break;
2492        default:
2493                BUG();
2494                break;
2495        }
2496}
2497
2498void
2499set_resume_state(struct vchiq_arm_state *arm_state,
2500                 enum vc_resume_status new_state)
2501{
2502        /* set the state in all cases */
2503        arm_state->vc_resume_state = new_state;
2504
2505        /* state specific additional actions */
2506        switch (new_state) {
2507        case VC_RESUME_FAILED:
2508                break;
2509        case VC_RESUME_IDLE:
2510                reinit_completion(&arm_state->vc_resume_complete);
2511                break;
2512        case VC_RESUME_REQUESTED:
2513                break;
2514        case VC_RESUME_IN_PROGRESS:
2515                break;
2516        case VC_RESUME_RESUMED:
2517                complete_all(&arm_state->vc_resume_complete);
2518                set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2519                break;
2520        default:
2521                BUG();
2522                break;
2523        }
2524}
2525
2526/* should be called with the write lock held */
2527inline void
2528start_suspend_timer(struct vchiq_arm_state *arm_state)
2529{
2530        del_timer(&arm_state->suspend_timer);
2531        arm_state->suspend_timer.expires = jiffies +
2532                msecs_to_jiffies(arm_state->suspend_timer_timeout);
2533        add_timer(&arm_state->suspend_timer);
2534        arm_state->suspend_timer_running = 1;
2535}
2536
2537/* should be called with the write lock held */
2538static inline void
2539stop_suspend_timer(struct vchiq_arm_state *arm_state)
2540{
2541        if (arm_state->suspend_timer_running) {
2542                del_timer(&arm_state->suspend_timer);
2543                arm_state->suspend_timer_running = 0;
2544        }
2545}
2546
2547static inline int
2548need_resume(struct vchiq_state *state)
2549{
2550        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2551
2552        return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
2553                        (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
2554                        vchiq_videocore_wanted(state);
2555}
2556
/* Release any threads parked on the resume blocker (see the wait in
 * vchiq_use_internal()) and clear the flag so new users don't block.
 * NOTE(review): presumably called with susp_res_lock held, like the other
 * resume_blocked manipulations - confirm at call sites. */
static inline void
unblock_resume(struct vchiq_arm_state *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
2563
2564/* Initiate suspend via slot handler. Should be called with the write lock
2565 * held */
2566VCHIQ_STATUS_T
2567vchiq_arm_vcsuspend(struct vchiq_state *state)
2568{
2569        VCHIQ_STATUS_T status = VCHIQ_ERROR;
2570        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2571
2572        if (!arm_state)
2573                goto out;
2574
2575        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2576        status = VCHIQ_SUCCESS;
2577
2578        switch (arm_state->vc_suspend_state) {
2579        case VC_SUSPEND_REQUESTED:
2580                vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
2581                        "requested", __func__);
2582                break;
2583        case VC_SUSPEND_IN_PROGRESS:
2584                vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
2585                        "progress", __func__);
2586                break;
2587
2588        default:
2589                /* We don't expect to be in other states, so log but continue
2590                 * anyway */
2591                vchiq_log_error(vchiq_susp_log_level,
2592                        "%s unexpected suspend state %s", __func__,
2593                        suspend_state_names[arm_state->vc_suspend_state +
2594                                                VC_SUSPEND_NUM_OFFSET]);
2595                /* fall through */
2596        case VC_SUSPEND_REJECTED:
2597        case VC_SUSPEND_FAILED:
2598                /* Ensure any idle state actions have been run */
2599                set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2600                /* fall through */
2601        case VC_SUSPEND_IDLE:
2602                vchiq_log_info(vchiq_susp_log_level,
2603                        "%s: suspending", __func__);
2604                set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
2605                /* kick the slot handler thread to initiate suspend */
2606                request_poll(state, NULL, 0);
2607                break;
2608        }
2609
2610out:
2611        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2612        return status;
2613}
2614
2615void
2616vchiq_platform_check_suspend(struct vchiq_state *state)
2617{
2618        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2619        int susp = 0;
2620
2621        if (!arm_state)
2622                goto out;
2623
2624        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2625
2626        write_lock_bh(&arm_state->susp_res_lock);
2627        if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2628                        arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2629                set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2630                susp = 1;
2631        }
2632        write_unlock_bh(&arm_state->susp_res_lock);
2633
2634        if (susp)
2635                vchiq_platform_suspend(state);
2636
2637out:
2638        vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2639        return;
2640}
2641
2642void
2643vchiq_check_suspend(struct vchiq_state *state)
2644{
2645        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2646
2647        if (!arm_state)
2648                goto out;
2649
2650        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2651
2652        write_lock_bh(&arm_state->susp_res_lock);
2653        if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2654                        arm_state->first_connect &&
2655                        !vchiq_videocore_wanted(state)) {
2656                vchiq_arm_vcsuspend(state);
2657        }
2658        write_unlock_bh(&arm_state->susp_res_lock);
2659
2660out:
2661        vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2662}
2663
2664/* This function should be called with the write lock held */
2665int
2666vchiq_check_resume(struct vchiq_state *state)
2667{
2668        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2669        int resume = 0;
2670
2671        if (!arm_state)
2672                goto out;
2673
2674        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2675
2676        if (need_resume(state)) {
2677                set_resume_state(arm_state, VC_RESUME_REQUESTED);
2678                request_poll(state, NULL, 0);
2679                resume = 1;
2680        }
2681
2682out:
2683        vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2684        return resume;
2685}
2686
/*
 * Take a "use" reference on videocore on behalf of an entity - either a
 * service (use_type USE_TYPE_SERVICE / USE_TYPE_SERVICE_NO_RESUME) or the
 * VCHIQ core itself (USE_TYPE_VCHIQ).  Increments the entity's and the
 * global videocore use counts under susp_res_lock, cancels any pending
 * (not yet serviced) suspend request, requests a resume when one is
 * needed, then waits for videocore to be running before returning.
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_ERROR if service is NULL for a service
 * use_type or a wait was interrupted by a fatal signal.
 */
VCHIQ_STATUS_T
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* human-readable tag used only in logging */
	int *entity_uc;		/* the use counter belonging to the entity */
	int local_uc, local_entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	/* Identify the entity being counted: the peer (VCHIQ) or a service.
	 * NOTE(review): entity is 16 bytes; "%c%c%c%c:%03d" assumes
	 * client_id stays small enough to fit - confirm it cannot overflow */
	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ:   ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service "
				"ptr", __func__);
		ret = VCHIQ_ERROR;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	while (arm_state->resume_blocked) {
		/* If we call 'use' while force suspend is waiting for suspend,
		 * then we're about to block the thread which the force is
		 * waiting to complete, so we're bound to just time out. In this
		 * case, set the suspend state such that the wait will be
		 * canceled, so we can complete as quickly as possible. */
		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
				VC_SUSPEND_IDLE) {
			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
			break;
		}
		/* If suspend is already in progress then we need to block */
		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
			/* Indicate that there are threads waiting on the resume
			 * blocker.  These need to be allowed to complete before
			 * a _second_ call to force suspend can complete,
			 * otherwise low priority threads might never actually
			 * continue */
			arm_state->blocked_count++;
			/* Drop the lock while sleeping on the blocker */
			write_unlock_bh(&arm_state->susp_res_lock);
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"blocked - waiting...", __func__, entity);
			if (wait_for_completion_killable(
					&arm_state->resume_blocker)) {
				vchiq_log_error(vchiq_susp_log_level, "%s %s "
					"wait for resume blocker interrupted",
					__func__, entity);
				ret = VCHIQ_ERROR;
				/* Undo the blocked_count bump before bailing */
				write_lock_bh(&arm_state->susp_res_lock);
				arm_state->blocked_count--;
				write_unlock_bh(&arm_state->susp_res_lock);
				goto out;
			}
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"unblocked", __func__, entity);
			write_lock_bh(&arm_state->susp_res_lock);
			/* Last unblocked thread wakes anything waiting for the
			 * blocked threads to drain */
			if (--arm_state->blocked_count == 0)
				complete_all(&arm_state->blocked_blocker);
		}
	}

	/* A new use means videocore is wanted - cancel the idle timer */
	stop_suspend_timer(arm_state);

	local_uc = ++arm_state->videocore_use_count;
	local_entity_uc = ++(*entity_uc);

	/* If there's a pending request which hasn't yet been serviced then
	 * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
	 * vc_resume_complete will block until we either resume or fail to
	 * suspend */
	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);

	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		vchiq_log_info(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, local_entity_uc, local_uc);
		/* Kick the slot handler thread to perform the resume */
		request_poll(state, NULL, 0);
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	/* Completion is in a done state when we're not suspended, so this won't
	 * block for the non-suspended case. */
	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
			__func__, entity);
		if (wait_for_completion_killable(
				&arm_state->vc_resume_complete)) {
			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
				"resume interrupted", __func__, entity);
			ret = VCHIQ_ERROR;
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
			entity);
	}

	if (ret == VCHIQ_SUCCESS) {
		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		/* Drain pending keepalive use-acks by notifying videocore;
		 * on failure put the remainder back for a later attempt */
		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					&arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2819
2820VCHIQ_STATUS_T
2821vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2822{
2823        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2824        VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2825        char entity[16];
2826        int *entity_uc;
2827        int local_uc, local_entity_uc;
2828
2829        if (!arm_state)
2830                goto out;
2831
2832        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2833
2834        if (service) {
2835                sprintf(entity, "%c%c%c%c:%03d",
2836                        VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2837                        service->client_id);
2838                entity_uc = &service->service_use_count;
2839        } else {
2840                sprintf(entity, "PEER:   ");
2841                entity_uc = &arm_state->peer_use_count;
2842        }
2843
2844        write_lock_bh(&arm_state->susp_res_lock);
2845        if (!arm_state->videocore_use_count || !(*entity_uc)) {
2846                /* Don't use BUG_ON - don't allow user thread to crash kernel */
2847                WARN_ON(!arm_state->videocore_use_count);
2848                WARN_ON(!(*entity_uc));
2849                ret = VCHIQ_ERROR;
2850                goto unlock;
2851        }
2852        local_uc = --arm_state->videocore_use_count;
2853        local_entity_uc = --(*entity_uc);
2854
2855        if (!vchiq_videocore_wanted(state)) {
2856                if (vchiq_platform_use_suspend_timer() &&
2857                                !arm_state->resume_blocked) {
2858                        /* Only use the timer if we're not trying to force
2859                         * suspend (=> resume_blocked) */
2860                        start_suspend_timer(arm_state);
2861                } else {
2862                        vchiq_log_info(vchiq_susp_log_level,
2863                                "%s %s count %d, state count %d - suspending",
2864                                __func__, entity, *entity_uc,
2865                                arm_state->videocore_use_count);
2866                        vchiq_arm_vcsuspend(state);
2867                }
2868        } else
2869                vchiq_log_trace(vchiq_susp_log_level,
2870                        "%s %s count %d, state count %d",
2871                        __func__, entity, *entity_uc,
2872                        arm_state->videocore_use_count);
2873
2874unlock:
2875        write_unlock_bh(&arm_state->susp_res_lock);
2876
2877out:
2878        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2879        return ret;
2880}
2881
2882void
2883vchiq_on_remote_use(struct vchiq_state *state)
2884{
2885        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2886
2887        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2888        atomic_inc(&arm_state->ka_use_count);
2889        complete(&arm_state->ka_evt);
2890}
2891
2892void
2893vchiq_on_remote_release(struct vchiq_state *state)
2894{
2895        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2896
2897        vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2898        atomic_inc(&arm_state->ka_release_count);
2899        complete(&arm_state->ka_evt);
2900}
2901
2902VCHIQ_STATUS_T
2903vchiq_use_service_internal(struct vchiq_service *service)
2904{
2905        return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2906}
2907
2908VCHIQ_STATUS_T
2909vchiq_release_service_internal(struct vchiq_service *service)
2910{
2911        return vchiq_release_internal(service->state, service);
2912}
2913
/* Return a pointer to the debugfs node embedded in the instance. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
{
	return &instance->debugfs_node;
}
2919
2920int
2921vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2922{
2923        struct vchiq_service *service;
2924        int use_count = 0, i;
2925
2926        i = 0;
2927        while ((service = next_service_by_instance(instance->state,
2928                instance, &i)) != NULL) {
2929                use_count += service->service_use_count;
2930                unlock_service(service);
2931        }
2932        return use_count;
2933}
2934
/* Return the pid recorded in the instance. */
int
vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
{
	return instance->pid;
}
2940
/* Return the instance-wide trace flag. */
int
vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
{
	return instance->trace;
}
2946
2947void
2948vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2949{
2950        struct vchiq_service *service;
2951        int i;
2952
2953        i = 0;
2954        while ((service = next_service_by_instance(instance->state,
2955                instance, &i)) != NULL) {
2956                service->trace = trace;
2957                unlock_service(service);
2958        }
2959        instance->trace = (trace != 0);
2960}
2961
2962static void suspend_timer_callback(struct timer_list *t)
2963{
2964        struct vchiq_arm_state *arm_state =
2965                                        from_timer(arm_state, t, suspend_timer);
2966        struct vchiq_state *state = arm_state->state;
2967
2968        vchiq_log_info(vchiq_susp_log_level,
2969                "%s - suspend timer expired - check suspend", __func__);
2970        vchiq_check_suspend(state);
2971}
2972
2973VCHIQ_STATUS_T
2974vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2975{
2976        VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2977        struct vchiq_service *service = find_service_by_handle(handle);
2978
2979        if (service) {
2980                ret = vchiq_use_internal(service->state, service,
2981                                USE_TYPE_SERVICE);
2982                unlock_service(service);
2983        }
2984        return ret;
2985}
2986
2987VCHIQ_STATUS_T
2988vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2989{
2990        VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2991        struct vchiq_service *service = find_service_by_handle(handle);
2992
2993        if (service) {
2994                ret = vchiq_release_internal(service->state, service);
2995                unlock_service(service);
2996        }
2997        return ret;
2998}
2999
/* Snapshot of one service's use state, captured under susp_res_lock by
 * vchiq_dump_service_use_state() so logging can happen lock-free. */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the service */
	int use_count;	/* service_use_count at snapshot time */
};
3005
3006void
3007vchiq_dump_service_use_state(struct vchiq_state *state)
3008{
3009        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3010        struct service_data_struct *service_data;
3011        int i, found = 0;
3012        /* If there's more than 64 services, only dump ones with
3013         * non-zero counts */
3014        int only_nonzero = 0;
3015        static const char *nz = "<-- preventing suspend";
3016
3017        enum vc_suspend_status vc_suspend_state;
3018        enum vc_resume_status  vc_resume_state;
3019        int peer_count;
3020        int vc_use_count;
3021        int active_services;
3022
3023        if (!arm_state)
3024                return;
3025
3026        service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
3027                                     GFP_KERNEL);
3028        if (!service_data)
3029                return;
3030
3031        read_lock_bh(&arm_state->susp_res_lock);
3032        vc_suspend_state = arm_state->vc_suspend_state;
3033        vc_resume_state  = arm_state->vc_resume_state;
3034        peer_count = arm_state->peer_use_count;
3035        vc_use_count = arm_state->videocore_use_count;
3036        active_services = state->unused_service;
3037        if (active_services > MAX_SERVICES)
3038                only_nonzero = 1;
3039
3040        for (i = 0; i < active_services; i++) {
3041                struct vchiq_service *service_ptr = state->services[i];
3042
3043                if (!service_ptr)
3044                        continue;
3045
3046                if (only_nonzero && !service_ptr->service_use_count)
3047                        continue;
3048
3049                if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
3050                        continue;
3051
3052                service_data[found].fourcc = service_ptr->base.fourcc;
3053                service_data[found].clientid = service_ptr->client_id;
3054                service_data[found].use_count = service_ptr->service_use_count;
3055                found++;
3056                if (found >= MAX_SERVICES)
3057                        break;
3058        }
3059
3060        read_unlock_bh(&arm_state->susp_res_lock);
3061
3062        vchiq_log_warning(vchiq_susp_log_level,
3063                "-- Videcore suspend state: %s --",
3064                suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
3065        vchiq_log_warning(vchiq_susp_log_level,
3066                "-- Videcore resume state: %s --",
3067                resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
3068
3069        if (only_nonzero)
3070                vchiq_log_warning(vchiq_susp_log_level, "Too many active "
3071                        "services (%d).  Only dumping up to first %d services "
3072                        "with non-zero use-count", active_services, found);
3073
3074        for (i = 0; i < found; i++) {
3075                vchiq_log_warning(vchiq_susp_log_level,
3076                        "----- %c%c%c%c:%d service count %d %s",
3077                        VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
3078                        service_data[i].clientid,
3079                        service_data[i].use_count,
3080                        service_data[i].use_count ? nz : "");
3081        }
3082        vchiq_log_warning(vchiq_susp_log_level,
3083                "----- VCHIQ use count count %d", peer_count);
3084        vchiq_log_warning(vchiq_susp_log_level,
3085                "--- Overall vchiq instance use count %d", vc_use_count);
3086
3087        kfree(service_data);
3088
3089        vchiq_dump_platform_use_state(state);
3090}
3091
/*
 * Sanity-check that 'service' currently holds at least one use count
 * (i.e. videocore is being kept awake on its behalf).  Returns
 * VCHIQ_SUCCESS if so; otherwise logs the offending service's counts,
 * dumps the overall use state, and returns VCHIQ_ERROR.
 */
VCHIQ_STATUS_T
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	/* Only reading the count, so the read side of the lock suffices */
	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%d service count %d, "
			"state count %d, videocore suspend state %s", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
3124
/*
 * Stub: the ARM platform takes no action when the remote signals
 * "use active"; the hook exists only to satisfy the core interface.
 */
void vchiq_on_remote_use_active(struct vchiq_state *state)
{
}
3130
3131void vchiq_platform_conn_state_changed(struct vchiq_state *state,
3132                                       VCHIQ_CONNSTATE_T oldstate,
3133                                       VCHIQ_CONNSTATE_T newstate)
3134{
3135        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3136
3137        vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
3138                get_conn_state_name(oldstate), get_conn_state_name(newstate));
3139        if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
3140                write_lock_bh(&arm_state->susp_res_lock);
3141                if (!arm_state->first_connect) {
3142                        char threadname[16];
3143
3144                        arm_state->first_connect = 1;
3145                        write_unlock_bh(&arm_state->susp_res_lock);
3146                        snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
3147                                state->id);
3148                        arm_state->ka_thread = kthread_create(
3149                                &vchiq_keepalive_thread_func,
3150                                (void *)state,
3151                                threadname);
3152                        if (IS_ERR(arm_state->ka_thread)) {
3153                                vchiq_log_error(vchiq_susp_log_level,
3154                                        "vchiq: FATAL: couldn't create thread %s",
3155                                        threadname);
3156                        } else {
3157                                wake_up_process(arm_state->ka_thread);
3158                        }
3159                } else
3160                        write_unlock_bh(&arm_state->susp_res_lock);
3161        }
3162}
3163
/*
 * Devicetree match table; each entry's .data supplies the per-SoC
 * vchiq_drvdata consumed by vchiq_probe().
 */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
3170
3171static struct platform_device *
3172vchiq_register_child(struct platform_device *pdev, const char *name)
3173{
3174        struct platform_device_info pdevinfo;
3175        struct platform_device *child;
3176
3177        memset(&pdevinfo, 0, sizeof(pdevinfo));
3178
3179        pdevinfo.parent = &pdev->dev;
3180        pdevinfo.name = name;
3181        pdevinfo.id = PLATFORM_DEVID_NONE;
3182        pdevinfo.dma_mask = DMA_BIT_MASK(32);
3183
3184        child = platform_device_register_full(&pdevinfo);
3185        if (IS_ERR(child)) {
3186                dev_warn(&pdev->dev, "%s not registered\n", name);
3187                child = NULL;
3188        }
3189
3190        return child;
3191}
3192
3193static int vchiq_probe(struct platform_device *pdev)
3194{
3195        struct device_node *fw_node;
3196        const struct of_device_id *of_id;
3197        struct vchiq_drvdata *drvdata;
3198        struct device *vchiq_dev;
3199        int err;
3200
3201        of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
3202        drvdata = (struct vchiq_drvdata *)of_id->data;
3203        if (!drvdata)
3204                return -EINVAL;
3205
3206        fw_node = of_find_compatible_node(NULL, NULL,
3207                                          "raspberrypi,bcm2835-firmware");
3208        if (!fw_node) {
3209                dev_err(&pdev->dev, "Missing firmware node\n");
3210                return -ENOENT;
3211        }
3212
3213        drvdata->fw = rpi_firmware_get(fw_node);
3214        of_node_put(fw_node);
3215        if (!drvdata->fw)
3216                return -EPROBE_DEFER;
3217
3218        platform_set_drvdata(pdev, drvdata);
3219
3220        err = vchiq_platform_init(pdev, &g_state);
3221        if (err)
3222                goto failed_platform_init;
3223
3224        cdev_init(&vchiq_cdev, &vchiq_fops);
3225        vchiq_cdev.owner = THIS_MODULE;
3226        err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
3227        if (err) {
3228                vchiq_log_error(vchiq_arm_log_level,
3229                        "Unable to register device");
3230                goto failed_platform_init;
3231        }
3232
3233        vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
3234                                  "vchiq");
3235        if (IS_ERR(vchiq_dev)) {
3236                err = PTR_ERR(vchiq_dev);
3237                goto failed_device_create;
3238        }
3239
3240        vchiq_debugfs_init();
3241
3242        vchiq_log_info(vchiq_arm_log_level,
3243                "vchiq: initialised - version %d (min %d), device %d.%d",
3244                VCHIQ_VERSION, VCHIQ_VERSION_MIN,
3245                MAJOR(vchiq_devid), MINOR(vchiq_devid));
3246
3247        bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
3248        bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
3249
3250        return 0;
3251
3252failed_device_create:
3253        cdev_del(&vchiq_cdev);
3254failed_platform_init:
3255        vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
3256        return err;
3257}
3258
3259static int vchiq_remove(struct platform_device *pdev)
3260{
3261        platform_device_unregister(bcm2835_camera);
3262        vchiq_debugfs_deinit();
3263        device_destroy(vchiq_class, vchiq_devid);
3264        cdev_del(&vchiq_cdev);
3265
3266        return 0;
3267}
3268
/* Platform driver binding vchiq_probe/vchiq_remove to the DT match table. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
3277
3278static int __init vchiq_driver_init(void)
3279{
3280        int ret;
3281
3282        vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3283        if (IS_ERR(vchiq_class)) {
3284                pr_err("Failed to create vchiq class\n");
3285                return PTR_ERR(vchiq_class);
3286        }
3287
3288        ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
3289        if (ret) {
3290                pr_err("Failed to allocate vchiq's chrdev region\n");
3291                goto class_destroy;
3292        }
3293
3294        ret = platform_driver_register(&vchiq_driver);
3295        if (ret) {
3296                pr_err("Failed to register vchiq driver\n");
3297                goto region_unregister;
3298        }
3299
3300        return 0;
3301
3302region_unregister:
3303        platform_driver_unregister(&vchiq_driver);
3304
3305class_destroy:
3306        class_destroy(vchiq_class);
3307
3308        return ret;
3309}
3310module_init(vchiq_driver_init);
3311
/*
 * Module exit: undo vchiq_driver_init() in reverse order — driver
 * first (which triggers vchiq_remove for bound devices), then the
 * chrdev region, then the class.
 */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
	unregister_chrdev_region(vchiq_devid, 1);
	class_destroy(vchiq_class);
}
module_exit(vchiq_driver_exit);
3318module_exit(vchiq_driver_exit);
3319
/* Module metadata; dual license matches the SPDX header at the top of file. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
3323