/* linux/drivers/staging/tidspbridge/core/msg_sm.c */
   1/*
   2 * msg_sm.c
   3 *
   4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
   5 *
   6 * Implements upper edge functions for Bridge message module.
   7 *
   8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
   9 *
  10 * This package is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 *
  14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
  15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
  16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  17 */
  18#include <linux/types.h>
  19
  20/*  ----------------------------------- DSP/BIOS Bridge */
  21#include <dspbridge/dbdefs.h>
  22
  23/*  ----------------------------------- OS Adaptation Layer */
  24#include <dspbridge/sync.h>
  25
  26/*  ----------------------------------- Platform Manager */
  27#include <dspbridge/dev.h>
  28
  29/*  ----------------------------------- Others */
  30#include <dspbridge/io_sm.h>
  31
  32/*  ----------------------------------- This */
  33#include <_msg_sm.h>
  34#include <dspbridge/dspmsg.h>
  35
  36/*  ----------------------------------- Function Prototypes */
  37static int add_new_msg(struct list_head *msg_list);
  38static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
  39static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
  40static void free_msg_list(struct list_head *msg_list);
  41
  42/*
  43 *  ======== bridge_msg_create ========
  44 *      Create an object to manage message queues. Only one of these objects
  45 *      can exist per device object.
  46 */
  47int bridge_msg_create(struct msg_mgr **msg_man,
  48                             struct dev_object *hdev_obj,
  49                             msg_onexit msg_callback)
  50{
  51        struct msg_mgr *msg_mgr_obj;
  52        struct io_mgr *hio_mgr;
  53        int status = 0;
  54
  55        if (!msg_man || !msg_callback || !hdev_obj)
  56                return -EFAULT;
  57
  58        dev_get_io_mgr(hdev_obj, &hio_mgr);
  59        if (!hio_mgr)
  60                return -EFAULT;
  61
  62        *msg_man = NULL;
  63        /* Allocate msg_ctrl manager object */
  64        msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
  65        if (!msg_mgr_obj)
  66                return -ENOMEM;
  67
  68        msg_mgr_obj->on_exit = msg_callback;
  69        msg_mgr_obj->iomgr = hio_mgr;
  70        /* List of MSG_QUEUEs */
  71        INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
  72        /*
  73         * Queues of message frames for messages to the DSP. Message
  74         * frames will only be added to the free queue when a
  75         * msg_queue object is created.
  76         */
  77        INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
  78        INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
  79        spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
  80
  81        /*
  82         * Create an event to be used by bridge_msg_put() in waiting
  83         * for an available free frame from the message manager.
  84         */
  85        msg_mgr_obj->sync_event =
  86                kzalloc(sizeof(struct sync_object), GFP_KERNEL);
  87        if (!msg_mgr_obj->sync_event) {
  88                kfree(msg_mgr_obj);
  89                return -ENOMEM;
  90        }
  91        sync_init_event(msg_mgr_obj->sync_event);
  92
  93        *msg_man = msg_mgr_obj;
  94
  95        return status;
  96}
  97
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 *
 *      On success, *msgq holds the new queue, which owns max_msgs frames
 *      on its own free list and contributes max_msgs frames to the
 *      manager's shared free list. On failure, everything allocated so
 *      far is torn down via delete_msg_queue() and an error is returned.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
				u32 msgq_id, u32 max_msgs, void *arg)
{
	u32 i;
	u32 num_allocated = 0;	/* frames added to the manager's free list */
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || msgq == NULL)
		return -EFAULT;

	*msgq = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q)
		return -ENOMEM;

	msg_q->max_msgs = max_msgs;
	msg_q->msg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	INIT_LIST_HEAD(&msg_q->msg_free_list);
	INIT_LIST_HEAD(&msg_q->msg_used_list);

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_event) {
		status = -ENOMEM;
		goto out_err;

	}
	sync_init_event(msg_q->sync_event);

	/* Create a notification list for message ready notification. */
	msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!msg_q->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(msg_q->ntfy_obj);

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done);

	msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done_ack) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done_ack);

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* Initialize message frames and put in appropriate queues.
	 * add_new_msg() allocates with GFP_ATOMIC, so calling it while
	 * holding the spinlock is safe. */
	for (i = 0; i < max_msgs && !status; i++) {
		status = add_new_msg(&hmsg_mgr->msg_free_list);
		if (!status) {
			num_allocated++;
			status = add_new_msg(&msg_q->msg_free_list);
		}
	}
	if (status) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		goto out_err;
	}

	list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
	*msgq = msg_q;
	/* Signal that free frames are now available */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return 0;
out_err:
	/* Frees msg_q and removes the num_allocated frames we added to
	 * the manager's free list above. */
	delete_msg_queue(msg_q, num_allocated);
	return status;
}
 194
 195/*
 196 *  ======== bridge_msg_delete ========
 197 *      Delete a msg_ctrl manager allocated in bridge_msg_create().
 198 */
 199void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
 200{
 201        delete_msg_mgr(hmsg_mgr);
 202}
 203
/*
 *  ======== bridge_msg_delete_queue ========
 *      Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
 *
 *      Blocks until every thread waiting inside bridge_msg_get() or
 *      bridge_msg_put() on this queue has acknowledged (via
 *      sync_done_ack) that it no longer references the object, then
 *      unlinks and frees it.
 */
void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
{
	struct msg_mgr *hmsg_mgr;
	u32 io_msg_pend;

	if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
		return;

	hmsg_mgr = msg_queue_obj->msg_mgr;
	/* Mark the queue as dying; blocked waiters test this flag. */
	msg_queue_obj->done = true;
	/*  Unblock all threads blocked in MSG_Get() or MSG_Put().
	 *  NOTE(review): done/io_msg_pend are read here without the
	 *  manager lock — presumably safe by caller serialization;
	 *  verify against node cleanup paths. */
	io_msg_pend = msg_queue_obj->io_msg_pend;
	while (io_msg_pend) {
		/* Unblock thread */
		sync_set_event(msg_queue_obj->sync_done);
		/* Wait for acknowledgement */
		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
		io_msg_pend = msg_queue_obj->io_msg_pend;
	}
	/* Remove message queue from hmsg_mgr->queue_list */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	list_del(&msg_queue_obj->list_elem);
	/* Free the message queue object, reclaiming the max_msgs frames
	 * it contributed to the manager's free list */
	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
	/* If no free frames remain, make bridge_msg_put() callers block */
	if (list_empty(&hmsg_mgr->msg_free_list))
		sync_reset_event(hmsg_mgr->sync_event);
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
}
 236
/*
 *  ======== bridge_msg_get ========
 *      Get a message from a msg_ctrl queue.
 *
 *      Copies the oldest queued message into *pmsg. If none is queued,
 *      blocks (up to utimeout) until the DSP delivers one or the queue
 *      is being deleted. Returns 0 on success, -EPERM if the queue is
 *      dying, or the wait status (e.g. timeout) otherwise.
 *      NOTE(review): returns -ENOMEM for bad arguments while
 *      bridge_msg_put() returns -EFAULT — inconsistent, but kept as-is
 *      since callers may depend on it.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
			  struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || pmsg == NULL)
		return -ENOMEM;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!list_empty(&msg_queue_obj->msg_used_list)) {
		msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
				struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		*pmsg = msg_frame_obj->msg_data.msg;
		/* Recycle the frame onto this queue's free list */
		list_add_tail(&msg_frame_obj->list_elem,
				&msg_queue_obj->msg_free_list);
		if (list_empty(&msg_queue_obj->msg_used_list))
			sync_reset_event(msg_queue_obj->sync_event);
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return 0;
	}

	if (msg_queue_obj->done) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return -EPERM;
	}
	/* Record that we are about to block, so that
	 * bridge_msg_delete_queue() knows to wake and wait for us */
	msg_queue_obj->io_msg_pend++;
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	/*
	 * Wait til message is available, timeout, or done. We don't
	 * have to schedule the DPC, since the DSP will send messages
	 * when they are available.
	 */
	syncs[0] = msg_queue_obj->sync_event;
	syncs[1] = msg_queue_obj->sync_done;
	status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	if (msg_queue_obj->done) {
		msg_queue_obj->io_msg_pend--;
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/*
		 * Signal that we're not going to access msg_queue_obj
		 * anymore, so it can be deleted.
		 */
		sync_set_event(msg_queue_obj->sync_done_ack);
		return -EPERM;
	}
	if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
		/* Get msg from used list */
		msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
				struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		/* Copy message into pmsg and put frame on the free list */
		*pmsg = msg_frame_obj->msg_data.msg;
		list_add_tail(&msg_frame_obj->list_elem,
				&msg_queue_obj->msg_free_list);
	}
	msg_queue_obj->io_msg_pend--;
	/* Keep the event set if messages are still queued, so other
	 * waiters are not left blocked */
	if (!list_empty(&msg_queue_obj->msg_used_list))
		sync_set_event(msg_queue_obj->sync_event);

	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return status;
}
 316
 317/*
 318 *  ======== bridge_msg_put ========
 319 *      Put a message onto a msg_ctrl queue.
 320 */
 321int bridge_msg_put(struct msg_queue *msg_queue_obj,
 322                          const struct dsp_msg *pmsg, u32 utimeout)
 323{
 324        struct msg_frame *msg_frame_obj;
 325        struct msg_mgr *hmsg_mgr;
 326        struct sync_object *syncs[2];
 327        u32 index;
 328        int status;
 329
 330        if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
 331                return -EFAULT;
 332
 333        hmsg_mgr = msg_queue_obj->msg_mgr;
 334
 335        spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
 336
 337        /* If a message frame is available, use it */
 338        if (!list_empty(&hmsg_mgr->msg_free_list)) {
 339                msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
 340                                struct msg_frame, list_elem);
 341                list_del(&msg_frame_obj->list_elem);
 342                msg_frame_obj->msg_data.msg = *pmsg;
 343                msg_frame_obj->msg_data.msgq_id =
 344                        msg_queue_obj->msgq_id;
 345                list_add_tail(&msg_frame_obj->list_elem,
 346                                &hmsg_mgr->msg_used_list);
 347                hmsg_mgr->msgs_pending++;
 348
 349                if (list_empty(&hmsg_mgr->msg_free_list))
 350                        sync_reset_event(hmsg_mgr->sync_event);
 351
 352                /* Release critical section before scheduling DPC */
 353                spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 354                /* Schedule a DPC, to do the actual data transfer: */
 355                iosm_schedule(hmsg_mgr->iomgr);
 356                return 0;
 357        }
 358
 359        if (msg_queue_obj->done) {
 360                spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 361                return -EPERM;
 362        }
 363        msg_queue_obj->io_msg_pend++;
 364
 365        spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 366
 367        /* Wait til a free message frame is available, timeout, or done */
 368        syncs[0] = hmsg_mgr->sync_event;
 369        syncs[1] = msg_queue_obj->sync_done;
 370        status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
 371        if (status)
 372                return status;
 373
 374        /* Enter critical section */
 375        spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
 376        if (msg_queue_obj->done) {
 377                msg_queue_obj->io_msg_pend--;
 378                /* Exit critical section */
 379                spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 380                /*
 381                 * Signal that we're not going to access msg_queue_obj
 382                 * anymore, so it can be deleted.
 383                 */
 384                sync_set_event(msg_queue_obj->sync_done_ack);
 385                return -EPERM;
 386        }
 387
 388        if (list_empty(&hmsg_mgr->msg_free_list)) {
 389                spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 390                return -EFAULT;
 391        }
 392
 393        /* Get msg from free list */
 394        msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
 395                        struct msg_frame, list_elem);
 396        /*
 397         * Copy message into pmsg and put frame on the
 398         * used list.
 399         */
 400        list_del(&msg_frame_obj->list_elem);
 401        msg_frame_obj->msg_data.msg = *pmsg;
 402        msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
 403        list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
 404        hmsg_mgr->msgs_pending++;
 405        /*
 406         * Schedule a DPC, to do the actual
 407         * data transfer.
 408         */
 409        iosm_schedule(hmsg_mgr->iomgr);
 410
 411        msg_queue_obj->io_msg_pend--;
 412        /* Reset event if there are still frames available */
 413        if (!list_empty(&hmsg_mgr->msg_free_list))
 414                sync_set_event(hmsg_mgr->sync_event);
 415
 416        /* Exit critical section */
 417        spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 418
 419        return 0;
 420}
 421
 422/*
 423 *  ======== bridge_msg_register_notify ========
 424 */
 425int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
 426                                   u32 event_mask, u32 notify_type,
 427                                   struct dsp_notification *hnotification)
 428{
 429        int status = 0;
 430
 431        if (!msg_queue_obj || !hnotification) {
 432                status = -ENOMEM;
 433                goto func_end;
 434        }
 435
 436        if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
 437                status = -EPERM;
 438                goto func_end;
 439        }
 440
 441        if (notify_type != DSP_SIGNALEVENT) {
 442                status = -EBADR;
 443                goto func_end;
 444        }
 445
 446        if (event_mask)
 447                status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
 448                                                event_mask, notify_type);
 449        else
 450                status = ntfy_unregister(msg_queue_obj->ntfy_obj,
 451                                                        hnotification);
 452
 453        if (status == -EINVAL) {
 454                /*  Not registered. Ok, since we couldn't have known. Node
 455                 *  notifications are split between node state change handled
 456                 *  by NODE, and message ready handled by msg_ctrl. */
 457                status = 0;
 458        }
 459func_end:
 460        return status;
 461}
 462
 463/*
 464 *  ======== bridge_msg_set_queue_id ========
 465 */
 466void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
 467{
 468        /*
 469         *  A message queue must be created when a node is allocated,
 470         *  so that node_register_notify() can be called before the node
 471         *  is created. Since we don't know the node environment until the
 472         *  node is created, we need this function to set msg_queue_obj->msgq_id
 473         *  to the node environment, after the node is created.
 474         */
 475        if (msg_queue_obj)
 476                msg_queue_obj->msgq_id = msgq_id;
 477}
 478
 479/*
 480 *  ======== add_new_msg ========
 481 *      Must be called in message manager critical section.
 482 */
 483static int add_new_msg(struct list_head *msg_list)
 484{
 485        struct msg_frame *pmsg;
 486
 487        pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
 488        if (!pmsg)
 489                return -ENOMEM;
 490
 491        list_add_tail(&pmsg->list_elem, msg_list);
 492
 493        return 0;
 494}
 495
 496/*
 497 *  ======== delete_msg_mgr ========
 498 */
 499static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
 500{
 501        if (!hmsg_mgr)
 502                return;
 503
 504        /* FIXME: free elements from queue_list? */
 505        free_msg_list(&hmsg_mgr->msg_free_list);
 506        free_msg_list(&hmsg_mgr->msg_used_list);
 507        kfree(hmsg_mgr->sync_event);
 508        kfree(hmsg_mgr);
 509}
 510
 511/*
 512 *  ======== delete_msg_queue ========
 513 */
 514static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
 515{
 516        struct msg_mgr *hmsg_mgr;
 517        struct msg_frame *pmsg, *tmp;
 518        u32 i;
 519
 520        if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
 521                return;
 522
 523        hmsg_mgr = msg_queue_obj->msg_mgr;
 524
 525        /* Pull off num_to_dsp message frames from Msg manager and free */
 526        i = 0;
 527        list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
 528                        list_elem) {
 529                list_del(&pmsg->list_elem);
 530                kfree(pmsg);
 531                if (i++ >= num_to_dsp)
 532                        break;
 533        }
 534
 535        free_msg_list(&msg_queue_obj->msg_free_list);
 536        free_msg_list(&msg_queue_obj->msg_used_list);
 537
 538        if (msg_queue_obj->ntfy_obj) {
 539                ntfy_delete(msg_queue_obj->ntfy_obj);
 540                kfree(msg_queue_obj->ntfy_obj);
 541        }
 542
 543        kfree(msg_queue_obj->sync_event);
 544        kfree(msg_queue_obj->sync_done);
 545        kfree(msg_queue_obj->sync_done_ack);
 546
 547        kfree(msg_queue_obj);
 548}
 549
 550/*
 551 *  ======== free_msg_list ========
 552 */
 553static void free_msg_list(struct list_head *msg_list)
 554{
 555        struct msg_frame *pmsg, *tmp;
 556
 557        if (!msg_list)
 558                return;
 559
 560        list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
 561                list_del(&pmsg->list_elem);
 562                kfree(pmsg);
 563        }
 564}
 565