linux/sound/core/seq/seq_queue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999      Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client.  i.e. Changing owner to a third client is not
 *       allowed.
 *
 *  Aug. 30, 2000       Takashi Iwai
 *     - Queues are managed in static array again, but with better way.
 *       The API itself is identical.
 *     - The queue is locked when struct snd_seq_queue pointer is returned via
 *       queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */
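
/*
 * Typical access pattern implied by the note above: every pointer obtained
 * from queueptr() holds a use-lock reference that must be dropped again, e.g.
 *
 *      struct snd_seq_queue *q = queueptr(id);
 *      if (q) {
 *              ... read or modify the queue ...
 *              queuefree(q);   // drop the reference taken by queueptr()
 *      }
 */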

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;
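/* queue_list_lock serializes all updates to queue_list[] and num_queues */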

int snd_seq_queue_get_cur_queues(void)
{
        return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (! queue_list[i]) {
                        queue_list[i] = q;
                        q->queue = i;
                        num_queues++;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[id];
        if (q) {
                spin_lock(&q->owner_lock);
                if (q->owner == client) {
                        /* found */
                        q->klocked = 1;
                        spin_unlock(&q->owner_lock);
                        queue_list[id] = NULL;
                        num_queues--;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return q;
                }
                spin_unlock(&q->owner_lock);
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
        struct snd_seq_queue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;

        spin_lock_init(&q->owner_lock);
        spin_lock_init(&q->check_lock);
        mutex_init(&q->timer_mutex);
        snd_use_lock_init(&q->use_lock);
        q->queue = -1;

        q->tickq = snd_seq_prioq_new();
        q->timeq = snd_seq_prioq_new();
        q->timer = snd_seq_timer_new();
        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
                snd_seq_prioq_delete(&q->tickq);
                snd_seq_prioq_delete(&q->timeq);
                snd_seq_timer_delete(&q->timer);
                kfree(q);
                return NULL;
        }

        q->owner = owner;
        q->locked = locked;
        q->klocked = 0;

        return q;
}

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
        /* stop and release the timer */
        mutex_lock(&q->timer_mutex);
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
        mutex_unlock(&q->timer_mutex);
        /* wait until access free */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);

        kfree(q);
}


/*----------------------------------------------------------------*/

/* delete all existing queues */
void snd_seq_queues_delete(void)
{
        int i;

        /* clear list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (queue_list[i])
                        queue_delete(queue_list[i]);
        }
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);

/* allocate a new queue -
 * return pointer to new queue or ERR_PTR(-errno) for error
 * The new queue's use_lock is set to 1. It is the caller's responsibility to
 * call snd_use_lock_free(&q->use_lock).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
        struct snd_seq_queue *q;

        q = queue_new(client, locked);
        if (q == NULL)
                return ERR_PTR(-ENOMEM);
        q->info_flags = info_flags;
        queue_use(q, client, 1);
        snd_use_lock_use(&q->use_lock);
        if (queue_list_add(q) < 0) {
                snd_use_lock_free(&q->use_lock);
                queue_delete(q);
                return ERR_PTR(-ENOMEM);
        }
        return q;
}

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return -EINVAL;
        q = queue_list_remove(queueid, client);
        if (q == NULL)
                return -EINVAL;
        queue_delete(q);

        return 0;
}


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return NULL;
        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[queueid];
        if (q)
                snd_use_lock_use(&q->use_lock);
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return q;
}

/* return the (first) queue matching the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                q = queueptr(i);
                if (q) {
                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
                                return q;
                        queuefree(q);
                }
        }
        return NULL;
}


/* -------------------------------------------------------- */

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_event_cell *cell;
        snd_seq_tick_time_t cur_tick;
        snd_seq_real_time_t cur_time;

        if (q == NULL)
                return;

        /* make this function non-reentrant */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_blocked) {
                q->check_again = 1;
                spin_unlock_irqrestore(&q->check_lock, flags);
                return;         /* other thread is already checking queues */
        }
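        /* no checker is running yet; mark ourselves as the active checker so
         * late arrivals only flag check_again and we loop below on their behalf
         */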
        q->check_blocked = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);

      __again:
        /* Process tick queue... */
        cur_tick = snd_seq_timer_get_cur_tick(q->timer);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
        }

        /* Process time queue... */
        cur_time = snd_seq_timer_get_cur_time(q->timer, false);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
        }

        /* free lock */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_again) {
                q->check_again = 0;
                spin_unlock_irqrestore(&q->check_lock, flags);
                goto __again;
        }
        q->check_blocked = 0;
        spin_unlock_irqrestore(&q->check_lock, flags);
}


/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        int dest, err;
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!cell))
                return -EINVAL;
        dest = cell->event.queue;       /* destination queue */
        q = queueptr(dest);
        if (q == NULL)
                return -EINVAL;
        /* handle relative time stamps, convert them into absolute */
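        /* (a relative stamp is an offset from the queue's current tick or
         * real time; after the addition below the event carries an absolute
         * stamp and is re-flagged with SNDRV_SEQ_TIME_MODE_ABS)
         */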
        if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
                switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        cell->event.time.tick += q->timer->tick.cur_tick;
                        break;

                case SNDRV_SEQ_TIME_STAMP_REAL:
                        snd_seq_inc_real_time(&cell->event.time.time,
                                              &q->timer->cur_time);
                        break;
                }
                cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
                cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        }
        /* enqueue event in the real-time or midi queue */
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
                err = snd_seq_prioq_cell_in(q->tickq, cell);
                break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
        default:
                err = snd_seq_prioq_cell_in(q->timeq, cell);
                break;
        }

        if (err < 0) {
                queuefree(q); /* unlock */
                return err;
        }

        /* trigger dispatching */
        snd_seq_check_queue(q, atomic, hop);

        queuefree(q); /* unlock */

        return 0;
}


/*----------------------------------------------------------------*/

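/* a client may modify a queue's parameters if it owns the queue, or if the
 * queue is neither permanently locked (locked) nor temporarily locked by a
 * pending parameter change (klocked)
 */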
static inline int check_access(struct snd_seq_queue *q, int client)
{
        return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
        unsigned long flags;
        int access_ok;

        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        if (access_ok)
                q->klocked = 1;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->owner_lock, flags);
        q->klocked = 0;
        spin_unlock_irqrestore(&q->owner_lock, flags);
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int access_ok;
        unsigned long flags;

        if (! q)
                return 0;
        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queuefree(q);
        return access_ok;
}

/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
        struct snd_seq_queue *q = queueptr(queueid);
        unsigned long flags;

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        spin_lock_irqsave(&q->owner_lock, flags);
        q->locked = locked ? 1 : 0;
        q->owner = client;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queue_access_unlock(q);
        queuefree(q);

        return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * q->use mutex should be down before calling this function to avoid
 * conflicts with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
        int result = 0;
        struct snd_seq_queue *queue;
        struct snd_seq_timer *tmr;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
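        /* if the configured timer cannot be opened, fall back to the default
         * timer setup and retry once
         */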
        result = snd_seq_timer_open(queue);
        if (result < 0) {
                snd_seq_timer_defaults(tmr);
                result = snd_seq_timer_open(queue);
        }
        queuefree(queue);
        return result;
}

/* close timer -
 * q->use mutex should be down before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
        struct snd_seq_queue *queue;
        int result = 0;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        snd_seq_timer_close(queue);
        queuefree(queue);
        return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                                  struct snd_seq_queue_tempo *info)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int result;

        if (q == NULL)
                return -EINVAL;
        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
        queue_access_unlock(q);
        queuefree(q);
        return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
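        /*
         * clients_bitmap records which clients currently use this queue and
         * clients holds the total count; the timer stays open while at least
         * one client uses the queue and is closed when the last one leaves.
         */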
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
        } else {
                if (test_and_clear_bit(client, queue->clients_bitmap))
                        queue->clients--;
        }
        if (queue->clients) {
                if (use && queue->clients == 1)
                        snd_seq_timer_defaults(queue->timer);
                snd_seq_timer_open(queue);
        } else {
                snd_seq_timer_close(queue);
        }
}

/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if it is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
        struct snd_seq_queue *queue;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        mutex_lock(&queue->timer_mutex);
        queue_use(queue, client, use);
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
}

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
        struct snd_seq_queue *q;
        int result;

        q = queueptr(queueid);
        if (q == NULL)
                return -EINVAL; /* invalid queue */
        result = test_bit(client, q->clients_bitmap) ? 1 : 0;
        queuefree(q);
        return result;
}


/*----------------------------------------------------------------*/

/* final stage notification -
 * remove cells queued by clients that no longer exist (for non-owned queues)
 * or delete the queue entirely (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
        int i;
        struct snd_seq_queue *q;

        /* delete own queues from queue list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                q = queue_list_remove(i, client);
                if (q)
                        queue_delete(q);
        }

        /* remove cells from existing queues -
         * they are not owned by this client
         */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                q = queueptr(i);
                if (!q)
                        continue;
                if (test_bit(client, q->clients_bitmap)) {
                        snd_seq_prioq_leave(q->tickq, client, 0);
                        snd_seq_prioq_leave(q->timeq, client, 0);
                        snd_seq_queue_use(q->queue, client, 0);
                }
                queuefree(q);
        }
}



/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                q = queueptr(i);
                if (!q)
                        continue;
                snd_seq_prioq_leave(q->tickq, client, 0);
                snd_seq_prioq_leave(q->timeq, client, 0);
                queuefree(q);
        }
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                q = queueptr(i);
                if (!q)
                        continue;
                if (test_bit(client, q->clients_bitmap) &&
                    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
                     q->queue == info->queue)) {
                        snd_seq_prioq_remove_events(q->tickq, client, info);
                        snd_seq_prioq_remove_events(q->timeq, client, info);
                }
                queuefree(q);
        }
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                                  int atomic, int hop)
{
        struct snd_seq_event sev;

        sev = *ev;

        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
        sev.time.tick = q->timer->tick.cur_tick;
        sev.queue = q->queue;
        sev.data.queue.queue = q->queue;

        /* broadcast events from Timer port */
        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                                        struct snd_seq_event *ev,
                                        int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_START:
                snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
                snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
                if (! snd_seq_timer_start(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_CONTINUE:
                if (! snd_seq_timer_continue(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_STOP:
                snd_seq_timer_stop(q->timer);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_TEMPO:
                snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TICK:
                if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TIME:
                if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        case SNDRV_SEQ_EVENT_QUEUE_SKEW:
                if (snd_seq_timer_set_skew(q->timer,
                                           ev->data.queue.param.skew.value,
                                           ev->data.queue.param.skew.base) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        }
}


/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!ev))
                return -EINVAL;
        q = queueptr(ev->data.queue.queue);

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, ev->source.client)) {
                queuefree(q);
                return -EPERM;
        }

        snd_seq_queue_process_event(q, ev, atomic, hop);

        queue_access_unlock(q);
        queuefree(q);
        return 0;
}


/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        int i, bpm;
        struct snd_seq_queue *q;
        struct snd_seq_timer *tmr;
        bool locked;
        int owner;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                q = queueptr(i);
                if (!q)
                        continue;

                tmr = q->timer;
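                /* tempo is stored MIDI-style as microseconds per quarter
                 * note, so 60,000,000 / tempo gives beats per minute
                 */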
                if (tmr->tempo)
                        bpm = 60000000 / tmr->tempo;
                else
                        bpm = 0;

                spin_lock_irq(&q->owner_lock);
                locked = q->locked;
                owner = q->owner;
                spin_unlock_irq(&q->owner_lock);

                snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
                snd_iprintf(buffer, "owned by client    : %d\n", owner);
                snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
                snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
                snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
                snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
                snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
                snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
                snd_iprintf(buffer, "current BPM        : %d\n", bpm);
                snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
                snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
                snd_iprintf(buffer, "\n");
                queuefree(q);
        }
}
#endif /* CONFIG_SND_PROC_FS */