linux/sound/core/seq/seq_queue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999      Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client.  i.e. Changing owner to a third client is not
 *       allowed.
 *
 *  Aug. 30, 2000       Takashi Iwai
 *     - Queues are managed in a static array again, but in a better way.
 *       The API itself is identical.
 *     - The queue is locked when a struct snd_seq_queue pointer is returned
 *       via queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */
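/*
 * Illustrative sketch (not part of the original file): the lookup pattern
 * implied by the comment above.  Every pointer obtained from queueptr()
 * must be released with queuefree() once the caller is done with it; the
 * function name example_show_queue() is hypothetical.
 */
#if 0
static void example_show_queue(int queueid)
{
        struct snd_seq_queue *q = queueptr(queueid);    /* takes q->use_lock */

        if (!q)
                return;                                 /* no such queue */
        pr_debug("queue %d: [%s]\n", q->queue, q->name);
        queuefree(q);                                   /* releases q->use_lock */
}
#endif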

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
        return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (! queue_list[i]) {
                        queue_list[i] = q;
                        q->queue = i;
                        num_queues++;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[id];
        if (q) {
                spin_lock(&q->owner_lock);
                if (q->owner == client) {
                        /* found */
                        q->klocked = 1;
                        spin_unlock(&q->owner_lock);
                        queue_list[id] = NULL;
                        num_queues--;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return q;
                }
                spin_unlock(&q->owner_lock);
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
        struct snd_seq_queue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;

        spin_lock_init(&q->owner_lock);
        spin_lock_init(&q->check_lock);
        mutex_init(&q->timer_mutex);
        snd_use_lock_init(&q->use_lock);
        q->queue = -1;

        q->tickq = snd_seq_prioq_new();
        q->timeq = snd_seq_prioq_new();
        q->timer = snd_seq_timer_new();
        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
                snd_seq_prioq_delete(&q->tickq);
                snd_seq_prioq_delete(&q->timeq);
                snd_seq_timer_delete(&q->timer);
                kfree(q);
                return NULL;
        }

        q->owner = owner;
        q->locked = locked;
        q->klocked = 0;

        return q;
}

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
        /* stop and release the timer */
        mutex_lock(&q->timer_mutex);
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
        mutex_unlock(&q->timer_mutex);
        /* wait until access free */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);

        kfree(q);
}


/*----------------------------------------------------------------*/

/* delete all existing queues */
void snd_seq_queues_delete(void)
{
        int i;

        /* clear list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (queue_list[i])
                        queue_delete(queue_list[i]);
        }
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);

/* allocate a new queue -
 * return pointer to new queue or ERR_PTR(-errno) for error
 * The new queue's use_lock is set to 1. It is the caller's responsibility to
 * call snd_use_lock_free(&q->use_lock).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
        struct snd_seq_queue *q;

        q = queue_new(client, locked);
        if (q == NULL)
                return ERR_PTR(-ENOMEM);
        q->info_flags = info_flags;
        queue_use(q, client, 1);
        snd_use_lock_use(&q->use_lock);
        if (queue_list_add(q) < 0) {
                snd_use_lock_free(&q->use_lock);
                queue_delete(q);
                return ERR_PTR(-ENOMEM);
        }
        return q;
}
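
/*
 * Illustrative sketch (not part of the original file): the caller contract
 * described in the comment above snd_seq_queue_alloc().  On success the
 * returned queue still holds one use_lock reference that the caller must
 * drop; example_create_queue() and its arguments are hypothetical.
 */
#if 0
static int example_create_queue(int client)
{
        struct snd_seq_queue *q;

        q = snd_seq_queue_alloc(client, 1 /* locked */, 0 /* info_flags */);
        if (IS_ERR(q))
                return PTR_ERR(q);              /* e.g. -ENOMEM */
        /* ... inspect q->queue or q->name here ... */
        snd_use_lock_free(&q->use_lock);        /* drop the reference taken above */
        return 0;
}
#endif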

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return -EINVAL;
        q = queue_list_remove(queueid, client);
        if (q == NULL)
                return -EINVAL;
        queue_delete(q);

        return 0;
}


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return NULL;
        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[queueid];
        if (q)
                snd_use_lock_use(&q->use_lock);
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return q;
}

/* return the (first) queue matching with the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) != NULL) {
                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
                                return q;
                        queuefree(q);
                }
        }
        return NULL;
}


/* -------------------------------------------------------- */

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_event_cell *cell;
        snd_seq_tick_time_t cur_tick;
        snd_seq_real_time_t cur_time;

        if (q == NULL)
                return;

        /* make this function non-reentrant */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_blocked) {
                q->check_again = 1;
                spin_unlock_irqrestore(&q->check_lock, flags);
                return;         /* another thread is already checking queues */
        }
        q->check_blocked = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);

      __again:
        /* Process tick queue... */
        cur_tick = snd_seq_timer_get_cur_tick(q->timer);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
        }

        /* Process time queue... */
        cur_time = snd_seq_timer_get_cur_time(q->timer, false);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
        }

        /* free lock */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_again) {
                q->check_again = 0;
                spin_unlock_irqrestore(&q->check_lock, flags);
                goto __again;
        }
        q->check_blocked = 0;
        spin_unlock_irqrestore(&q->check_lock, flags);
}


/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        int dest, err;
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!cell))
                return -EINVAL;
        dest = cell->event.queue;       /* destination queue */
        q = queueptr(dest);
        if (q == NULL)
                return -EINVAL;
        /* handle relative time stamps, convert them into absolute */
        if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
                switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        cell->event.time.tick += q->timer->tick.cur_tick;
                        break;

                case SNDRV_SEQ_TIME_STAMP_REAL:
                        snd_seq_inc_real_time(&cell->event.time.time,
                                              &q->timer->cur_time);
                        break;
                }
                cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
                cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        }
        /* enqueue event in the tick or real-time queue */
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
                err = snd_seq_prioq_cell_in(q->tickq, cell);
                break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
        default:
                err = snd_seq_prioq_cell_in(q->timeq, cell);
                break;
        }

        if (err < 0) {
                queuefree(q); /* unlock */
                return err;
        }

        /* trigger dispatching */
        snd_seq_check_queue(q, atomic, hop);

        queuefree(q); /* unlock */

        return 0;
}


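/*
 * Illustrative sketch (not part of the original file): what the relative to
 * absolute conversion in snd_seq_enqueue_event() amounts to for a
 * tick-stamped event.  The numbers (current position 960, offset 100) are
 * made up for the example.
 */
#if 0
static void example_relative_stamp(void)
{
        struct snd_seq_event ev = { };

        /* queued as "100 ticks from now" while the queue is at tick 960 */
        ev.flags = SNDRV_SEQ_TIME_STAMP_TICK | SNDRV_SEQ_TIME_MODE_REL;
        ev.time.tick = 100;

        /* what the conversion produces: an absolute stamp of 960 + 100 */
        ev.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
        ev.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        ev.time.tick = 1060;            /* q->timer->tick.cur_tick + 100 */
}
#endif
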
/*----------------------------------------------------------------*/

static inline int check_access(struct snd_seq_queue *q, int client)
{
        return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
        unsigned long flags;
        int access_ok;

        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        if (access_ok)
                q->klocked = 1;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->owner_lock, flags);
        q->klocked = 0;
        spin_unlock_irqrestore(&q->owner_lock, flags);
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int access_ok;
        unsigned long flags;

        if (! q)
                return 0;
        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queuefree(q);
        return access_ok;
}


/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
        struct snd_seq_queue *q = queueptr(queueid);
        unsigned long flags;

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        spin_lock_irqsave(&q->owner_lock, flags);
        q->locked = locked ? 1 : 0;
        q->owner = client;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queue_access_unlock(q);
        queuefree(q);

        return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * q->use mutex should be down before calling this function to avoid a
 * conflict with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
        int result = 0;
        struct snd_seq_queue *queue;
        struct snd_seq_timer *tmr;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
        if ((result = snd_seq_timer_open(queue)) < 0) {
                snd_seq_timer_defaults(tmr);
                result = snd_seq_timer_open(queue);
        }
        queuefree(queue);
        return result;
}

/* close timer -
 * q->use mutex should be down before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
        struct snd_seq_queue *queue;
        int result = 0;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        snd_seq_timer_close(queue);
        queuefree(queue);
        return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                                  struct snd_seq_queue_tempo *info)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int result;

        if (q == NULL)
                return -EINVAL;
        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
        queue_access_unlock(q);
        queuefree(q);
        return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
        } else {
                if (test_and_clear_bit(client, queue->clients_bitmap))
                        queue->clients--;
        }
        if (queue->clients) {
                if (use && queue->clients == 1)
                        snd_seq_timer_defaults(queue->timer);
                snd_seq_timer_open(queue);
        } else {
                snd_seq_timer_close(queue);
        }
}

/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if it is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
        struct snd_seq_queue *queue;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        mutex_lock(&queue->timer_mutex);
        queue_use(queue, client, use);
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
}

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
        struct snd_seq_queue *q;
        int result;

        q = queueptr(queueid);
        if (q == NULL)
                return -EINVAL; /* invalid queue */
        result = test_bit(client, q->clients_bitmap) ? 1 : 0;
        queuefree(q);
        return result;
}


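/*
 * Illustrative sketch (not part of the original file): how a kernel client
 * might attach to and detach from a queue with snd_seq_queue_use(), and how
 * snd_seq_queue_is_used() reports the result.  The names my_queue and
 * my_client are hypothetical.
 */
#if 0
static void example_use_queue(int my_queue, int my_client)
{
        snd_seq_queue_use(my_queue, my_client, 1);      /* first user opens the timer */
        if (snd_seq_queue_is_used(my_queue, my_client) == 1)
                pr_debug("client %d uses queue %d\n", my_client, my_queue);
        snd_seq_queue_use(my_queue, my_client, 0);      /* last user closes the timer */
}
#endif
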
/*----------------------------------------------------------------*/

/* notification that client has left the system -
 * stop the timer on all queues owned by this client
 */
void snd_seq_queue_client_termination(int client)
{
        unsigned long flags;
        int i;
        struct snd_seq_queue *q;
        bool matched;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                spin_lock_irqsave(&q->owner_lock, flags);
                matched = (q->owner == client);
                if (matched)
                        q->klocked = 1;
                spin_unlock_irqrestore(&q->owner_lock, flags);
                if (matched) {
                        if (q->timer->running)
                                snd_seq_timer_stop(q->timer);
                        snd_seq_timer_reset(q->timer);
                }
                queuefree(q);
        }
}

/* final stage notification -
 * remove cells belonging to a client that no longer exists (for non-owned
 * queues), or delete the queue itself (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
        int i;
        struct snd_seq_queue *q;

        /* delete own queues from queue list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queue_list_remove(i, client)) != NULL)
                        queue_delete(q);
        }

        /* remove cells from existing queues -
         * they are not owned by this client
         */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap)) {
                        snd_seq_prioq_leave(q->tickq, client, 0);
                        snd_seq_prioq_leave(q->timeq, client, 0);
                        snd_seq_queue_use(q->queue, client, 0);
                }
                queuefree(q);
        }
}



/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                snd_seq_prioq_leave(q->tickq, client, 0);
                snd_seq_prioq_leave(q->timeq, client, 0);
                queuefree(q);
        }
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap) &&
                    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
                     q->queue == info->queue)) {
                        snd_seq_prioq_remove_events(q->tickq, client, info);
                        snd_seq_prioq_remove_events(q->timeq, client, info);
                }
                queuefree(q);
        }
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                                  int atomic, int hop)
{
        struct snd_seq_event sev;

        sev = *ev;

        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
        sev.time.tick = q->timer->tick.cur_tick;
        sev.queue = q->queue;
        sev.data.queue.queue = q->queue;

        /* broadcast events from Timer port */
        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                                        struct snd_seq_event *ev,
                                        int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_START:
                snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
                snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
                if (! snd_seq_timer_start(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_CONTINUE:
                if (! snd_seq_timer_continue(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_STOP:
                snd_seq_timer_stop(q->timer);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_TEMPO:
                snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TICK:
                if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TIME:
                if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        case SNDRV_SEQ_EVENT_QUEUE_SKEW:
                if (snd_seq_timer_set_skew(q->timer,
                                           ev->data.queue.param.skew.value,
                                           ev->data.queue.param.skew.base) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        }
}


/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!ev))
                return -EINVAL;
        q = queueptr(ev->data.queue.queue);

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, ev->source.client)) {
                queuefree(q);
                return -EPERM;
        }

        snd_seq_queue_process_event(q, ev, atomic, hop);

        queue_access_unlock(q);
        queuefree(q);
        return 0;
}


/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        int i, bpm;
        struct snd_seq_queue *q;
        struct snd_seq_timer *tmr;
        bool locked;
        int owner;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;

                tmr = q->timer;
                if (tmr->tempo)
                        bpm = 60000000 / tmr->tempo;
                else
                        bpm = 0;

                spin_lock_irq(&q->owner_lock);
                locked = q->locked;
                owner = q->owner;
                spin_unlock_irq(&q->owner_lock);

                snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
                snd_iprintf(buffer, "owned by client    : %d\n", owner);
                snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
                snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
                snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
                snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
                snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
                snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
                snd_iprintf(buffer, "current BPM        : %d\n", bpm);
                snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
                snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
                snd_iprintf(buffer, "\n");
                queuefree(q);
        }
}
#endif /* CONFIG_SND_PROC_FS */