linux/sound/core/seq/seq_queue.c
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999      Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client.  i.e. Changing owner to a third client is not
 *       allowed.
 *
 *  Aug. 30, 2000       Takashi Iwai
 *     - Queues are managed in static array again, but with better way.
 *       The API itself is identical.
 *     - The queue is locked when struct snd_seq_queue pointer is returned via
 *       queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */
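/*
 * A minimal sketch of the queueptr()/queuefree() protocol described above,
 * assuming a hypothetical caller that only needs to read one field of the
 * queue; every successful queueptr() must be paired with a queuefree():
 *
 *      struct snd_seq_queue *q = queueptr(queueid);    // grabs q->use_lock
 *      if (q == NULL)
 *              return -EINVAL;
 *      owner = q->owner;
 *      queuefree(q);                                   // releases q->use_lock
 *
 * Holding the use lock only keeps the queue from being freed; it does not
 * serialize modifications, which use the owner/check spinlocks below.
 */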

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
        return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (! queue_list[i]) {
                        queue_list[i] = q;
                        q->queue = i;
                        num_queues++;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[id];
        if (q) {
                spin_lock(&q->owner_lock);
                if (q->owner == client) {
                        /* found */
                        q->klocked = 1;
                        spin_unlock(&q->owner_lock);
                        queue_list[id] = NULL;
                        num_queues--;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return q;
                }
                spin_unlock(&q->owner_lock);
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
        struct snd_seq_queue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (q == NULL) {
                snd_printd("malloc failed for snd_seq_queue_new()\n");
                return NULL;
        }

        spin_lock_init(&q->owner_lock);
        spin_lock_init(&q->check_lock);
        mutex_init(&q->timer_mutex);
        snd_use_lock_init(&q->use_lock);
        q->queue = -1;

        q->tickq = snd_seq_prioq_new();
        q->timeq = snd_seq_prioq_new();
        q->timer = snd_seq_timer_new();
        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
                snd_seq_prioq_delete(&q->tickq);
                snd_seq_prioq_delete(&q->timeq);
                snd_seq_timer_delete(&q->timer);
                kfree(q);
                return NULL;
        }

        q->owner = owner;
        q->locked = locked;
        q->klocked = 0;

        return q;
}

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
        /* stop and release the timer */
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
        /* wait until all pending accesses are finished */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);

        kfree(q);
}


/*----------------------------------------------------------------*/

/* setup queues */
int __init snd_seq_queues_init(void)
{
        /*
        memset(queue_list, 0, sizeof(queue_list));
        num_queues = 0;
        */
        return 0;
}

/* delete all existing queues */
void __exit snd_seq_queues_delete(void)
{
        int i;

        /* clear list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (queue_list[i])
                        queue_delete(queue_list[i]);
        }
}

/* allocate a new queue -
 * return queue index value or negative value for error
 */
int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
        struct snd_seq_queue *q;

        q = queue_new(client, locked);
        if (q == NULL)
                return -ENOMEM;
        q->info_flags = info_flags;
        if (queue_list_add(q) < 0) {
                queue_delete(q);
                return -ENOMEM;
        }
        snd_seq_queue_use(q->queue, client, 1); /* use this queue */
        return q->queue;
}

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return -EINVAL;
        q = queue_list_remove(queueid, client);
        if (q == NULL)
                return -EINVAL;
        queue_delete(q);

        return 0;
}
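/*
 * Example (hypothetical caller, sketch only): the usual lifecycle as driven
 * by the ioctl handlers in seq_clientmgr.c -- create a locked queue for a
 * client and destroy it again later; only the owner may delete it:
 *
 *      int qid = snd_seq_queue_alloc(client, 1, 0);    // locked = 1, no info flags
 *      if (qid < 0)
 *              return qid;                             // -ENOMEM on failure
 *      ...
 *      err = snd_seq_queue_delete(client, qid);        // -EINVAL if not owned
 */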


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return NULL;
        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[queueid];
        if (q)
                snd_use_lock_use(&q->use_lock);
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return q;
}

/* return the (first) queue matching the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) != NULL) {
                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
                                return q;
                        queuefree(q);
                }
        }
        return NULL;
}


/* -------------------------------------------------------- */

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_event_cell *cell;

        if (q == NULL)
                return;

        /* make this function non-reentrant */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_blocked) {
                q->check_again = 1;
                spin_unlock_irqrestore(&q->check_lock, flags);
                return;         /* other thread is already checking queues */
        }
        q->check_blocked = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);

      __again:
        /* Process tick queue... */
        while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
                if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
                                              &cell->event.time.tick)) {
                        cell = snd_seq_prioq_cell_out(q->tickq);
                        if (cell)
                                snd_seq_dispatch_event(cell, atomic, hop);
                } else {
                        /* event remains in the queue */
                        break;
                }
        }


        /* Process time queue... */
        while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
                if (snd_seq_compare_real_time(&q->timer->cur_time,
                                              &cell->event.time.time)) {
                        cell = snd_seq_prioq_cell_out(q->timeq);
                        if (cell)
                                snd_seq_dispatch_event(cell, atomic, hop);
                } else {
                        /* event remains in the queue */
                        break;
                }
        }

        /* free lock */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_again) {
                q->check_again = 0;
                spin_unlock_irqrestore(&q->check_lock, flags);
                goto __again;
        }
        q->check_blocked = 0;
        spin_unlock_irqrestore(&q->check_lock, flags);
}
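/*
 * How this check is typically driven (sketch, see seq_timer.c): the queue's
 * timer callback advances tmr->cur_time / tmr->tick and then flushes the
 * queue from atomic context, so cells that have just become due get
 * dispatched:
 *
 *      snd_seq_check_queue(q, 1, 0);   // atomic = 1, hop = 0
 *
 * The check_blocked/check_again pair above keeps this safe against
 * concurrent callers: a second caller only sets check_again and returns,
 * and the first caller loops once more, so no due cell is left behind.
 */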


/* enqueue an event into a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        int dest, err;
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!cell))
                return -EINVAL;
        dest = cell->event.queue;       /* destination queue */
        q = queueptr(dest);
        if (q == NULL)
                return -EINVAL;
        /* handle relative time stamps, convert them into absolute */
        if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
                switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        cell->event.time.tick += q->timer->tick.cur_tick;
                        break;

                case SNDRV_SEQ_TIME_STAMP_REAL:
                        snd_seq_inc_real_time(&cell->event.time.time,
                                              &q->timer->cur_time);
                        break;
                }
                cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
                cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        }
        /* enqueue event in the real-time or midi queue */
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
                err = snd_seq_prioq_cell_in(q->tickq, cell);
                break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
        default:
                err = snd_seq_prioq_cell_in(q->timeq, cell);
                break;
        }

        if (err < 0) {
                queuefree(q); /* unlock */
                return err;
        }

        /* trigger dispatching */
        snd_seq_check_queue(q, atomic, hop);

        queuefree(q); /* unlock */

        return 0;
}
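/*
 * Example (hypothetical producer, sketch only): an event scheduled "96 ticks
 * from now" arrives here with a relative tick stamp; the conversion above
 * rewrites it as an absolute stamp before it is queued on q->tickq:
 *
 *      struct snd_seq_event ev;
 *
 *      memset(&ev, 0, sizeof(ev));
 *      ev.queue = qid;                                 // destination queue
 *      ev.flags = SNDRV_SEQ_TIME_STAMP_TICK | SNDRV_SEQ_TIME_MODE_REL;
 *      ev.time.tick = 96;                              // becomes cur_tick + 96
 */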


/*----------------------------------------------------------------*/

static inline int check_access(struct snd_seq_queue *q, int client)
{
        return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
        unsigned long flags;
        int access_ok;

        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        if (access_ok)
                q->klocked = 1;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->owner_lock, flags);
        q->klocked = 0;
        spin_unlock_irqrestore(&q->owner_lock, flags);
}
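/*
 * Sketch of the pattern the exported setters below follow (hypothetical
 * function body, for illustration): permission check and kernel-lock in one
 * step, change the parameter, then drop the lock again:
 *
 *      if (! queue_access_lock(q, client)) {
 *              queuefree(q);
 *              return -EPERM;          // neither owner nor a free queue
 *      }
 *      ... modify queue parameters ...
 *      queue_access_unlock(q);
 *      queuefree(q);
 */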

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int access_ok;
        unsigned long flags;

        if (! q)
                return 0;
        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queuefree(q);
        return access_ok;
}

/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
        struct snd_seq_queue *q = queueptr(queueid);

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        q->locked = locked ? 1 : 0;
        q->owner = client;
        queue_access_unlock(q);
        queuefree(q);

        return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * q->use mutex should be down before calling this function to avoid
 * conflicts with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
        int result = 0;
        struct snd_seq_queue *queue;
        struct snd_seq_timer *tmr;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
        if ((result = snd_seq_timer_open(queue)) < 0) {
                snd_seq_timer_defaults(tmr);
                result = snd_seq_timer_open(queue);
        }
        queuefree(queue);
        return result;
}

/* close timer -
 * q->use mutex should be down before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
        struct snd_seq_queue *queue;
        int result = 0;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        snd_seq_timer_close(queue);
        queuefree(queue);
        return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                                  struct snd_seq_queue_tempo *info)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int result;

        if (q == NULL)
                return -EINVAL;
        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        result = snd_seq_timer_set_tempo(q->timer, info->tempo);
        if (result >= 0)
                result = snd_seq_timer_set_ppq(q->timer, info->ppq);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
        queue_access_unlock(q);
        queuefree(q);
        return result;
}
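/*
 * Note on units (sketch): tempo is the duration of a quarter note in
 * microseconds and ppq is the number of ticks per quarter note, so
 * BPM = 60,000,000 / tempo (the proc output below uses the same formula).
 * A hypothetical caller setting 120 BPM at 96 ticks per quarter note would
 * pass:
 *
 *      struct snd_seq_queue_tempo t;
 *
 *      memset(&t, 0, sizeof(t));
 *      t.queue = queueid;
 *      t.tempo = 500000;       // 60,000,000 / 120 BPM
 *      t.ppq = 96;
 *      err = snd_seq_queue_timer_set_tempo(t.queue, client, &t);
 */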


/* use or unuse this queue -
 * if it is the first client, the timer is opened.
 * if it is no longer used by any client, the timer is closed.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
        struct snd_seq_queue *queue;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        mutex_lock(&queue->timer_mutex);
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
        } else {
                if (test_and_clear_bit(client, queue->clients_bitmap))
                        queue->clients--;
        }
        if (queue->clients) {
                if (use && queue->clients == 1)
                        snd_seq_timer_defaults(queue->timer);
                snd_seq_timer_open(queue);
        } else {
                snd_seq_timer_close(queue);
        }
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
}
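/*
 * Example (hypothetical client path, sketch only): attaching to and
 * detaching from a queue is plain reference counting; the first user also
 * (re)opens the queue timer and the last one closes it again:
 *
 *      snd_seq_queue_use(queueid, client, 1);          // attach
 *      ...
 *      if (snd_seq_queue_is_used(queueid, client) > 0)
 *              snd_seq_queue_use(queueid, client, 0);  // detach
 */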

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
        struct snd_seq_queue *q;
        int result;

        q = queueptr(queueid);
        if (q == NULL)
                return -EINVAL; /* invalid queue */
        result = test_bit(client, q->clients_bitmap) ? 1 : 0;
        queuefree(q);
        return result;
}


/*----------------------------------------------------------------*/

/* notification that client has left the system -
 * stop the timer on all queues owned by this client
 */
void snd_seq_queue_client_termination(int client)
{
        unsigned long flags;
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                spin_lock_irqsave(&q->owner_lock, flags);
                if (q->owner == client)
                        q->klocked = 1;
                spin_unlock_irqrestore(&q->owner_lock, flags);
                if (q->owner == client) {
                        if (q->timer->running)
                                snd_seq_timer_stop(q->timer);
                        snd_seq_timer_reset(q->timer);
                }
                queuefree(q);
        }
}

/* final stage notification -
 * remove cells belonging to the no-longer-existing client (for non-owned queues)
 * or delete the queue itself (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
        int i;
        struct snd_seq_queue *q;

        /* delete own queues from queue list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queue_list_remove(i, client)) != NULL)
                        queue_delete(q);
        }

        /* remove cells from existing queues -
         * they are not owned by this client
         */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap)) {
                        snd_seq_prioq_leave(q->tickq, client, 0);
                        snd_seq_prioq_leave(q->timeq, client, 0);
                        snd_seq_queue_use(q->queue, client, 0);
                }
                queuefree(q);
        }
}



/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                snd_seq_prioq_leave(q->tickq, client, 0);
                snd_seq_prioq_leave(q->timeq, client, 0);
                queuefree(q);
        }
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap) &&
                    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
                     q->queue == info->queue)) {
                        snd_seq_prioq_remove_events(q->tickq, client, info);
                        snd_seq_prioq_remove_events(q->timeq, client, info);
                }
                queuefree(q);
        }
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                                  int atomic, int hop)
{
        struct snd_seq_event sev;

        sev = *ev;

        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
        sev.time.tick = q->timer->tick.cur_tick;
        sev.queue = q->queue;
        sev.data.queue.queue = q->queue;

        /* broadcast events from Timer port */
        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                                        struct snd_seq_event *ev,
                                        int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_START:
                snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
                snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
                if (! snd_seq_timer_start(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_CONTINUE:
                if (! snd_seq_timer_continue(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_STOP:
                snd_seq_timer_stop(q->timer);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_TEMPO:
                snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TICK:
                if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TIME:
                if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        case SNDRV_SEQ_EVENT_QUEUE_SKEW:
                if (snd_seq_timer_set_skew(q->timer,
                                           ev->data.queue.param.skew.value,
                                           ev->data.queue.param.skew.base) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        }
}


/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!ev))
                return -EINVAL;
        q = queueptr(ev->data.queue.queue);

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, ev->source.client)) {
                queuefree(q);
                return -EPERM;
        }

        snd_seq_queue_process_event(q, ev, atomic, hop);

        queue_access_unlock(q);
        queuefree(q);
        return 0;
}
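/*
 * Example (hypothetical sender, sketch only): a client allowed to control
 * the queue can start it by sending a queue-control event to the system
 * timer port; the event ends up in this callback and is handled by
 * snd_seq_queue_process_event() above:
 *
 *      struct snd_seq_event ev;
 *
 *      memset(&ev, 0, sizeof(ev));
 *      ev.type = SNDRV_SEQ_EVENT_START;
 *      ev.source.client = client;              // must pass queue_access_lock()
 *      ev.data.queue.queue = qid;              // which queue to start
 *      ev.dest.client = SNDRV_SEQ_CLIENT_SYSTEM;
 *      ev.dest.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
 */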


/*----------------------------------------------------------------*/

#ifdef CONFIG_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        int i, bpm;
        struct snd_seq_queue *q;
        struct snd_seq_timer *tmr;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;

                tmr = q->timer;
                if (tmr->tempo)
                        bpm = 60000000 / tmr->tempo;
                else
                        bpm = 0;

                snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
                snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
                snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
                snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
                snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
                snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
                snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
                snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
                snd_iprintf(buffer, "current BPM        : %d\n", bpm);
                snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
                snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
                snd_iprintf(buffer, "\n");
                queuefree(q);
        }
}
#endif /* CONFIG_PROC_FS */