linux/sound/core/seq/seq_queue.c
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999      Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When the owner client is deleted, all owned queues are deleted, too.
 *     - The owner of an unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - The owner field in the SET_QUEUE_OWNER ioctl must be identical to
 *       the caller client, i.e. changing the owner to a third client is not
 *       allowed.
 *
 *  Aug. 30, 2000       Takashi Iwai
 *     - Queues are managed in a static array again, but in a better way.
 *       The API itself is identical.
 *     - The queue is locked when a struct snd_seq_queue pointer is returned
 *       via queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
        return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (! queue_list[i]) {
                        queue_list[i] = q;
                        q->queue = i;
                        num_queues++;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return -1;
}

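/* remove the queue with the given id from the allocated-queue list, but
 * only if it is owned by the given client.  The queue is marked
 * kernel-locked before it is unlinked; returns the removed queue, or NULL
 * if it does not exist or is owned by somebody else.
 */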
static struct snd_seq_queue *queue_list_remove(int id, int client)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[id];
        if (q) {
                spin_lock(&q->owner_lock);
                if (q->owner == client) {
                        /* found */
                        q->klocked = 1;
                        spin_unlock(&q->owner_lock);
                        queue_list[id] = NULL;
                        num_queues--;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return q;
                }
                spin_unlock(&q->owner_lock);
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
        struct snd_seq_queue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (q == NULL) {
                snd_printd("malloc failed for snd_seq_queue_new()\n");
                return NULL;
        }

        spin_lock_init(&q->owner_lock);
        spin_lock_init(&q->check_lock);
        mutex_init(&q->timer_mutex);
        snd_use_lock_init(&q->use_lock);
        q->queue = -1;

        q->tickq = snd_seq_prioq_new();
        q->timeq = snd_seq_prioq_new();
        q->timer = snd_seq_timer_new();
        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
                snd_seq_prioq_delete(&q->tickq);
                snd_seq_prioq_delete(&q->timeq);
                snd_seq_timer_delete(&q->timer);
                kfree(q);
                return NULL;
        }

        q->owner = owner;
        q->locked = locked;
        q->klocked = 0;

        return q;
}

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
        /* stop and release the timer */
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
        /* wait until all concurrent accesses are finished */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);

        kfree(q);
}


/*----------------------------------------------------------------*/

/* setup queues */
int __init snd_seq_queues_init(void)
{
        /*
        memset(queue_list, 0, sizeof(queue_list));
        num_queues = 0;
        */
        return 0;
}

/* delete all existing queues */
void __exit snd_seq_queues_delete(void)
{
        int i;

        /* clear list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (queue_list[i])
                        queue_delete(queue_list[i]);
        }
}

/* allocate a new queue -
 * returns the queue index on success, or a negative error code
 */
int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
        struct snd_seq_queue *q;

        q = queue_new(client, locked);
        if (q == NULL)
                return -ENOMEM;
        q->info_flags = info_flags;
        if (queue_list_add(q) < 0) {
                queue_delete(q);
                return -ENOMEM;
        }
        snd_seq_queue_use(q->queue, client, 1); /* use this queue */
        return q->queue;
}

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return -EINVAL;
        q = queue_list_remove(queueid, client);
        if (q == NULL)
                return -EINVAL;
        queue_delete(q);

        return 0;
}


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return NULL;
        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[queueid];
        if (q)
                snd_use_lock_use(&q->use_lock);
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return q;
}
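
/* Typical usage of the lookup above (illustrative sketch only, not called
 * from this file): queueptr() takes the use-lock of the queue, so every
 * successful lookup must be paired with queuefree(), e.g.
 *
 *      struct snd_seq_queue *q = queueptr(queueid);
 *      if (q == NULL)
 *              return -EINVAL;
 *      ...access q...
 *      queuefree(q);   (drops the use-lock taken by queueptr())
 */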

/* return the (first) queue matching the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) != NULL) {
                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
                                return q;
                        queuefree(q);
                }
        }
        return NULL;
}


/* -------------------------------------------------------- */

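/* dispatch all expired events on the queue -
 * events whose tick/real-time stamps are not later than the current timer
 * position are pulled from the two priority queues and dispatched.  The
 * check_blocked/check_again flags keep this non-reentrant: a concurrent
 * caller merely sets check_again and the instance already running loops
 * once more.
 */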
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_event_cell *cell;

        if (q == NULL)
                return;

        /* make this function non-reentrant */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_blocked) {
                q->check_again = 1;
                spin_unlock_irqrestore(&q->check_lock, flags);
                return;         /* other thread is already checking queues */
        }
        q->check_blocked = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);

      __again:
        /* Process tick queue... */
        while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
                if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
                                              &cell->event.time.tick)) {
                        cell = snd_seq_prioq_cell_out(q->tickq);
                        if (cell)
                                snd_seq_dispatch_event(cell, atomic, hop);
                } else {
                        /* event remains in the queue */
                        break;
                }
        }


        /* Process time queue... */
        while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
                if (snd_seq_compare_real_time(&q->timer->cur_time,
                                              &cell->event.time.time)) {
                        cell = snd_seq_prioq_cell_out(q->timeq);
                        if (cell)
                                snd_seq_dispatch_event(cell, atomic, hop);
                } else {
                        /* event remains in the queue */
                        break;
                }
        }

        /* free lock */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_again) {
                q->check_again = 0;
                spin_unlock_irqrestore(&q->check_lock, flags);
                goto __again;
        }
        q->check_blocked = 0;
        spin_unlock_irqrestore(&q->check_lock, flags);
}


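/* Note on timestamp handling (illustrative example, not from the original
 * source): events stamped with SNDRV_SEQ_TIME_MODE_REL are rebased against
 * the queue's current position before being queued.  E.g. with the tick
 * timer at cur_tick == 480, a relative tick stamp of 96 is stored as the
 * absolute tick 576 and the event is re-flagged SNDRV_SEQ_TIME_MODE_ABS,
 * exactly as done in snd_seq_enqueue_event() below.
 */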
/* enqueue an event into a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        int dest, err;
        struct snd_seq_queue *q;

        snd_assert(cell != NULL, return -EINVAL);
        dest = cell->event.queue;       /* destination queue */
        q = queueptr(dest);
        if (q == NULL)
                return -EINVAL;
        /* handle relative time stamps, convert them into absolute */
        if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
                switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        cell->event.time.tick += q->timer->tick.cur_tick;
                        break;

                case SNDRV_SEQ_TIME_STAMP_REAL:
                        snd_seq_inc_real_time(&cell->event.time.time,
                                              &q->timer->cur_time);
                        break;
                }
                cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
                cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        }
        /* enqueue event in the real-time or tick queue */
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
                err = snd_seq_prioq_cell_in(q->tickq, cell);
                break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
        default:
                err = snd_seq_prioq_cell_in(q->timeq, cell);
                break;
        }

        if (err < 0) {
                queuefree(q); /* unlock */
                return err;
        }

        /* trigger dispatching */
        snd_seq_check_queue(q, atomic, hop);

        queuefree(q); /* unlock */

        return 0;
}


/*----------------------------------------------------------------*/

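/* a client may modify queue parameters if it is the owner, or if the queue
 * is neither owner-locked (locked) nor kernel-locked (klocked)
 */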
static inline int check_access(struct snd_seq_queue *q, int client)
{
        return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
        unsigned long flags;
        int access_ok;

        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        if (access_ok)
                q->klocked = 1;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->owner_lock, flags);
        q->klocked = 0;
        spin_unlock_irqrestore(&q->owner_lock, flags);
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int access_ok;
        unsigned long flags;

        if (! q)
                return 0;
        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queuefree(q);
        return access_ok;
}

/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
        struct snd_seq_queue *q = queueptr(queueid);

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        q->locked = locked ? 1 : 0;
        q->owner = client;
        queue_access_unlock(q);
        queuefree(q);

        return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * q->use mutex should be down before calling this function to avoid
 * conflicts with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
        int result = 0;
        struct snd_seq_queue *queue;
        struct snd_seq_timer *tmr;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
        if ((result = snd_seq_timer_open(queue)) < 0) {
                snd_seq_timer_defaults(tmr);
                result = snd_seq_timer_open(queue);
        }
        queuefree(queue);
        return result;
}

/* close timer -
 * q->use mutex should be down before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
        struct snd_seq_queue *queue;
        struct snd_seq_timer *tmr;
        int result = 0;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
        snd_seq_timer_close(queue);
        queuefree(queue);
        return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                                  struct snd_seq_queue_tempo *info)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int result;

        if (q == NULL)
                return -EINVAL;
        if (! queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        result = snd_seq_timer_set_tempo(q->timer, info->tempo);
        if (result >= 0)
                result = snd_seq_timer_set_ppq(q->timer, info->ppq);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
        queue_access_unlock(q);
        queuefree(q);
        return result;
}


/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if it is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
        struct snd_seq_queue *queue;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        mutex_lock(&queue->timer_mutex);
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
        } else {
                if (test_and_clear_bit(client, queue->clients_bitmap))
                        queue->clients--;
        }
        if (queue->clients) {
                if (use && queue->clients == 1)
                        snd_seq_timer_defaults(queue->timer);
                snd_seq_timer_open(queue);
        } else {
                snd_seq_timer_close(queue);
        }
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
}
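
/* Note: clients_bitmap records which client numbers currently use the
 * queue and "clients" is the matching population count; the timer is reset
 * to defaults and opened when the queue gains its first user, and closed
 * again when the last user detaches.
 */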

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
        struct snd_seq_queue *q;
        int result;

        q = queueptr(queueid);
        if (q == NULL)
                return -EINVAL; /* invalid queue */
        result = test_bit(client, q->clients_bitmap) ? 1 : 0;
        queuefree(q);
        return result;
}


/*----------------------------------------------------------------*/

/* notification that client has left the system -
 * stop the timer on all queues owned by this client
 */
void snd_seq_queue_client_termination(int client)
{
        unsigned long flags;
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                spin_lock_irqsave(&q->owner_lock, flags);
                if (q->owner == client)
                        q->klocked = 1;
                spin_unlock_irqrestore(&q->owner_lock, flags);
                if (q->owner == client) {
                        if (q->timer->running)
                                snd_seq_timer_stop(q->timer);
                        snd_seq_timer_reset(q->timer);
                }
                queuefree(q);
        }
}

/* final stage notification -
 * remove cells for clients that no longer exist (for non-owned queues)
 * or delete the queue itself (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
        int i;
        struct snd_seq_queue *q;

        /* delete own queues from queue list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queue_list_remove(i, client)) != NULL)
                        queue_delete(q);
        }

        /* remove cells from existing queues -
         * they are not owned by this client
         */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap)) {
                        snd_seq_prioq_leave(q->tickq, client, 0);
                        snd_seq_prioq_leave(q->timeq, client, 0);
                        snd_seq_queue_use(q->queue, client, 0);
                }
                queuefree(q);
        }
}



/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                snd_seq_prioq_leave(q->tickq, client, 0);
                snd_seq_prioq_leave(q->timeq, client, 0);
                queuefree(q);
        }
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap) &&
                    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
                     q->queue == info->queue)) {
                        snd_seq_prioq_remove_events(q->tickq, client, info);
                        snd_seq_prioq_remove_events(q->timeq, client, info);
                }
                queuefree(q);
        }
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                                  int atomic, int hop)
{
        struct snd_seq_event sev;

        sev = *ev;

        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
        sev.time.tick = q->timer->tick.cur_tick;
        sev.queue = q->queue;
        sev.data.queue.queue = q->queue;

        /* broadcast events from Timer port */
        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                                        struct snd_seq_event *ev,
                                        int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_START:
                snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
                snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
                if (! snd_seq_timer_start(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_CONTINUE:
                if (! snd_seq_timer_continue(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_STOP:
                snd_seq_timer_stop(q->timer);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_TEMPO:
                snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TICK:
                if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TIME:
                if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        case SNDRV_SEQ_EVENT_QUEUE_SKEW:
                if (snd_seq_timer_set_skew(q->timer,
                                           ev->data.queue.param.skew.value,
                                           ev->data.queue.param.skew.base) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        }
}


/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
        struct snd_seq_queue *q;

        snd_assert(ev != NULL, return -EINVAL);
        q = queueptr(ev->data.queue.queue);

        if (q == NULL)
                return -EINVAL;

        if (! queue_access_lock(q, ev->source.client)) {
                queuefree(q);
                return -EPERM;
        }

        snd_seq_queue_process_event(q, ev, atomic, hop);

        queue_access_unlock(q);
        queuefree(q);
        return 0;
}
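
/* Illustrative note (typical calling path, an assumption not spelled out in
 * this file): a client usually starts queue N by sending a
 * SNDRV_SEQ_EVENT_START event with ev.data.queue.queue = N to the system
 * Timer port (SNDRV_SEQ_CLIENT_SYSTEM:SNDRV_SEQ_PORT_SYSTEM_TIMER); that
 * delivery invokes snd_seq_control_queue() above, which verifies access via
 * queue_access_lock() and forwards to snd_seq_queue_process_event().
 */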


/*----------------------------------------------------------------*/

#ifdef CONFIG_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        int i, bpm;
        struct snd_seq_queue *q;
        struct snd_seq_timer *tmr;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;

                tmr = q->timer;
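                /* tmr->tempo is in microseconds per quarter note (MIDI
                 * convention), hence BPM = 60,000,000 / tempo
                 */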
                if (tmr->tempo)
                        bpm = 60000000 / tmr->tempo;
                else
                        bpm = 0;

                snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
                snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
                snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
                snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
                snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
                snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
                snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
                snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
                snd_iprintf(buffer, "current BPM        : %d\n", bpm);
                snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
                snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
                snd_iprintf(buffer, "\n");
                queuefree(q);
        }
}
#endif /* CONFIG_PROC_FS */