linux/sound/core/seq/seq_queue.c
<<
>>
Prefs
   1/*
   2 *   ALSA sequencer Timing queue handling
   3 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
   4 *
   5 *   This program is free software; you can redistribute it and/or modify
   6 *   it under the terms of the GNU General Public License as published by
   7 *   the Free Software Foundation; either version 2 of the License, or
   8 *   (at your option) any later version.
   9 *
  10 *   This program is distributed in the hope that it will be useful,
  11 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 *   GNU General Public License for more details.
  14 *
  15 *   You should have received a copy of the GNU General Public License
  16 *   along with this program; if not, write to the Free Software
  17 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 *
  19 * MAJOR CHANGES
  20 *   Nov. 13, 1999      Takashi Iwai <iwai@ww.uni-erlangen.de>
  21 *     - Queues are allocated dynamically via ioctl.
  22 *     - When owner client is deleted, all owned queues are deleted, too.
  23 *     - Owner of unlocked queue is kept unmodified even if it is
  24 *       manipulated by other clients.
  25 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
  26 *       caller client.  i.e. Changing owner to a third client is not
  27 *       allowed.
  28 *
  29 *  Aug. 30, 2000       Takashi Iwai
  30 *     - Queues are managed in static array again, but with better way.
  31 *       The API itself is identical.
  32 *     - The queue is locked when struct snd_seq_queue pointer is returned via
  33 *       queueptr().  This pointer *MUST* be released afterward by
  34 *       queuefree(ptr).
  35 *     - Addition of experimental sync support.
  36 */
  37
  38#include <linux/init.h>
  39#include <linux/slab.h>
  40#include <sound/core.h>
  41
  42#include "seq_memory.h"
  43#include "seq_queue.h"
  44#include "seq_clientmgr.h"
  45#include "seq_fifo.h"
  46#include "seq_timer.h"
  47#include "seq_info.h"
  48
  49/* list of allocated queues */
  50static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
  51static DEFINE_SPINLOCK(queue_list_lock);
  52/* number of queues allocated */
  53static int num_queues;
  54
  55int snd_seq_queue_get_cur_queues(void)
  56{
  57        return num_queues;
  58}
  59
  60/*----------------------------------------------------------------*/
  61
  62/* assign queue id and insert to list */
  63static int queue_list_add(struct snd_seq_queue *q)
  64{
  65        int i;
  66        unsigned long flags;
  67
  68        spin_lock_irqsave(&queue_list_lock, flags);
  69        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
  70                if (! queue_list[i]) {
  71                        queue_list[i] = q;
  72                        q->queue = i;
  73                        num_queues++;
  74                        spin_unlock_irqrestore(&queue_list_lock, flags);
  75                        return i;
  76                }
  77        }
  78        spin_unlock_irqrestore(&queue_list_lock, flags);
  79        return -1;
  80}
  81
/* Detach the queue with the given id from the global list, but only if
 * it is owned by @client.  On success the queue is marked kernel-locked
 * (klocked) under owner_lock before it is unlinked, so no other client
 * can grab access while the caller tears it down; the caller then owns
 * the queue and is responsible for deleting it.
 * Returns the detached queue, or NULL if the slot is empty or the queue
 * belongs to another client.
 */
static struct snd_seq_queue *queue_list_remove(int id, int client)
{
	struct snd_seq_queue *q;
	unsigned long flags;

	spin_lock_irqsave(&queue_list_lock, flags);
	q = queue_list[id];
	if (q) {
		/* owner_lock nests inside queue_list_lock */
		spin_lock(&q->owner_lock);
		if (q->owner == client) {
			/* found */
			q->klocked = 1;
			spin_unlock(&q->owner_lock);
			queue_list[id] = NULL;
			num_queues--;
			spin_unlock_irqrestore(&queue_list_lock, flags);
			return q;
		}
		spin_unlock(&q->owner_lock);
	}
	spin_unlock_irqrestore(&queue_list_lock, flags);
	return NULL;
}
 105
 106/*----------------------------------------------------------------*/
 107
 108/* create new queue (constructor) */
 109static struct snd_seq_queue *queue_new(int owner, int locked)
 110{
 111        struct snd_seq_queue *q;
 112
 113        q = kzalloc(sizeof(*q), GFP_KERNEL);
 114        if (q == NULL) {
 115                snd_printd("malloc failed for snd_seq_queue_new()\n");
 116                return NULL;
 117        }
 118
 119        spin_lock_init(&q->owner_lock);
 120        spin_lock_init(&q->check_lock);
 121        mutex_init(&q->timer_mutex);
 122        snd_use_lock_init(&q->use_lock);
 123        q->queue = -1;
 124
 125        q->tickq = snd_seq_prioq_new();
 126        q->timeq = snd_seq_prioq_new();
 127        q->timer = snd_seq_timer_new();
 128        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
 129                snd_seq_prioq_delete(&q->tickq);
 130                snd_seq_prioq_delete(&q->timeq);
 131                snd_seq_timer_delete(&q->timer);
 132                kfree(q);
 133                return NULL;
 134        }
 135
 136        q->owner = owner;
 137        q->locked = locked;
 138        q->klocked = 0;
 139
 140        return q;
 141}
 142
/* delete queue (destructor) -
 * the queue must already be unlinked from the global list; stops its
 * timer, waits for all concurrent users to drop their use_lock
 * references, then frees all owned resources and the queue itself.
 */
static void queue_delete(struct snd_seq_queue *q)
{
	/* stop and release the timer */
	snd_seq_timer_stop(q->timer);
	snd_seq_timer_close(q);
	/* wait until access free */
	snd_use_lock_sync(&q->use_lock);
	/* release resources... */
	snd_seq_prioq_delete(&q->tickq);
	snd_seq_prioq_delete(&q->timeq);
	snd_seq_timer_delete(&q->timer);

	kfree(q);
}
 158
 159
 160/*----------------------------------------------------------------*/
 161
 162/* setup queues */
 163int __init snd_seq_queues_init(void)
 164{
 165        /*
 166        memset(queue_list, 0, sizeof(queue_list));
 167        num_queues = 0;
 168        */
 169        return 0;
 170}
 171
 172/* delete all existing queues */
 173void __exit snd_seq_queues_delete(void)
 174{
 175        int i;
 176
 177        /* clear list */
 178        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 179                if (queue_list[i])
 180                        queue_delete(queue_list[i]);
 181        }
 182}
 183
 184/* allocate a new queue -
 185 * return queue index value or negative value for error
 186 */
 187int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
 188{
 189        struct snd_seq_queue *q;
 190
 191        q = queue_new(client, locked);
 192        if (q == NULL)
 193                return -ENOMEM;
 194        q->info_flags = info_flags;
 195        if (queue_list_add(q) < 0) {
 196                queue_delete(q);
 197                return -ENOMEM;
 198        }
 199        snd_seq_queue_use(q->queue, client, 1); /* use this queue */
 200        return q->queue;
 201}
 202
 203/* delete a queue - queue must be owned by the client */
 204int snd_seq_queue_delete(int client, int queueid)
 205{
 206        struct snd_seq_queue *q;
 207
 208        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
 209                return -EINVAL;
 210        q = queue_list_remove(queueid, client);
 211        if (q == NULL)
 212                return -EINVAL;
 213        queue_delete(q);
 214
 215        return 0;
 216}
 217
 218
 219/* return pointer to queue structure for specified id */
 220struct snd_seq_queue *queueptr(int queueid)
 221{
 222        struct snd_seq_queue *q;
 223        unsigned long flags;
 224
 225        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
 226                return NULL;
 227        spin_lock_irqsave(&queue_list_lock, flags);
 228        q = queue_list[queueid];
 229        if (q)
 230                snd_use_lock_use(&q->use_lock);
 231        spin_unlock_irqrestore(&queue_list_lock, flags);
 232        return q;
 233}
 234
 235/* return the (first) queue matching with the specified name */
 236struct snd_seq_queue *snd_seq_queue_find_name(char *name)
 237{
 238        int i;
 239        struct snd_seq_queue *q;
 240
 241        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 242                if ((q = queueptr(i)) != NULL) {
 243                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
 244                                return q;
 245                        queuefree(q);
 246                }
 247        }
 248        return NULL;
 249}
 250
 251
 252/* -------------------------------------------------------- */
 253
/* Dispatch all events on @q (both the tick queue and the real-time
 * queue) whose timestamps have been reached by the queue's timer.
 * The check_blocked/check_again flags make this non-reentrant: a
 * second caller only requests a re-run and returns immediately, while
 * the first caller loops until no re-run is pending.
 */
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
	unsigned long flags;
	struct snd_seq_event_cell *cell;

	if (q == NULL)
		return;

	/* make this function non-reentrant */
	spin_lock_irqsave(&q->check_lock, flags);
	if (q->check_blocked) {
		q->check_again = 1;
		spin_unlock_irqrestore(&q->check_lock, flags);
		return;		/* other thread is already checking queues */
	}
	q->check_blocked = 1;
	spin_unlock_irqrestore(&q->check_lock, flags);

      __again:
	/* Process tick queue... */
	while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
		if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
					      &cell->event.time.tick)) {
			/* due: pop the cell and deliver it */
			cell = snd_seq_prioq_cell_out(q->tickq);
			if (cell)
				snd_seq_dispatch_event(cell, atomic, hop);
		} else {
			/* event remains in the queue */
			break;
		}
	}


	/* Process time queue... */
	while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
		if (snd_seq_compare_real_time(&q->timer->cur_time,
					      &cell->event.time.time)) {
			cell = snd_seq_prioq_cell_out(q->timeq);
			if (cell)
				snd_seq_dispatch_event(cell, atomic, hop);
		} else {
			/* event remains in the queue */
			break;
		}
	}

	/* free lock */
	spin_lock_irqsave(&q->check_lock, flags);
	if (q->check_again) {
		/* another context asked for a re-check while we were
		 * dispatching; run one more pass before unblocking
		 */
		q->check_again = 0;
		spin_unlock_irqrestore(&q->check_lock, flags);
		goto __again;
	}
	q->check_blocked = 0;
	spin_unlock_irqrestore(&q->check_lock, flags);
}
 310
 311
/* enqueue an event to a single queue -
 * converts a relative timestamp into an absolute one using the queue
 * timer's current position, inserts the cell into the tick or the
 * real-time priority queue according to its stamp type, and finally
 * triggers dispatching of any events that are already due.
 * Returns 0 on success or a negative error code.
 */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	int dest, err;
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!cell))
		return -EINVAL;
	dest = cell->event.queue;	/* destination queue */
	q = queueptr(dest);
	if (q == NULL)
		return -EINVAL;
	/* handle relative time stamps, convert them into absolute */
	if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
		switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += q->timer->tick.cur_tick;
			break;

		case SNDRV_SEQ_TIME_STAMP_REAL:
			snd_seq_inc_real_time(&cell->event.time.time,
					      &q->timer->cur_time);
			break;
		}
		/* re-mark the event as absolute so it is not converted twice */
		cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
		cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
	}
	/* enqueue event in the real-time or midi queue */
	switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
	case SNDRV_SEQ_TIME_STAMP_TICK:
		err = snd_seq_prioq_cell_in(q->tickq, cell);
		break;

	case SNDRV_SEQ_TIME_STAMP_REAL:
	default:
		/* unknown stamp types fall back to the real-time queue */
		err = snd_seq_prioq_cell_in(q->timeq, cell);
		break;
	}

	if (err < 0) {
		queuefree(q); /* unlock */
		return err;
	}

	/* trigger dispatching */
	snd_seq_check_queue(q, atomic, hop);

	queuefree(q); /* unlock */

	return 0;
}
 363
 364
 365/*----------------------------------------------------------------*/
 366
 367static inline int check_access(struct snd_seq_queue *q, int client)
 368{
 369        return (q->owner == client) || (!q->locked && !q->klocked);
 370}
 371
 372/* check if the client has permission to modify queue parameters.
 373 * if it does, lock the queue
 374 */
 375static int queue_access_lock(struct snd_seq_queue *q, int client)
 376{
 377        unsigned long flags;
 378        int access_ok;
 379        
 380        spin_lock_irqsave(&q->owner_lock, flags);
 381        access_ok = check_access(q, client);
 382        if (access_ok)
 383                q->klocked = 1;
 384        spin_unlock_irqrestore(&q->owner_lock, flags);
 385        return access_ok;
 386}
 387
 388/* unlock the queue */
 389static inline void queue_access_unlock(struct snd_seq_queue *q)
 390{
 391        unsigned long flags;
 392
 393        spin_lock_irqsave(&q->owner_lock, flags);
 394        q->klocked = 0;
 395        spin_unlock_irqrestore(&q->owner_lock, flags);
 396}
 397
 398/* exported - only checking permission */
 399int snd_seq_queue_check_access(int queueid, int client)
 400{
 401        struct snd_seq_queue *q = queueptr(queueid);
 402        int access_ok;
 403        unsigned long flags;
 404
 405        if (! q)
 406                return 0;
 407        spin_lock_irqsave(&q->owner_lock, flags);
 408        access_ok = check_access(q, client);
 409        spin_unlock_irqrestore(&q->owner_lock, flags);
 410        queuefree(q);
 411        return access_ok;
 412}
 413
 414/*----------------------------------------------------------------*/
 415
 416/*
 417 * change queue's owner and permission
 418 */
 419int snd_seq_queue_set_owner(int queueid, int client, int locked)
 420{
 421        struct snd_seq_queue *q = queueptr(queueid);
 422
 423        if (q == NULL)
 424                return -EINVAL;
 425
 426        if (! queue_access_lock(q, client)) {
 427                queuefree(q);
 428                return -EPERM;
 429        }
 430
 431        q->locked = locked ? 1 : 0;
 432        q->owner = client;
 433        queue_access_unlock(q);
 434        queuefree(q);
 435
 436        return 0;
 437}
 438
 439
 440/*----------------------------------------------------------------*/
 441
 442/* open timer -
 443 * q->use mutex should be down before calling this function to avoid
 444 * confliction with snd_seq_queue_use()
 445 */
 446int snd_seq_queue_timer_open(int queueid)
 447{
 448        int result = 0;
 449        struct snd_seq_queue *queue;
 450        struct snd_seq_timer *tmr;
 451
 452        queue = queueptr(queueid);
 453        if (queue == NULL)
 454                return -EINVAL;
 455        tmr = queue->timer;
 456        if ((result = snd_seq_timer_open(queue)) < 0) {
 457                snd_seq_timer_defaults(tmr);
 458                result = snd_seq_timer_open(queue);
 459        }
 460        queuefree(queue);
 461        return result;
 462}
 463
 464/* close timer -
 465 * q->use mutex should be down before calling this function
 466 */
 467int snd_seq_queue_timer_close(int queueid)
 468{
 469        struct snd_seq_queue *queue;
 470        struct snd_seq_timer *tmr;
 471        int result = 0;
 472
 473        queue = queueptr(queueid);
 474        if (queue == NULL)
 475                return -EINVAL;
 476        tmr = queue->timer;
 477        snd_seq_timer_close(queue);
 478        queuefree(queue);
 479        return result;
 480}
 481
 482/* change queue tempo and ppq */
 483int snd_seq_queue_timer_set_tempo(int queueid, int client,
 484                                  struct snd_seq_queue_tempo *info)
 485{
 486        struct snd_seq_queue *q = queueptr(queueid);
 487        int result;
 488
 489        if (q == NULL)
 490                return -EINVAL;
 491        if (! queue_access_lock(q, client)) {
 492                queuefree(q);
 493                return -EPERM;
 494        }
 495
 496        result = snd_seq_timer_set_tempo(q->timer, info->tempo);
 497        if (result >= 0)
 498                result = snd_seq_timer_set_ppq(q->timer, info->ppq);
 499        if (result >= 0 && info->skew_base > 0)
 500                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
 501                                                info->skew_base);
 502        queue_access_unlock(q);
 503        queuefree(q);
 504        return result;
 505}
 506
 507
/* use or unuse this queue -
 * if it is the first client, starts the timer.
 * if it is not longer used by any clients, stop the timer.
 * The per-client membership is tracked in clients_bitmap; the clients
 * counter only changes when the bit actually flips, so repeated
 * use/unuse calls from the same client are idempotent.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
	struct snd_seq_queue *queue;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	/* timer_mutex serializes against timer open/close */
	mutex_lock(&queue->timer_mutex);
	if (use) {
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		/* first user gets default timer settings before open */
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		snd_seq_timer_close(queue);
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);
	return 0;
}
 538
 539/*
 540 * check if queue is used by the client
 541 * return negative value if the queue is invalid.
 542 * return 0 if not used, 1 if used.
 543 */
 544int snd_seq_queue_is_used(int queueid, int client)
 545{
 546        struct snd_seq_queue *q;
 547        int result;
 548
 549        q = queueptr(queueid);
 550        if (q == NULL)
 551                return -EINVAL; /* invalid queue */
 552        result = test_bit(client, q->clients_bitmap) ? 1 : 0;
 553        queuefree(q);
 554        return result;
 555}
 556
 557
 558/*----------------------------------------------------------------*/
 559
 560/* notification that client has left the system -
 561 * stop the timer on all queues owned by this client
 562 */
 563void snd_seq_queue_client_termination(int client)
 564{
 565        unsigned long flags;
 566        int i;
 567        struct snd_seq_queue *q;
 568
 569        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 570                if ((q = queueptr(i)) == NULL)
 571                        continue;
 572                spin_lock_irqsave(&q->owner_lock, flags);
 573                if (q->owner == client)
 574                        q->klocked = 1;
 575                spin_unlock_irqrestore(&q->owner_lock, flags);
 576                if (q->owner == client) {
 577                        if (q->timer->running)
 578                                snd_seq_timer_stop(q->timer);
 579                        snd_seq_timer_reset(q->timer);
 580                }
 581                queuefree(q);
 582        }
 583}
 584
 585/* final stage notification -
 586 * remove cells for no longer exist client (for non-owned queue)
 587 * or delete this queue (for owned queue)
 588 */
 589void snd_seq_queue_client_leave(int client)
 590{
 591        int i;
 592        struct snd_seq_queue *q;
 593
 594        /* delete own queues from queue list */
 595        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 596                if ((q = queue_list_remove(i, client)) != NULL)
 597                        queue_delete(q);
 598        }
 599
 600        /* remove cells from existing queues -
 601         * they are not owned by this client
 602         */
 603        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 604                if ((q = queueptr(i)) == NULL)
 605                        continue;
 606                if (test_bit(client, q->clients_bitmap)) {
 607                        snd_seq_prioq_leave(q->tickq, client, 0);
 608                        snd_seq_prioq_leave(q->timeq, client, 0);
 609                        snd_seq_queue_use(q->queue, client, 0);
 610                }
 611                queuefree(q);
 612        }
 613}
 614
 615
 616
 617/*----------------------------------------------------------------*/
 618
 619/* remove cells from all queues */
 620void snd_seq_queue_client_leave_cells(int client)
 621{
 622        int i;
 623        struct snd_seq_queue *q;
 624
 625        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 626                if ((q = queueptr(i)) == NULL)
 627                        continue;
 628                snd_seq_prioq_leave(q->tickq, client, 0);
 629                snd_seq_prioq_leave(q->timeq, client, 0);
 630                queuefree(q);
 631        }
 632}
 633
 634/* remove cells based on flush criteria */
 635void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
 636{
 637        int i;
 638        struct snd_seq_queue *q;
 639
 640        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 641                if ((q = queueptr(i)) == NULL)
 642                        continue;
 643                if (test_bit(client, q->clients_bitmap) &&
 644                    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
 645                     q->queue == info->queue)) {
 646                        snd_seq_prioq_remove_events(q->tickq, client, info);
 647                        snd_seq_prioq_remove_events(q->timeq, client, info);
 648                }
 649                queuefree(q);
 650        }
 651}
 652
 653/*----------------------------------------------------------------*/
 654
 655/*
 656 * send events to all subscribed ports
 657 */
 658static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
 659                                  int atomic, int hop)
 660{
 661        struct snd_seq_event sev;
 662
 663        sev = *ev;
 664        
 665        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
 666        sev.time.tick = q->timer->tick.cur_tick;
 667        sev.queue = q->queue;
 668        sev.data.queue.queue = q->queue;
 669
 670        /* broadcast events from Timer port */
 671        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
 672        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
 673        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 674        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
 675}
 676
/*
 * process a received queue-control event.
 * Timer-state changes (start/continue/stop/tempo/position/skew) are
 * applied to the queue's timer; each successfully applied change is
 * re-broadcast to subscribers via queue_broadcast_event().
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		/* a fresh start discards the sender's pending events first */
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (! snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (! snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	/* other event types are silently ignored */
	}
}
 728
 729
 730/*
 731 * Queue control via timer control port:
 732 * this function is exported as a callback of timer port.
 733 */
 734int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
 735{
 736        struct snd_seq_queue *q;
 737
 738        if (snd_BUG_ON(!ev))
 739                return -EINVAL;
 740        q = queueptr(ev->data.queue.queue);
 741
 742        if (q == NULL)
 743                return -EINVAL;
 744
 745        if (! queue_access_lock(q, ev->source.client)) {
 746                queuefree(q);
 747                return -EPERM;
 748        }
 749
 750        snd_seq_queue_process_event(q, ev, atomic, hop);
 751
 752        queue_access_unlock(q);
 753        queuefree(q);
 754        return 0;
 755}
 756
 757
 758/*----------------------------------------------------------------*/
 759
 760#ifdef CONFIG_PROC_FS
/* exported to seq_info.c -
 * dump the state of every allocated queue (owner, lock state, pending
 * event counts, timer state/tempo/position) into the proc buffer.
 */
void snd_seq_info_queues_read(struct snd_info_entry *entry, 
			      struct snd_info_buffer *buffer)
{
	int i, bpm;
	struct snd_seq_queue *q;
	struct snd_seq_timer *tmr;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) == NULL)
			continue;

		tmr = q->timer;
		/* tempo is us per quarter note; 60e6/tempo gives BPM */
		if (tmr->tempo)
			bpm = 60000000 / tmr->tempo;
		else
			bpm = 0;

		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
		snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
		snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
		snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
		snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
		snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
		snd_iprintf(buffer, "current BPM        : %d\n", bpm);
		snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
		snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
		snd_iprintf(buffer, "\n");
		queuefree(q);
	}
}
 794#endif /* CONFIG_PROC_FS */
 795
 796