linux/sound/firewire/amdtp-stream.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE         3072
#define CYCLES_PER_SECOND       8000
#define TICKS_PER_SECOND        (TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_MAX_SECOND         8

/* Always support the Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS    0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT   16
#define TAG_NO_CIP_HEADER       0
#define TAG_CIP                 1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT           31
#define CIP_EOH                 (1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK            0x80000000
#define CIP_SID_SHIFT           24
#define CIP_SID_MASK            0x3f000000
#define CIP_DBS_MASK            0x00ff0000
#define CIP_DBS_SHIFT           16
#define CIP_SPH_MASK            0x00000400
#define CIP_SPH_SHIFT           10
#define CIP_DBC_MASK            0x000000ff
#define CIP_FMT_SHIFT           24
#define CIP_FMT_MASK            0x3f000000
#define CIP_FDF_MASK            0x00ff0000
#define CIP_FDF_SHIFT           16
#define CIP_SYT_MASK            0x0000ffff
#define CIP_SYT_NO_INFO         0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM              0x10
#define AMDTP_FDF_NO_DATA       0xff

// For the iso header, tstamp and two CIP header quadlets.
#define IR_CTX_HEADER_SIZE_CIP          16
// For the iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP       8
#define HEADER_TSTAMP_MASK      0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP          8 // For two CIP header quadlets.
#define IT_PKT_HEADER_SIZE_NO_CIP       0 // Nothing.

static void pcm_period_tasklet(struct tasklet_struct *t);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_ctx_payloads: callback handler to process payloads of the isoc
 *                        context
 * @protocol_size: the size of the protocol-specific data area to allocate
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
                      enum amdtp_stream_direction dir, enum cip_flags flags,
                      unsigned int fmt,
                      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
                      unsigned int protocol_size)
{
        if (process_ctx_payloads == NULL)
                return -EINVAL;

        s->protocol = kzalloc(protocol_size, GFP_KERNEL);
        if (!s->protocol)
                return -ENOMEM;

        s->unit = unit;
        s->direction = dir;
        s->flags = flags;
        s->context = ERR_PTR(-1);
        mutex_init(&s->mutex);
        tasklet_setup(&s->period_tasklet, pcm_period_tasklet);
        s->packet_index = 0;

        init_waitqueue_head(&s->callback_wait);
        s->callbacked = false;

        s->fmt = fmt;
        s->process_ctx_payloads = process_ctx_payloads;

        if (dir == AMDTP_OUT_STREAM)
                s->ctx_data.rx.syt_override = -1;

        return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
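
/*
 * A minimal usage sketch for the call above, from a hypothetical unit driver
 * ("struct example_proto", "process_payloads()" and "example_begin()" are
 * illustrative names, not part of this module; CIP_NONBLOCKING and the
 * callback type come from amdtp-stream.h):
 *
 *	struct example_proto { unsigned int pcm_channels; };
 *
 *	static unsigned int process_payloads(struct amdtp_stream *s,
 *			const struct pkt_desc *descs, unsigned int packets,
 *			struct snd_pcm_substream *pcm)
 *	{
 *		// Decode the data blocks in descs[0..packets-1] here and
 *		// return the number of handled PCM frames.
 *		return 0;
 *	}
 *
 *	static int example_begin(struct fw_unit *unit, struct amdtp_stream *s)
 *	{
 *		return amdtp_stream_init(s, unit, AMDTP_OUT_STREAM,
 *					 CIP_NONBLOCKING, CIP_FMT_AM,
 *					 process_payloads,
 *					 sizeof(struct example_proto));
 *	}
 *
 * The counterpart for release is amdtp_stream_destroy() below.
 */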

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
        /* Not initialized. */
        if (s->protocol == NULL)
                return;

        WARN_ON(amdtp_stream_running(s));
        kfree(s->protocol);
        mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
        [CIP_SFC_32000]  =  8,
        [CIP_SFC_44100]  =  8,
        [CIP_SFC_48000]  =  8,
        [CIP_SFC_88200]  = 16,
        [CIP_SFC_96000]  = 16,
        [CIP_SFC_176400] = 32,
        [CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
        [CIP_SFC_32000]  =  32000,
        [CIP_SFC_44100]  =  44100,
        [CIP_SFC_48000]  =  48000,
        [CIP_SFC_88200]  =  88200,
        [CIP_SFC_96000]  =  96000,
        [CIP_SFC_176400] = 176400,
        [CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
                                    struct snd_pcm_hw_rule *rule)
{
        struct snd_interval *s = hw_param_interval(params, rule->var);
        const struct snd_interval *r =
                hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
        struct snd_interval t = {0};
        unsigned int step = 0;
        int i;

        for (i = 0; i < CIP_SFC_COUNT; ++i) {
                if (snd_interval_test(r, amdtp_rate_table[i]))
                        step = max(step, amdtp_syt_intervals[i]);
        }

        t.min = roundup(s->min, step);
        t.max = rounddown(s->max, step);
        t.integer = 1;

        return snd_interval_refine(s, &t);
}
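
/*
 * For example, if the rate interval still allows both 48000 and 96000, the
 * rule above uses step = max(8, 16) = 16, so a requested size interval of
 * [100, 1000] frames is refined to [112, 992] frames.
 */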

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s:          the AMDTP stream, which must be initialized.
 * @runtime:    the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
                                        struct snd_pcm_runtime *runtime)
{
        struct snd_pcm_hardware *hw = &runtime->hw;
        unsigned int ctx_header_size;
        unsigned int maximum_usec_per_period;
        int err;

        hw->info = SNDRV_PCM_INFO_BATCH |
                   SNDRV_PCM_INFO_BLOCK_TRANSFER |
                   SNDRV_PCM_INFO_INTERLEAVED |
                   SNDRV_PCM_INFO_JOINT_DUPLEX |
                   SNDRV_PCM_INFO_MMAP |
                   SNDRV_PCM_INFO_MMAP_VALID;

        /* SNDRV_PCM_INFO_BATCH */
        hw->periods_min = 2;
        hw->periods_max = UINT_MAX;

        /* bytes for a frame */
        hw->period_bytes_min = 4 * hw->channels_max;

        /* Just to prevent from allocating too many pages. */
        hw->period_bytes_max = hw->period_bytes_min * 2048;
        hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

        // The Linux driver for 1394 OHCI controllers voluntarily flushes an
        // isoc context once the total size of accumulated context headers
        // reaches PAGE_SIZE. This kicks the tasklet for the isoc context and
        // brings a callback in the middle of the scheduled interrupts.
        // Although AMDTP streams in the same domain use the same number of
        // events per IRQ, use the largest context header size between the
        // IT/IR contexts. Here, the size of the context header for an IR
        // context is used for both contexts.
        if (!(s->flags & CIP_NO_HEADER))
                ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
        else
                ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
        maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
                                  CYCLES_PER_SECOND / ctx_header_size;
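        // For example, with 4 KiB pages and CIP headers (16 bytes of context
        // header per packet), this evaluates to:
        //   1000000 * 4096 / 8000 / 16 = 32000 usec (32 msec).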

        // In IEC 61883-6, one isoc packet can transfer events up to the value
        // of the syt interval. This comes from the interval of the isoc
        // cycle. As a 1394 OHCI controller can generate a hardware IRQ per
        // isoc packet, the interval is 125 usec.
        // However, there are two modes of transmission in IEC 61883-6;
        // blocking and non-blocking. In blocking mode, the sequence of isoc
        // packets includes 'empty' or 'NODATA' packets which include no
        // event. In non-blocking mode, the number of events per packet is
        // variable up to the syt interval.
        // Due to the above protocol design, the minimum number of PCM frames
        // per interrupt should be double the value of the syt interval, thus
        // the minimum period is 250 usec.
        err = snd_pcm_hw_constraint_minmax(runtime,
                                           SNDRV_PCM_HW_PARAM_PERIOD_TIME,
                                           250, maximum_usec_per_period);
        if (err < 0)
                goto end;

        /* A non-blocking stream has no more constraints. */
        if (!(s->flags & CIP_BLOCKING))
                goto end;

        /*
         * One AMDTP packet can include some frames. In blocking mode, the
         * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
         * depending on its sampling rate. For an accurate period interrupt,
         * it's preferable to align period/buffer sizes to the current
         * SYT_INTERVAL.
         */
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  apply_constraint_to_size, NULL,
                                  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto end;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                  apply_constraint_to_size, NULL,
                                  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto end;
end:
        return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
                                unsigned int data_block_quadlets)
{
        unsigned int sfc;

        for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
                if (amdtp_rate_table[sfc] == rate)
                        break;
        }
        if (sfc == ARRAY_SIZE(amdtp_rate_table))
                return -EINVAL;

        s->sfc = sfc;
        s->data_block_quadlets = data_block_quadlets;
        s->syt_interval = amdtp_syt_intervals[sfc];

        // default buffering in the device.
        if (s->direction == AMDTP_OUT_STREAM) {
                s->ctx_data.rx.transfer_delay =
                                        TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

                if (s->flags & CIP_BLOCKING) {
                        // additional buffering needed to adjust for no-data
                        // packets.
                        s->ctx_data.rx.transfer_delay +=
                                TICKS_PER_SECOND * s->syt_interval / rate;
                }
        }

        return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
        unsigned int multiplier = 1;
        unsigned int cip_header_size = 0;

        if (s->flags & CIP_JUMBO_PAYLOAD)
                multiplier = 5;
        if (!(s->flags & CIP_NO_HEADER))
                cip_header_size = sizeof(__be32) * 2;

        return cip_header_size +
                s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
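
/*
 * For example, a stream configured by amdtp_stream_set_parameters() for
 * 48000 Hz (so SYT_INTERVAL is 8) with 10 quadlets per data block and no
 * special flags yields a maximum payload of:
 *   8 (two CIP quadlets) + 8 * 10 * sizeof(__be32) = 328 bytes per packet.
 */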

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
        tasklet_kill(&s->period_tasklet);
        s->pcm_buffer_pointer = 0;
        s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(unsigned int *data_block_state,
                                bool is_blocking, bool is_no_info,
                                unsigned int syt_interval, enum cip_sfc sfc)
{
        unsigned int data_blocks;

        /* Blocking mode. */
        if (is_blocking) {
                /* This module generates an empty packet for 'no data'. */
                if (is_no_info)
                        data_blocks = 0;
                else
                        data_blocks = syt_interval;
        /* Non-blocking mode. */
        } else {
                if (!cip_sfc_is_base_44100(sfc)) {
                        // Sample_rate / 8000 is an integer, and precomputed.
                        data_blocks = *data_block_state;
                } else {
                        unsigned int phase = *data_block_state;

                /*
                 * This calculates the number of data blocks per packet so that
                 * 1) the overall rate is correct and exactly synchronized to
                 *    the bus clock, and
                 * 2) packets with a rounded-up number of blocks occur as early
                 *    as possible in the sequence (to prevent underruns of the
                 *    device's buffer).
                 */
                        if (sfc == CIP_SFC_44100)
                                /* 6 6 5 6 5 6 5 ... */
                                data_blocks = 5 + ((phase & 1) ^
                                                   (phase == 0 || phase >= 40));
                        else
                                /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
                                data_blocks = 11 * (sfc >> 1) + (phase == 0);
                        if (++phase >= (80 >> (sfc >> 1)))
                                phase = 0;
                        *data_block_state = phase;
                }
        }

        return data_blocks;
}
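
/*
 * A sanity check for the 44.1 kHz case above: over one 80-packet phase cycle,
 * the pattern yields 41 packets with 6 data blocks and 39 packets with 5,
 * i.e. 41 * 6 + 39 * 5 = 441 events, which is exactly 44100 / 8000 * 80.
 */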

static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
                        unsigned int *syt_offset_state, enum cip_sfc sfc)
{
        unsigned int syt_offset;

        if (*last_syt_offset < TICKS_PER_CYCLE) {
                if (!cip_sfc_is_base_44100(sfc))
                        syt_offset = *last_syt_offset + *syt_offset_state;
                else {
                /*
                 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
                 *   n * SYT_INTERVAL * 24576000 / sample_rate
                 * Modulo TICKS_PER_CYCLE, the difference between successive
                 * elements is about 1386.23.  Rounding the results of this
                 * formula to the SYT precision results in a sequence of
                 * differences that begins with:
                 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
                 * This code generates _exactly_ the same sequence.
                 */
                        unsigned int phase = *syt_offset_state;
                        unsigned int index = phase % 13;

                        syt_offset = *last_syt_offset;
                        syt_offset += 1386 + ((index && !(index & 3)) ||
                                              phase == 146);
                        if (++phase >= 147)
                                phase = 0;
                        *syt_offset_state = phase;
                }
        } else
                syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
        *last_syt_offset = syt_offset;

        if (syt_offset >= TICKS_PER_CYCLE)
                syt_offset = CIP_SYT_NO_INFO;

        return syt_offset;
}
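
/*
 * A sanity check for the 44.1 kHz family above: one full 147-step phase cycle
 * adds 147 * 1386 + 34 = 203776 ticks. Together with the 147 subtractions of
 * TICKS_PER_CYCLE, this amounts to 203776 + 147 * 3072 = 655360 ticks, which
 * equals 147 * 8 * 24576000 / 44100 exactly, so the sequence never drifts.
 */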

static void update_pcm_pointers(struct amdtp_stream *s,
                                struct snd_pcm_substream *pcm,
                                unsigned int frames)
{
        unsigned int ptr;

        ptr = s->pcm_buffer_pointer + frames;
        if (ptr >= pcm->runtime->buffer_size)
                ptr -= pcm->runtime->buffer_size;
        WRITE_ONCE(s->pcm_buffer_pointer, ptr);

        s->pcm_period_pointer += frames;
        if (s->pcm_period_pointer >= pcm->runtime->period_size) {
                s->pcm_period_pointer -= pcm->runtime->period_size;
                tasklet_hi_schedule(&s->period_tasklet);
        }
}

static void pcm_period_tasklet(struct tasklet_struct *t)
{
        struct amdtp_stream *s = from_tasklet(s, t, period_tasklet);
        struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

        if (pcm)
                snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
                        bool sched_irq)
{
        int err;

        params->interrupt = sched_irq;
        params->tag = s->tag;
        params->sy = 0;

        err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
                                   s->buffer.packets[s->packet_index].offset);
        if (err < 0) {
                dev_err(&s->unit->device, "queueing error: %d\n", err);
                goto end;
        }

        if (++s->packet_index >= s->queue_size)
                s->packet_index = 0;
end:
        return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
                                   struct fw_iso_packet *params, bool sched_irq)
{
        params->skip =
                !!(params->header_length == 0 && params->payload_length == 0);
        return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
                                  struct fw_iso_packet *params)
{
        // Queue one packet for IR context.
        params->header_length = s->ctx_data.tx.ctx_header_size;
        params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
        params->skip = false;
        return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
                        unsigned int data_block_counter, unsigned int syt)
{
        cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
                                (s->data_block_quadlets << CIP_DBS_SHIFT) |
                                ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
                                data_block_counter);
        cip_header[1] = cpu_to_be32(CIP_EOH |
                        ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
                        ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
                        (syt & CIP_SYT_MASK));
}
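
/*
 * For example, with source node ID 1, two quadlets per data block, no SPH,
 * DBC 0x40, FDF 0x02 and SYT 0x1234, the function above produces:
 *   cip_header[0] = 0x01020040
 *   cip_header[1] = 0x90021234 (EOH | FMT 0x10 | FDF 0x02 | SYT 0x1234)
 */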

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
                                struct fw_iso_packet *params,
                                unsigned int data_blocks,
                                unsigned int data_block_counter,
                                unsigned int syt, unsigned int index)
{
        unsigned int payload_length;
        __be32 *cip_header;

        payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
        params->payload_length = payload_length;

        if (!(s->flags & CIP_NO_HEADER)) {
                cip_header = (__be32 *)params->header;
                generate_cip_header(s, cip_header, data_block_counter, syt);
                params->header_length = 2 * sizeof(__be32);
                payload_length += params->header_length;
        } else {
                cip_header = NULL;
        }

        trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
                           data_block_counter, index);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
                            unsigned int payload_length,
                            unsigned int *data_blocks,
                            unsigned int *data_block_counter, unsigned int *syt)
{
        u32 cip_header[2];
        unsigned int sph;
        unsigned int fmt;
        unsigned int fdf;
        unsigned int dbc;
        bool lost;

        cip_header[0] = be32_to_cpu(buf[0]);
        cip_header[1] = be32_to_cpu(buf[1]);

        /*
         * This module supports a 'two-quadlet CIP header with SYT field'.
         * For convenience, also check whether the FMT field is AM824 or not.
         */
        if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
             ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
            (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
                dev_info_ratelimited(&s->unit->device,
                                "Invalid CIP header for AMDTP: %08X:%08X\n",
                                cip_header[0], cip_header[1]);
                return -EAGAIN;
        }

        /* Check whether the protocol is valid or not. */
        sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
        fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
        if (sph != s->sph || fmt != s->fmt) {
                dev_info_ratelimited(&s->unit->device,
                                     "Detected unexpected protocol: %08x %08x\n",
                                     cip_header[0], cip_header[1]);
                return -EAGAIN;
        }

        /* Calculate the number of data blocks. */
        fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
        if (payload_length < sizeof(__be32) * 2 ||
            (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
                *data_blocks = 0;
        } else {
                unsigned int data_block_quadlets =
                                (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
                /* avoid division by zero */
                if (data_block_quadlets == 0) {
                        dev_err(&s->unit->device,
                                "Detected invalid value in dbs field: %08X\n",
                                cip_header[0]);
                        return -EPROTO;
                }
                if (s->flags & CIP_WRONG_DBS)
                        data_block_quadlets = s->data_block_quadlets;

                *data_blocks = (payload_length / sizeof(__be32) - 2) /
                                                        data_block_quadlets;
        }

        /* Check data block counter continuity. */
        dbc = cip_header[0] & CIP_DBC_MASK;
        if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
            *data_block_counter != UINT_MAX)
                dbc = *data_block_counter;

        if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
            *data_block_counter == UINT_MAX) {
                lost = false;
        } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
                lost = dbc != *data_block_counter;
        } else {
                unsigned int dbc_interval;

                if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
                        dbc_interval = s->ctx_data.tx.dbc_interval;
                else
                        dbc_interval = *data_blocks;

                lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
        }

        if (lost) {
                dev_err(&s->unit->device,
                        "Detected discontinuity of CIP: %02X %02X\n",
                        *data_block_counter, dbc);
                return -EIO;
        }

        *data_block_counter = dbc;

        *syt = cip_header[1] & CIP_SYT_MASK;

        return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
                               const __be32 *ctx_header,
                               unsigned int *payload_length,
                               unsigned int *data_blocks,
                               unsigned int *data_block_counter,
                               unsigned int *syt, unsigned int index)
{
        const __be32 *cip_header;
        int err;

        *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
        if (*payload_length > s->ctx_data.tx.ctx_header_size +
                                        s->ctx_data.tx.max_ctx_payload_length) {
                dev_err(&s->unit->device,
                        "Detected jumbo payload: %04x %04x\n",
                        *payload_length, s->ctx_data.tx.max_ctx_payload_length);
                return -EIO;
        }

        if (!(s->flags & CIP_NO_HEADER)) {
                cip_header = ctx_header + 2;
                err = check_cip_header(s, cip_header, *payload_length,
                                       data_blocks, data_block_counter, syt);
                if (err < 0)
                        return err;
        } else {
                cip_header = NULL;
                err = 0;
                *data_blocks = *payload_length / sizeof(__be32) /
                               s->data_block_quadlets;
                *syt = 0;

                if (*data_block_counter == UINT_MAX)
                        *data_block_counter = 0;
        }

        trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
                           *data_block_counter, index);

        return err;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
// second field. On the other hand, in the DMA descriptors of 1394 OHCI, only
// 3 bits are used to represent it. Thus, via the Linux FireWire subsystem,
// only those 3 bits of the second field are available.
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
        u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
        return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}
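
/*
 * For example, a tstamp field of 0xa4d2 decodes to second 5 (0xa4d2 >> 13
 * gives 0b101) and cycle 1234 (0xa4d2 & 0x1fff), i.e. a cycle count of
 * 5 * 8000 + 1234 = 41234.
 */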

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
        cycle += addend;
        if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
                cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
        return cycle;
}

// Align to the actual cycle count for the packet which is going to be
// scheduled. This module queues the same number of isochronous packets as the
// queue size, one per isochronous cycle, therefore it's OK to just increment
// the cycle count by the queue size to get the scheduled cycle.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
                                   unsigned int queue_size)
{
        u32 cycle = compute_cycle_count(ctx_header_tstamp);
        return increment_cycle_count(cycle, queue_size);
}

static int generate_device_pkt_descs(struct amdtp_stream *s,
                                     struct pkt_desc *descs,
                                     const __be32 *ctx_header,
                                     unsigned int packets)
{
        unsigned int dbc = s->data_block_counter;
        int i;
        int err;

        for (i = 0; i < packets; ++i) {
                struct pkt_desc *desc = descs + i;
                unsigned int index = (s->packet_index + i) % s->queue_size;
                unsigned int cycle;
                unsigned int payload_length;
                unsigned int data_blocks;
                unsigned int syt;

                cycle = compute_cycle_count(ctx_header[1]);

                err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
                                          &data_blocks, &dbc, &syt, i);
                if (err < 0)
                        return err;

                desc->cycle = cycle;
                desc->syt = syt;
                desc->data_blocks = data_blocks;
                desc->data_block_counter = dbc;
                desc->ctx_payload = s->buffer.packets[index].buffer;

                if (!(s->flags & CIP_DBC_IS_END_EVENT))
                        dbc = (dbc + desc->data_blocks) & 0xff;

                ctx_header +=
                        s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
        }

        s->data_block_counter = dbc;

        return 0;
}

static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
                                unsigned int transfer_delay)
{
        unsigned int syt;

        syt_offset += transfer_delay;
        syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
              (syt_offset % TICKS_PER_CYCLE);
        return syt & CIP_SYT_MASK;
}
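
/*
 * The computed SYT keeps the low 4 bits of the cycle in bits 15-12 and the
 * offset within the cycle in bits 11-0. For example, cycle 100 with
 * syt_offset 4000 and transfer_delay 0 lands in cycle 101 at offset
 * 4000 - 3072 = 928, so the field is ((101 & 0xf) << 12) | 928 = 0x53a0.
 */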

static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
                               const __be32 *ctx_header, unsigned int packets,
                               const struct seq_desc *seq_descs,
                               unsigned int seq_size)
{
        unsigned int dbc = s->data_block_counter;
        unsigned int seq_index = s->ctx_data.rx.seq_index;
        int i;

        for (i = 0; i < packets; ++i) {
                struct pkt_desc *desc = descs + i;
                unsigned int index = (s->packet_index + i) % s->queue_size;
                const struct seq_desc *seq = seq_descs + seq_index;
                unsigned int syt;

                desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);

                syt = seq->syt_offset;
                if (syt != CIP_SYT_NO_INFO) {
                        syt = compute_syt(syt, desc->cycle,
                                          s->ctx_data.rx.transfer_delay);
                }
                desc->syt = syt;
                desc->data_blocks = seq->data_blocks;

                if (s->flags & CIP_DBC_IS_END_EVENT)
                        dbc = (dbc + desc->data_blocks) & 0xff;

                desc->data_block_counter = dbc;

                if (!(s->flags & CIP_DBC_IS_END_EVENT))
                        dbc = (dbc + desc->data_blocks) & 0xff;

                desc->ctx_payload = s->buffer.packets[index].buffer;

                seq_index = (seq_index + 1) % seq_size;

                ++ctx_header;
        }

        s->data_block_counter = dbc;
        s->ctx_data.rx.seq_index = seq_index;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
        s->packet_index = -1;
        if (in_interrupt())
                amdtp_stream_pcm_abort(s);
        WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void process_ctx_payloads(struct amdtp_stream *s,
                                 const struct pkt_desc *descs,
                                 unsigned int packets)
{
        struct snd_pcm_substream *pcm;
        unsigned int pcm_frames;

        pcm = READ_ONCE(s->pcm);
        pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
        if (pcm)
                update_pcm_pointers(s, pcm, pcm_frames);
}

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
                                size_t header_length, void *header,
                                void *private_data)
{
        struct amdtp_stream *s = private_data;
        const struct amdtp_domain *d = s->domain;
        const __be32 *ctx_header = header;
        unsigned int events_per_period = s->ctx_data.rx.events_per_period;
        unsigned int event_count = s->ctx_data.rx.event_count;
        unsigned int packets;
        int i;

        if (s->packet_index < 0)
                return;

        // Calculate the number of packets in the buffer and check for XRUN.
        packets = header_length / sizeof(*ctx_header);

        generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
                           d->seq_size);

        process_ctx_payloads(s, s->pkt_descs, packets);

        for (i = 0; i < packets; ++i) {
                const struct pkt_desc *desc = s->pkt_descs + i;
                unsigned int syt;
                struct {
                        struct fw_iso_packet params;
                        __be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
                } template = { {0}, {0} };
                bool sched_irq = false;

                if (s->ctx_data.rx.syt_override < 0)
                        syt = desc->syt;
                else
                        syt = s->ctx_data.rx.syt_override;

                build_it_pkt_header(s, desc->cycle, &template.params,
                                    desc->data_blocks, desc->data_block_counter,
                                    syt, i);

                if (s == s->domain->irq_target) {
                        event_count += desc->data_blocks;
                        if (event_count >= events_per_period) {
                                event_count -= events_per_period;
                                sched_irq = true;
                        }
                }

                if (queue_out_packet(s, &template.params, sched_irq) < 0) {
                        cancel_stream(s);
                        return;
                }
        }

        s->ctx_data.rx.event_count = event_count;
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
                               size_t header_length, void *header,
                               void *private_data)
{
        struct amdtp_stream *s = private_data;
        __be32 *ctx_header = header;
        unsigned int packets;
        int i;
        int err;

        if (s->packet_index < 0)
                return;

        // Calculate the number of packets in the buffer and check for XRUN.
        packets = header_length / s->ctx_data.tx.ctx_header_size;

        err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
        if (err < 0) {
                if (err != -EAGAIN) {
                        cancel_stream(s);
                        return;
                }
        } else {
                process_ctx_payloads(s, s->pkt_descs, packets);
        }

        for (i = 0; i < packets; ++i) {
                struct fw_iso_packet params = {0};

                if (queue_in_packet(s, &params) < 0) {
                        cancel_stream(s);
                        return;
                }
        }
}

static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
{
        struct amdtp_stream *irq_target = d->irq_target;
        unsigned int seq_tail = d->seq_tail;
        unsigned int seq_size = d->seq_size;
        unsigned int min_avail;
        struct amdtp_stream *s;

        min_avail = d->seq_size;
        list_for_each_entry(s, &d->streams, list) {
                unsigned int seq_index;
                unsigned int avail;

                if (s->direction == AMDTP_IN_STREAM)
                        continue;

                seq_index = s->ctx_data.rx.seq_index;
                avail = d->seq_tail;
                if (seq_index > avail)
                        avail += d->seq_size;
                avail -= seq_index;

                if (avail < min_avail)
                        min_avail = avail;
        }

        while (min_avail < packets) {
                struct seq_desc *desc = d->seq_descs + seq_tail;

                desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
                                        &d->syt_offset_state, irq_target->sfc);
                desc->data_blocks = calculate_data_blocks(&d->data_block_state,
                                !!(irq_target->flags & CIP_BLOCKING),
                                desc->syt_offset == CIP_SYT_NO_INFO,
                                irq_target->syt_interval, irq_target->sfc);

                ++seq_tail;
                seq_tail %= seq_size;

                ++min_avail;
        }

        d->seq_tail = seq_tail;
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
                                size_t header_length, void *header,
                                void *private_data)
{
        struct amdtp_stream *irq_target = private_data;
        struct amdtp_domain *d = irq_target->domain;
        unsigned int packets = header_length / sizeof(__be32);
        struct amdtp_stream *s;

        // Record enough entries, with at least 3 extra cycles.
        pool_ideal_seq_descs(d, packets + 3);

        out_stream_callback(context, tstamp, header_length, header, irq_target);
        if (amdtp_streaming_error(irq_target))
                goto error;

        list_for_each_entry(s, &d->streams, list) {
                if (s != irq_target && amdtp_stream_running(s)) {
                        fw_iso_context_flush_completions(s->context);
                        if (amdtp_streaming_error(s))
                                goto error;
                }
        }

        return;
error:
        if (amdtp_stream_running(irq_target))
                cancel_stream(irq_target);

        list_for_each_entry(s, &d->streams, list) {
                if (amdtp_stream_running(s))
                        cancel_stream(s);
        }
}

// This is executed just once, at the first callback of the context.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
                                        u32 tstamp, size_t header_length,
                                        void *header, void *private_data)
{
        struct amdtp_stream *s = private_data;
        const __be32 *ctx_header = header;
        u32 cycle;

        /*
         * For an IR context, the first packet has arrived.
         * For an IT context, the first packet is prepared for transmission.
         */
        s->callbacked = true;
        wake_up(&s->callback_wait);

        if (s->direction == AMDTP_IN_STREAM) {
                cycle = compute_cycle_count(ctx_header[1]);

                context->callback.sc = in_stream_callback;
        } else {
                cycle = compute_it_cycle(*ctx_header, s->queue_size);

                if (s == s->domain->irq_target)
                        context->callback.sc = irq_target_callback;
                else
                        context->callback.sc = out_stream_callback;
        }

        s->start_cycle = cycle;

        context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @start_cycle: the isochronous cycle to start the context. Start immediately
 *               if a negative value is given.
 * @queue_size: the number of packets in the queue.
 * @idle_irq_interval: the interval at which to queue packets with interrupt
 *                     during the initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
                              int start_cycle, unsigned int queue_size,
                              unsigned int idle_irq_interval)
{
        bool is_irq_target = (s == s->domain->irq_target);
        unsigned int ctx_header_size;
        unsigned int max_ctx_payload_size;
        enum dma_data_direction dir;
        int type, tag, err;

        mutex_lock(&s->mutex);

        if (WARN_ON(amdtp_stream_running(s) ||
                    (s->data_block_quadlets < 1))) {
                err = -EBADFD;
                goto err_unlock;
        }

        if (s->direction == AMDTP_IN_STREAM) {
                // NOTE: an IT context should be used for a constant IRQ rate.
                if (is_irq_target) {
                        err = -EINVAL;
                        goto err_unlock;
                }

                s->data_block_counter = UINT_MAX;
        } else {
                s->data_block_counter = 0;
        }

        /* initialize packet buffer */
        if (s->direction == AMDTP_IN_STREAM) {
                dir = DMA_FROM_DEVICE;
                type = FW_ISO_CONTEXT_RECEIVE;
                if (!(s->flags & CIP_NO_HEADER))
                        ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
                else
                        ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;

                max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
                                       ctx_header_size;
        } else {
                dir = DMA_TO_DEVICE;
                type = FW_ISO_CONTEXT_TRANSMIT;
                ctx_header_size = 0;    // No effect for IT context.

                max_ctx_payload_size = amdtp_stream_get_max_payload(s);
                if (!(s->flags & CIP_NO_HEADER))
                        max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
        }

        err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
                                      max_ctx_payload_size, dir);
        if (err < 0)
                goto err_unlock;
        s->queue_size = queue_size;

        s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
                                          type, channel, speed, ctx_header_size,
                                          amdtp_stream_first_callback, s);
        if (IS_ERR(s->context)) {
                err = PTR_ERR(s->context);
                if (err == -EBUSY)
                        dev_err(&s->unit->device,
                                "no free stream on this controller\n");
                goto err_buffer;
        }

        amdtp_stream_update(s);

        if (s->direction == AMDTP_IN_STREAM) {
                s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
                s->ctx_data.tx.ctx_header_size = ctx_header_size;
        }

        if (s->flags & CIP_NO_HEADER)
                s->tag = TAG_NO_CIP_HEADER;
        else
                s->tag = TAG_CIP;

        s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
                               GFP_KERNEL);
        if (!s->pkt_descs) {
                err = -ENOMEM;
                goto err_context;
        }

        s->packet_index = 0;
        do {
                struct fw_iso_packet params;

                if (s->direction == AMDTP_IN_STREAM) {
                        err = queue_in_packet(s, &params);
                } else {
                        bool sched_irq = false;

                        params.header_length = 0;
                        params.payload_length = 0;

                        if (is_irq_target) {
                                sched_irq = !((s->packet_index + 1) %
                                              idle_irq_interval);
                        }

                        err = queue_out_packet(s, &params, sched_irq);
                }
                if (err < 0)
                        goto err_pkt_descs;
        } while (s->packet_index > 0);

        /* NOTE: TAG1 matches CIP. This just affects an IR context. */
        tag = FW_ISO_CONTEXT_MATCH_TAG1;
        if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
                tag |= FW_ISO_CONTEXT_MATCH_TAG0;

        s->callbacked = false;
        err = fw_iso_context_start(s->context, start_cycle, 0, tag);
        if (err < 0)
                goto err_pkt_descs;

        mutex_unlock(&s->mutex);

        return 0;
err_pkt_descs:
        kfree(s->pkt_descs);
err_context:
        fw_iso_context_destroy(s->context);
        s->context = ERR_PTR(-1);
err_buffer:
        iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
        mutex_unlock(&s->mutex);

        return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
                                              struct amdtp_stream *s)
{
        struct amdtp_stream *irq_target = d->irq_target;

        if (irq_target && amdtp_stream_running(irq_target)) {
                // This function is called in the software IRQ context of
                // the period_tasklet or in process context.
                //
                // When the software IRQ context was scheduled by the software
                // IRQ context of the IT contexts, queued packets were already
                // handled. Therefore, there is no need to flush the queue in
                // the buffer any further.
                //
                // When the process context reaches here, some packets will
                // already be queued in the buffer. These packets should be
                // handled immediately to keep better granularity of the PCM
                // pointer.
                //
                // Later, the process context will sometimes schedule the
                // software IRQ context of the period_tasklet. Then there is
                // no need to flush the queue, for the same reason as
                // described above.
                if (!in_interrupt()) {
                        // Queued packets should be processed without any
                        // kernel preemption to keep the latency against the
                        // bus cycle.
                        preempt_disable();
                        fw_iso_context_flush_completions(irq_target->context);
                        preempt_enable();
                }
        }

        return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
        struct amdtp_stream *irq_target = d->irq_target;

        // Process isochronous packets for recent isochronous cycles to handle
        // queued PCM frames.
        if (irq_target && amdtp_stream_running(irq_target)) {
                // Queued packets should be processed without any kernel
                // preemption to keep the latency against the bus cycle.
                preempt_disable();
                fw_iso_context_flush_completions(irq_target->context);
                preempt_enable();
        }

        return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
        /* Precomputing. */
        WRITE_ONCE(s->source_node_id_field,
                   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
        mutex_lock(&s->mutex);

        if (!amdtp_stream_running(s)) {
                mutex_unlock(&s->mutex);
                return;
        }

        tasklet_kill(&s->period_tasklet);
        fw_iso_context_stop(s->context);
        fw_iso_context_destroy(s->context);
        s->context = ERR_PTR(-1);
        iso_packets_buffer_destroy(&s->buffer, s->unit);
        kfree(s->pkt_descs);

        s->callbacked = false;

        mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
        struct snd_pcm_substream *pcm;

        pcm = READ_ONCE(s->pcm);
        if (pcm)
                snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
        INIT_LIST_HEAD(&d->streams);

        d->events_per_period = 0;

        d->seq_descs = NULL;

        return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
        // At present there is nothing to do.
        return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register an isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
                            int channel, int speed)
{
        struct amdtp_stream *tmp;

        list_for_each_entry(tmp, &d->streams, list) {
                if (s == tmp)
                        return -EBUSY;
        }

        list_add(&s->list, &d->streams);

        s->channel = channel;
        s->speed = speed;
        s->domain = d;

        return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
{
        int generation;
        int rcode;
        __be32 reg;
        u32 data;

        // This is a request to the local 1394 OHCI controller and is expected
        // to complete without waiting for any event.
        generation = fw_card->generation;
        smp_rmb();      // node_id vs. generation.
        rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
                                   fw_card->node_id, generation, SCODE_100,
                                   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
                                   &reg, sizeof(reg));
        if (rcode != RCODE_COMPLETE)
                return -EIO;

        data = be32_to_cpu(reg);
        *cur_cycle = data >> 12;

        return 0;
}
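
/*
 * The CYCLE_TIME register packs the second in bits 31-25, the cycle in bits
 * 24-12 and the cycle offset in bits 11-0, so dropping the lower 12 bits
 * above leaves the concatenated second and cycle fields.
 */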

/**
 * amdtp_domain_start - start sending packets for isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @ir_delay_cycle: the cycle delay to start all IR contexts.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
        static const struct {
                unsigned int data_block;
                unsigned int syt_offset;
        } *entry, initial_state[] = {
                [CIP_SFC_32000]  = {  4, 3072 },
                [CIP_SFC_48000]  = {  6, 1024 },
                [CIP_SFC_96000]  = { 12, 1024 },
                [CIP_SFC_192000] = { 24, 1024 },
                [CIP_SFC_44100]  = {  0,   67 },
                [CIP_SFC_88200]  = {  0,   67 },
                [CIP_SFC_176400] = {  0,   67 },
        };
        unsigned int events_per_buffer = d->events_per_buffer;
        unsigned int events_per_period = d->events_per_period;
        unsigned int idle_irq_interval;
        unsigned int queue_size;
        struct amdtp_stream *s;
        bool found = false;
        int cycle;
        int err;

        // Select an IT context as IRQ target. Note that the list cursor is
        // never NULL after list_for_each_entry(), so track a found flag
        // explicitly.
        list_for_each_entry(s, &d->streams, list) {
                if (s->direction == AMDTP_OUT_STREAM) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return -ENXIO;
        d->irq_target = s;

        // This is the case that the AMDTP streams in the domain run just for
        // a MIDI substream. Use the number of events equivalent to 10 msec as
        // the interval of hardware IRQs.
        if (events_per_period == 0)
                events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
        if (events_per_buffer == 0)
                events_per_buffer = events_per_period * 3;

        queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
                                  amdtp_rate_table[d->irq_target->sfc]);
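        // For example, when the domain runs just for MIDI at 48.0 kHz, the
        // defaults above give events_per_period = 480 and events_per_buffer =
        // 1440, thus queue_size = DIV_ROUND_UP(8000 * 1440, 48000) = 240.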

        d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
        if (!d->seq_descs)
                return -ENOMEM;
        d->seq_size = queue_size;
        d->seq_tail = 0;

        entry = &initial_state[s->sfc];
        d->data_block_state = entry->data_block;
        d->syt_offset_state = entry->syt_offset;
        d->last_syt_offset = TICKS_PER_CYCLE;

        if (ir_delay_cycle > 0) {
                struct fw_card *fw_card = fw_parent_device(s->unit)->card;

                err = get_current_cycle_time(fw_card, &cycle);
                if (err < 0)
                        goto error;

                // No need to care about overflow in the cycle field because
                // of its enough width.
                cycle += ir_delay_cycle;

                // Round up to the sec field.
                if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
                        unsigned int sec;

                        // The sec field can overflow.
                        sec = (cycle & 0xffffe000) >> 13;
                        cycle = (++sec << 13) |
                                ((cycle & 0x00001fff) % CYCLES_PER_SECOND);
                }

                // In the OHCI 1394 specification, the lower 2 bits are
                // available for the sec field.
                cycle &= 0x00007fff;
        } else {
                cycle = -1;
        }

        list_for_each_entry(s, &d->streams, list) {
                int cycle_match;

                if (s->direction == AMDTP_IN_STREAM) {
                        cycle_match = cycle;
                } else {
                        // IT context starts immediately.
                        cycle_match = -1;
                        s->ctx_data.rx.seq_index = 0;
                }

                if (s != d->irq_target) {
                        err = amdtp_stream_start(s, s->channel, s->speed,
                                                 cycle_match, queue_size, 0);
                        if (err < 0)
                                goto error;
                }
        }

        s = d->irq_target;
        s->ctx_data.rx.events_per_period = events_per_period;
        s->ctx_data.rx.event_count = 0;
        s->ctx_data.rx.seq_index = 0;

        idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
                                         amdtp_rate_table[d->irq_target->sfc]);
        err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
                                 idle_irq_interval);
        if (err < 0)
                goto error;

        return 0;
error:
        list_for_each_entry(s, &d->streams, list)
                amdtp_stream_stop(s);
        kfree(d->seq_descs);
        d->seq_descs = NULL;
        return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
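
/*
 * A rough lifecycle sketch for the domain API, as seen from a hypothetical
 * unit driver (error handling omitted; "in_s" and "out_s" are streams already
 * prepared with amdtp_stream_init() and amdtp_stream_set_parameters()):
 *
 *	struct amdtp_domain d;
 *
 *	amdtp_domain_init(&d);
 *	amdtp_domain_add_stream(&d, &in_s, in_channel, SCODE_400);
 *	amdtp_domain_add_stream(&d, &out_s, out_channel, SCODE_400);
 *	amdtp_domain_start(&d, 0);
 *	...
 *	amdtp_domain_stop(&d);
 *	amdtp_domain_destroy(&d);
 */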

/**
 * amdtp_domain_stop - stop sending packets for isoc contexts in the same
 *                     domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
        struct amdtp_stream *s, *next;

        if (d->irq_target)
                amdtp_stream_stop(d->irq_target);

        list_for_each_entry_safe(s, next, &d->streams, list) {
                list_del(&s->list);

                if (s != d->irq_target)
                        amdtp_stream_stop(s);
        }

        d->events_per_period = 0;
        d->irq_target = NULL;

        kfree(d->seq_descs);
        d->seq_descs = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);