1
2
3
4
5
6
7
8
9#include <linux/device.h>
10#include <linux/err.h>
11#include <linux/firewire.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <sound/pcm.h>
15#include <sound/pcm_params.h>
16#include "amdtp-stream.h"
17
18#define TICKS_PER_CYCLE 3072
19#define CYCLES_PER_SECOND 8000
20#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
21
22
23#define CREATE_TRACE_POINTS
24#include "amdtp-stream-trace.h"
25
26#define TRANSFER_DELAY_TICKS 0x2e00
27
28
29#define ISO_DATA_LENGTH_SHIFT 16
30#define TAG_NO_CIP_HEADER 0
31#define TAG_CIP 1
32
33
34#define CIP_EOH_SHIFT 31
35#define CIP_EOH (1u << CIP_EOH_SHIFT)
36#define CIP_EOH_MASK 0x80000000
37#define CIP_SID_SHIFT 24
38#define CIP_SID_MASK 0x3f000000
39#define CIP_DBS_MASK 0x00ff0000
40#define CIP_DBS_SHIFT 16
41#define CIP_SPH_MASK 0x00000400
42#define CIP_SPH_SHIFT 10
43#define CIP_DBC_MASK 0x000000ff
44#define CIP_FMT_SHIFT 24
45#define CIP_FMT_MASK 0x3f000000
46#define CIP_FDF_MASK 0x00ff0000
47#define CIP_FDF_SHIFT 16
48#define CIP_SYT_MASK 0x0000ffff
49#define CIP_SYT_NO_INFO 0xffff
50
51
52#define CIP_FMT_AM 0x10
53#define AMDTP_FDF_NO_DATA 0xff
54
55
56#define INTERRUPT_INTERVAL 16
57#define QUEUE_LENGTH 48
58
59#define IN_PACKET_HEADER_SIZE 4
60#define OUT_PACKET_HEADER_SIZE 0
61
62static void pcm_period_tasklet(unsigned long data);
63
64
65
66
67
68
69
70
71
72
73
74int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
75 enum amdtp_stream_direction dir, enum cip_flags flags,
76 unsigned int fmt,
77 amdtp_stream_process_data_blocks_t process_data_blocks,
78 unsigned int protocol_size)
79{
80 if (process_data_blocks == NULL)
81 return -EINVAL;
82
83 s->protocol = kzalloc(protocol_size, GFP_KERNEL);
84 if (!s->protocol)
85 return -ENOMEM;
86
87 s->unit = unit;
88 s->direction = dir;
89 s->flags = flags;
90 s->context = ERR_PTR(-1);
91 mutex_init(&s->mutex);
92 tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
93 s->packet_index = 0;
94
95 init_waitqueue_head(&s->callback_wait);
96 s->callbacked = false;
97
98 s->fmt = fmt;
99 s->process_data_blocks = process_data_blocks;
100
101 return 0;
102}
103EXPORT_SYMBOL(amdtp_stream_init);
104
105
106
107
108
109void amdtp_stream_destroy(struct amdtp_stream *s)
110{
111
112 if (s->protocol == NULL)
113 return;
114
115 WARN_ON(amdtp_stream_running(s));
116 kfree(s->protocol);
117 mutex_destroy(&s->mutex);
118}
119EXPORT_SYMBOL(amdtp_stream_destroy);
120
/* Number of events (data blocks) per SYT_INTERVAL, indexed by the sampling
 * frequency code (enum cip_sfc).
 */
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000] = 8,
	[CIP_SFC_44100] = 8,
	[CIP_SFC_48000] = 8,
	[CIP_SFC_88200] = 16,
	[CIP_SFC_96000] = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);
131
/* Nominal sampling rate in Hz, indexed by the sampling frequency code
 * (enum cip_sfc).
 */
const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000] = 32000,
	[CIP_SFC_44100] = 44100,
	[CIP_SFC_48000] = 48000,
	[CIP_SFC_88200] = 88200,
	[CIP_SFC_96000] = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
142
143static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
144 struct snd_pcm_hw_rule *rule)
145{
146 struct snd_interval *s = hw_param_interval(params, rule->var);
147 const struct snd_interval *r =
148 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
149 struct snd_interval t = {0};
150 unsigned int step = 0;
151 int i;
152
153 for (i = 0; i < CIP_SFC_COUNT; ++i) {
154 if (snd_interval_test(r, amdtp_rate_table[i]))
155 step = max(step, amdtp_syt_intervals[i]);
156 }
157
158 t.min = roundup(s->min, step);
159 t.max = rounddown(s->max, step);
160 t.integer = 1;
161
162 return snd_interval_refine(s, &t);
163}
164
165
166
167
168
169
170int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
171 struct snd_pcm_runtime *runtime)
172{
173 struct snd_pcm_hardware *hw = &runtime->hw;
174 int err;
175
176 hw->info = SNDRV_PCM_INFO_BATCH |
177 SNDRV_PCM_INFO_BLOCK_TRANSFER |
178 SNDRV_PCM_INFO_INTERLEAVED |
179 SNDRV_PCM_INFO_JOINT_DUPLEX |
180 SNDRV_PCM_INFO_MMAP |
181 SNDRV_PCM_INFO_MMAP_VALID;
182
183
184 hw->periods_min = 2;
185 hw->periods_max = UINT_MAX;
186
187
188 hw->period_bytes_min = 4 * hw->channels_max;
189
190
191 hw->period_bytes_max = hw->period_bytes_min * 2048;
192 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
193
194
195
196
197
198
199
200
201
202
203
204 err = snd_pcm_hw_constraint_minmax(runtime,
205 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
206 5000, UINT_MAX);
207 if (err < 0)
208 goto end;
209
210
211 if (!(s->flags & CIP_BLOCKING))
212 goto end;
213
214
215
216
217
218
219
220 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
221 apply_constraint_to_size, NULL,
222 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
223 SNDRV_PCM_HW_PARAM_RATE, -1);
224 if (err < 0)
225 goto end;
226 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
227 apply_constraint_to_size, NULL,
228 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
229 SNDRV_PCM_HW_PARAM_RATE, -1);
230 if (err < 0)
231 goto end;
232end:
233 return err;
234}
235EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
236
237
238
239
240
241
242
243
244
245
246int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
247 unsigned int data_block_quadlets)
248{
249 unsigned int sfc;
250
251 for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
252 if (amdtp_rate_table[sfc] == rate)
253 break;
254 }
255 if (sfc == ARRAY_SIZE(amdtp_rate_table))
256 return -EINVAL;
257
258 s->sfc = sfc;
259 s->data_block_quadlets = data_block_quadlets;
260 s->syt_interval = amdtp_syt_intervals[sfc];
261
262
263 s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
264 if (s->flags & CIP_BLOCKING)
265
266 s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
267
268 return 0;
269}
270EXPORT_SYMBOL(amdtp_stream_set_parameters);
271
272
273
274
275
276
277
278
279unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
280{
281 unsigned int multiplier = 1;
282 unsigned int header_size = 0;
283
284 if (s->flags & CIP_JUMBO_PAYLOAD)
285 multiplier = 5;
286 if (!(s->flags & CIP_NO_HEADER))
287 header_size = 8;
288
289 return header_size +
290 s->syt_interval * s->data_block_quadlets * 4 * multiplier;
291}
292EXPORT_SYMBOL(amdtp_stream_get_max_payload);
293
294
295
296
297
298
299
/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * Cancels any pending period notification and rewinds the PCM pointers.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
307
/* Compute the number of data blocks (events) to put into the next outgoing
 * packet, updating the per-stream sequencer state for non-blocking mode.
 */
static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode: packets carry either 0 or syt_interval events. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			/* Sample rate / 8000 is an integer, precomputed. */
			data_blocks = s->data_block_state;
		} else {
			phase = s->data_block_state;

		/*
		 * For 44.1 kHz-based rates the events per cycle is not an
		 * integer, so a repeating phase pattern distributes the
		 * rounded-up packets as early as possible in the sequence
		 * (keeps the data rate close to the sample rate and avoids
		 * device buffer underruns).
		 */
			if (s->sfc == CIP_SFC_44100)
				/* sequence: 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 ... (88.2k) or 23 22 22 ... (176.4k) */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			/* Phase period shrinks as the rate doubles. */
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->data_block_state = phase;
		}
	}

	return data_blocks;
}
351
/* Compute the SYT timestamp for a packet transmitted in the given cycle, or
 * CIP_SYT_NO_INFO for a packet carrying no event, updating the per-stream
 * syt-offset sequencer state.
 */
static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->last_syt_offset + s->syt_offset_state;
		else {
		/*
		 * For 44.1 kHz-based rates the ideal per-packet increment is
		 * about 1386.23 ticks; this 147-step phase pattern generates
		 * exactly the rounded sequence 1386 1386 1387 1386 ... so the
		 * accumulated offset tracks n * 24576000 / sample_rate.
		 */
			phase = s->syt_offset_state;
			index = phase % 13;
			syt_offset = s->last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->syt_offset_state = phase;
		}
	} else
		/* Previous offset was past this cycle; carry it forward. */
		syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
	s->last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		/* Add the transfer delay, then encode as 4-bit cycle count
		 * plus 12-bit cycle offset (masked to the 16-bit SYT field).
		 */
		syt_offset += s->transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		/* No event in this packet. */
		return CIP_SYT_NO_INFO;
	}
}
394
395static void update_pcm_pointers(struct amdtp_stream *s,
396 struct snd_pcm_substream *pcm,
397 unsigned int frames)
398{
399 unsigned int ptr;
400
401 ptr = s->pcm_buffer_pointer + frames;
402 if (ptr >= pcm->runtime->buffer_size)
403 ptr -= pcm->runtime->buffer_size;
404 WRITE_ONCE(s->pcm_buffer_pointer, ptr);
405
406 s->pcm_period_pointer += frames;
407 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
408 s->pcm_period_pointer -= pcm->runtime->period_size;
409 tasklet_hi_schedule(&s->period_tasklet);
410 }
411}
412
413static void pcm_period_tasklet(unsigned long data)
414{
415 struct amdtp_stream *s = (void *)data;
416 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
417
418 if (pcm)
419 snd_pcm_period_elapsed(pcm);
420}
421
/* Queue one isochronous packet at the current packet index; a zero
 * payload_length queues a skip packet. Advances packet_index modulo
 * QUEUE_LENGTH on success. Returns 0 or a negative error code.
 */
static int queue_packet(struct amdtp_stream *s, unsigned int header_length,
			unsigned int payload_length)
{
	struct fw_iso_packet p = {0};
	int err = 0;

	/* Context already failed/stopped; silently do nothing. */
	if (IS_ERR(s->context))
		goto end;

	/* Raise a hardware IRQ every INTERRUPT_INTERVAL packets. */
	p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
	p.tag = s->tag;
	p.header_length = header_length;
	if (payload_length > 0)
		p.payload_length = payload_length;
	else
		p.skip = true;
	err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	/* Wrap around the ring of QUEUE_LENGTH packet slots. */
	if (++s->packet_index >= QUEUE_LENGTH)
		s->packet_index = 0;
end:
	return err;
}
450
/* Queue one outgoing packet; a payload of zero queues a skip packet. */
static inline int queue_out_packet(struct amdtp_stream *s,
				   unsigned int payload_length)
{
	return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length);
}
456
/* Queue a buffer slot for one incoming packet of up to max_payload_length. */
static inline int queue_in_packet(struct amdtp_stream *s)
{
	return queue_packet(s, IN_PACKET_HEADER_SIZE, s->max_payload_length);
}
461
/* Build and queue one outgoing packet with a two-quadlet CIP header.
 * Returns 0, or -EIO when queueing fails.
 */
static int handle_out_packet(struct amdtp_stream *s,
			     unsigned int payload_length, unsigned int cycle,
			     unsigned int index)
{
	__be32 *buffer;
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int pcm_frames;
	struct snd_pcm_substream *pcm;

	buffer = s->buffer.packets[s->packet_index].buffer;
	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	/* Data blocks start after the 8-byte CIP header (buffer + 2). */
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

	/* When DBC labels the end of events, advance before the header. */
	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter =
				(s->data_block_counter + data_blocks) & 0xff;

	buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				s->data_block_counter);
	buffer[1] = cpu_to_be32(CIP_EOH |
				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				(syt & CIP_SYT_MASK));

	/* Otherwise DBC labels the start of events: advance afterwards. */
	if (!(s->flags & CIP_DBC_IS_END_EVENT))
		s->data_block_counter =
				(s->data_block_counter + data_blocks) & 0xff;
	/* 8 bytes of CIP header plus the data blocks. */
	payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;

	trace_out_packet(s, cycle, buffer, payload_length, index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}
507
/* Build and queue one outgoing packet without a CIP header (CIP_NO_HEADER).
 * Returns 0, or -EIO when queueing fails.
 */
static int handle_out_packet_without_header(struct amdtp_stream *s,
			unsigned int payload_length, unsigned int cycle,
			unsigned int index)
{
	__be32 *buffer;
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int pcm_frames;
	struct snd_pcm_substream *pcm;

	/* No CIP header: data blocks start at the beginning of the buffer. */
	buffer = s->buffer.packets[s->packet_index].buffer;
	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt);
	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;

	/* The payload is the data blocks only. */
	payload_length = data_blocks * 4 * s->data_block_quadlets;

	trace_out_packet_without_header(s, cycle, payload_length, data_blocks,
					index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}
539
/* Parse one incoming packet with a CIP header, process its data blocks, and
 * re-queue the buffer slot. Returns 0, -EPROTO for an invalid DBS field, or
 * -EIO for a DBC discontinuity or a queueing failure.
 */
static int handle_in_packet(struct amdtp_stream *s,
			unsigned int payload_length, unsigned int cycle,
			unsigned int index)
{
	__be32 *buffer;
	u32 cip_header[2];
	unsigned int sph, fmt, fdf, syt;
	unsigned int data_block_quadlets, data_block_counter, dbc_interval;
	unsigned int data_blocks;
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;
	bool lost;

	buffer = s->buffer.packets[s->packet_index].buffer;
	cip_header[0] = be32_to_cpu(buffer[0]);
	cip_header[1] = be32_to_cpu(buffer[1]);

	trace_in_packet(s, cycle, cip_header, payload_length, index);

	/*
	 * This module supports the 'two-quadlet CIP header with SYT field'
	 * layout: EOH clear in the first quadlet, set in the second (unless
	 * the device uses CIP_HEADER_WITHOUT_EOH).
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		/* Drop the payload but keep the stream running. */
		data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Check for the expected protocol (SPH and FMT fields). */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Calculate the number of data blocks in this packet. */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < 12 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		/* Header-only or explicit no-data packet. */
		data_blocks = 0;
	} else {
		data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* Avoid division by zero below. */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		/* Some devices report a wrong DBS; trust our own value. */
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		/* Payload quadlets minus the 2-quadlet CIP header. */
		data_blocks = (payload_length / 4 - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity. */
	data_block_counter = cip_header[0] & CIP_DBC_MASK;
	if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    s->data_block_counter != UINT_MAX)
		data_block_counter = s->data_block_counter;

	/* UINT_MAX marks the very first packet: no continuity check yet. */
	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
	     data_block_counter == s->tx_first_dbc) ||
	    s->data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		/* DBC labels the first event of the packet. */
		lost = data_block_counter != s->data_block_counter;
	} else {
		/* DBC labels the event after the packet's last one. */
		if (data_blocks > 0 && s->tx_dbc_interval > 0)
			dbc_interval = s->tx_dbc_interval;
		else
			dbc_interval = data_blocks;

		lost = data_block_counter !=
		       ((s->data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			s->data_block_counter, data_block_counter);
		return -EIO;
	}

	syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter = data_block_counter;
	else
		s->data_block_counter =
				(data_block_counter + data_blocks) & 0xff;
end:
	/* Hand the slot back to the controller for the next packet. */
	if (queue_in_packet(s) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	return 0;
}
655
656static int handle_in_packet_without_header(struct amdtp_stream *s,
657 unsigned int payload_length, unsigned int cycle,
658 unsigned int index)
659{
660 __be32 *buffer;
661 unsigned int payload_quadlets;
662 unsigned int data_blocks;
663 struct snd_pcm_substream *pcm;
664 unsigned int pcm_frames;
665
666 buffer = s->buffer.packets[s->packet_index].buffer;
667 payload_quadlets = payload_length / 4;
668 data_blocks = payload_quadlets / s->data_block_quadlets;
669
670 trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
671 index);
672
673 pcm_frames = s->process_data_blocks(s, buffer, data_blocks, NULL);
674 s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;
675
676 if (queue_in_packet(s) < 0)
677 return -EIO;
678
679 pcm = READ_ONCE(s->pcm);
680 if (pcm && pcm_frames > 0)
681 update_pcm_pointers(s, pcm, pcm_frames);
682
683 return 0;
684}
685
686
687
688
689
690
691static inline u32 compute_cycle_count(u32 tstamp)
692{
693 return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
694}
695
696static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
697{
698 cycle += addend;
699 if (cycle >= 8 * CYCLES_PER_SECOND)
700 cycle -= 8 * CYCLES_PER_SECOND;
701 return cycle;
702}
703
704static inline u32 decrement_cycle_count(u32 cycle, unsigned int subtrahend)
705{
706 if (cycle < subtrahend)
707 cycle += 8 * CYCLES_PER_SECOND;
708 return cycle - subtrahend;
709}
710
/* Completion callback for an outgoing (IT) context: build and queue one new
 * packet for each completed slot; on error, abort the stream.
 */
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets = header_length / 4;
	u32 cycle;

	/* A negative packet_index means a previous error stopped the stream. */
	if (s->packet_index < 0)
		return;

	cycle = compute_cycle_count(tstamp);

	/* Advance to the cycle of the last queued packet in the ring. */
	cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);

	for (i = 0; i < packets; ++i) {
		cycle = increment_cycle_count(cycle, 1);
		if (s->handle_packet(s, 0, cycle, i) < 0) {
			/* Mark the stream dead and signal an XRUN. */
			s->packet_index = -1;
			if (in_interrupt())
				amdtp_stream_pcm_abort(s);
			WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
			return;
		}
	}

	fw_iso_context_queue_flush(s->context);
}
740
/* Completion callback for an incoming (IR) context: read each completed
 * packet's ISO header for the payload length and dispatch to handle_packet;
 * on error, abort the stream.
 */
static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets;
	unsigned int payload_length, max_payload_length;
	__be32 *headers = header;
	u32 cycle;

	/* A negative packet_index means a previous error stopped the stream. */
	if (s->packet_index < 0)
		return;

	/* The number of packets in this batch. */
	packets = header_length / IN_PACKET_HEADER_SIZE;

	cycle = compute_cycle_count(tstamp);

	/* Rewind to the cycle of the first packet in this batch. */
	cycle = decrement_cycle_count(cycle, packets);

	/* For buffer-over-run prevention. */
	max_payload_length = s->max_payload_length;

	for (i = 0; i < packets; i++) {
		cycle = increment_cycle_count(cycle, 1);

		/* Payload length in bytes, from the ISO packet header. */
		payload_length =
			(be32_to_cpu(headers[i]) >> ISO_DATA_LENGTH_SHIFT);
		if (payload_length > max_payload_length) {
			dev_err(&s->unit->device,
				"Detect jumbo payload: %04x %04x\n",
				payload_length, max_payload_length);
			break;
		}

		if (s->handle_packet(s, payload_length, cycle, i) < 0)
			break;
	}

	/* Queueing error or invalid payload detected: abort the stream. */
	if (i < packets) {
		s->packet_index = -1;
		if (in_interrupt())
			amdtp_stream_pcm_abort(s);
		WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
		return;
	}

	fw_iso_context_queue_flush(s->context);
}
793
794
/* First isochronous callback after the context starts: wake waiters, record
 * the starting cycle, install the steady-state handler for the stream's
 * direction/flags, then process this first batch with it.
 */
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	u32 cycle;
	unsigned int packets;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, we are prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	cycle = compute_cycle_count(tstamp);

	if (s->direction == AMDTP_IN_STREAM) {
		packets = header_length / IN_PACKET_HEADER_SIZE;
		/* Rewind to the cycle of this batch's first packet. */
		cycle = decrement_cycle_count(cycle, packets);
		context->callback.sc = in_stream_callback;
		if (s->flags & CIP_NO_HEADER)
			s->handle_packet = handle_in_packet_without_header;
		else
			s->handle_packet = handle_in_packet;
	} else {
		packets = header_length / 4;
		/* Advance to the cycle of the last queued packet. */
		cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);
		context->callback.sc = out_stream_callback;
		if (s->flags & CIP_NO_HEADER)
			s->handle_packet = handle_out_packet_without_header;
		else
			s->handle_packet = handle_out_packet;
	}

	s->start_cycle = cycle;

	/* Let the installed handler process this batch too. */
	context->callback.sc(context, tstamp, header_length, header, s);
}
834
835
836
837
838
839
840
841
842
843
844
/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters(). Returns 0, or a negative error code.
 */
int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
{
	/* Initial state of the data-block and syt-offset sequencers used by
	 * calculate_data_blocks()/calculate_syt(), per frequency code.
	 */
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} initial_state[] = {
		[CIP_SFC_32000] = { 4, 3072 },
		[CIP_SFC_48000] = { 6, 1024 },
		[CIP_SFC_96000] = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100] = { 0, 67 },
		[CIP_SFC_88200] = { 0, 67 },
		[CIP_SFC_176400] = { 0, 67 },
	};
	unsigned int header_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	/* UINT_MAX marks "first packet": DBC continuity is not checked yet. */
	if (s->direction == AMDTP_IN_STREAM)
		s->data_block_counter = UINT_MAX;
	else
		s->data_block_counter = 0;
	s->data_block_state = initial_state[s->sfc].data_block;
	s->syt_offset_state = initial_state[s->sfc].syt_offset;
	s->last_syt_offset = TICKS_PER_CYCLE;

	/* Initialize the packet buffer. */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		header_size = IN_PACKET_HEADER_SIZE;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		header_size = OUT_PACKET_HEADER_SIZE;
	}
	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      amdtp_stream_get_max_payload(s), dir);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM)
		s->max_payload_length = amdtp_stream_get_max_payload(s);

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	/* Pre-fill the whole packet ring (out-streams queue skip packets). */
	s->packet_index = 0;
	do {
		if (s->direction == AMDTP_IN_STREAM)
			err = queue_in_packet(s);
		else
			err = queue_out_packet(s, 0);
		if (err < 0)
			goto err_context;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_stream_start);
950
951
952
953
954
955
956
/**
 * amdtp_stream_pcm_pointer - get the PCM buffer position
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
{
	/*
	 * This function is called either from the period tasklet's software
	 * IRQ context or from process context.
	 *
	 * In the tasklet case, queued packets were already handled by the
	 * IR/IT context callbacks, so there is no need to flush the queue.
	 *
	 * In the process-context case, some packets may already be completed
	 * but not yet handled; flush them now so the reported pointer has
	 * better granularity. Never flush from interrupt context itself.
	 */
	if (!in_interrupt() && amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
981
982
983
984
985
986
987
/**
 * amdtp_stream_pcm_ack - acknowledge queued PCM frames
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Always returns zero; suitable as the PCM .ack callback.
 */
int amdtp_stream_pcm_ack(struct amdtp_stream *s)
{
	/* Process completed isochronous packets immediately so the queued
	 * PCM frames are handled without waiting for the next interrupt.
	 */
	if (amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_ack);
1000
1001
1002
1003
1004
/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precompute the SID field of the CIP header from the node ID. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
1012
1013
1014
1015
1016
1017
1018
1019
/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream should be stopped before the stream
 * itself is stopped.
 */
void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	/* Kill the tasklet first so it cannot touch the dying context. */
	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_stop);
1040
1041
1042
1043
1044
1045
1046
1047
1048void amdtp_stream_pcm_abort(struct amdtp_stream *s)
1049{
1050 struct snd_pcm_substream *pcm;
1051
1052 pcm = READ_ONCE(s->pcm);
1053 if (pcm)
1054 snd_pcm_stop_xrun(pcm);
1055}
1056EXPORT_SYMBOL(amdtp_stream_pcm_abort);
1057