// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_SECOND_MODULUS	8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */
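
// A worked check of the figure above: 0x2e00 is 11776 ticks, and the bus clock
// runs at TICKS_PER_SECOND = 24576000 Hz, so 11776 / 24576000 s is roughly
// 479.17 microseconds.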

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

// Common Isochronous Packet (CIP) header parameters. Use the two-quadlet CIP header when supported.
#define CIP_HEADER_QUADLETS	2
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_FDF_NO_DATA		0xff
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
#define CIP_SYT_CYCLE_MODULUS	16
#define CIP_NO_DATA		((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)

#define CIP_HEADER_SIZE		(sizeof(__be32) * CIP_HEADER_QUADLETS)
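
// For illustration only (example values, not taken from the code): a two-quadlet
// CIP header for AM824 (fmt 0x10) at 48 kHz (fdf 0x02, assuming the AM824 FDF
// carries the SFC) sent by node 1 with dbs 2, dbc 0x40 and syt 0x1234 reads:
//   quadlet 0: (1 << CIP_SID_SHIFT) | (2 << CIP_DBS_SHIFT) | 0x40 = 0x01020040
//   quadlet 1: CIP_EOH | (0x10 << CIP_FMT_SHIFT) | (0x02 << CIP_FDF_SHIFT) |
//              0x1234 = 0x90021234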

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS	2
// Add nothing.
#define IR_CTX_HEADER_SIZE_NO_CIP	(sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add two quadlets CIP header.
#define IR_CTX_HEADER_SIZE_CIP		(IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP	0	// Nothing.

// The initial firmware of OXFW970 can postpone packet transmission while finishing an
// asynchronous transaction. This module tolerates at most 5 skipped cycles to avoid buffer
// overrun. If the actual device skips more, this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate newly for protocol
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	s->packet_index = 0;

	init_waitqueue_head(&s->ready_wait);

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 8,
	[CIP_SFC_44100]  = 8,
	[CIP_SFC_48000]  = 8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
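
// For example (an illustration, not exercised here directly): when the rate
// interval admits only 48000 Hz, the step becomes 8 frames, so a requested
// period size interval of [100, 1000] frames is refined to [104, 1000].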

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;

	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// The Linux driver for 1394 OHCI controllers voluntarily flushes an isoc
	// context when the total size of accumulated context headers reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings a
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the value of context header in IR context is used for both
	// contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;
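
	// A worked example for the formula above, assuming PAGE_SIZE is 4096:
	// with CIP headers, ctx_header_size is 16 bytes, so the controller
	// flushes after 4096 / 16 = 256 packets, i.e. 256 cycles of 125 usec
	// each, and the bound evaluates to 1000000 * 4096 / 8000 / 16 = 32000.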

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of syt interval. This comes from the interval of isoc cycle. As 1394
	// OHCI controller can generate hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packet
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames by the number of AMDTP
 *			  events.
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

	// additional buffering needed to adjust for no-data packets.
	if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
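
	// As a worked example: for a blocking stream at 48 kHz, the syt interval
	// is 8 frames, so this adds 24576000 * 8 / 48000 = 4096 ticks, roughly
	// 166.7 usec, on top of the default delay above.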

	s->pcm_frame_multiplier = pcm_frame_multiplier;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

// The CIP header is processed in the context header, apart from the context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
	unsigned int multiplier;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;

	return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
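
// For instance (illustrative figures, not tied to a particular driver): a
// stereo AM824 stream at 48 kHz has a syt interval of 8 and 2 quadlets per
// data block, so the maximum context payload is 8 * 2 * 4 = 64 bytes, and
// amdtp_stream_get_max_payload() below adds CIP_HEADER_SIZE (8 bytes) to it.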

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int cip_header_size;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

#define prev_packet_desc(s, desc) \
	list_prev_entry_circular(desc, &s->packet_descs_list, link)

static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
				      unsigned int size, unsigned int pos, unsigned int count)
{
	const unsigned int syt_interval = s->syt_interval;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;

		pos = (pos + 1) % size;
	}
}

static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
					       unsigned int size, unsigned int pos,
					       unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int state = s->ctx_data.rx.data_block_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				desc->data_blocks = 5 + ((phase & 1) ^
							 (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.data_block_state = state;
}
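
// A worked check of the 44.1 kHz pattern above: over one full period of 80
// cycles, phase 0, the odd phases below 40 and the even phases from 40 yield
// 6 blocks each (41 packets), the remaining 39 yield 5, and
// 41 * 6 + 39 * 5 = 441 data blocks per 80 cycles, i.e. exactly 44100 events
// per 8000 cycles (one second).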

static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
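
// The 1386.23 figure can be rederived from the constants in this file: at
// 44100 Hz with a syt interval of 8, successive timestamps differ by
// 8 * 24576000 / 44100 = 4458.2313 ticks, i.e. 1386.2313 modulo
// TICKS_PER_CYCLE. One 147-step period of the sequence above sums to
// 147 * 1386 + 34 = 203776 ticks of increment, and 203776 / 147 recovers
// 1386.2313 exactly.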

static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
				   unsigned int size, unsigned int pos, unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int last = s->ctx_data.rx.last_syt_offset;
	unsigned int state = s->ctx_data.rx.syt_offset_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.last_syt_offset = last;
	s->ctx_data.rx.syt_offset_state = state;
}

static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int syt_offset;

	// Round up.
	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
	syt_cycle_lo -= cycle_lo;

	// Subtract transfer delay so that the synchronization offset is not so large
	// at transmission.
	syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
	if (syt_offset < transfer_delay)
		syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return syt_offset - transfer_delay;
}
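
// An illustrative evaluation (made-up numbers): with syt 0x2345 received in a
// cycle whose low four bits are 1, syt_cycle_lo becomes 2 - 1 = 1, so the raw
// offset is 1 * 3072 + 0x345 = 3909 ticks. With the default transfer delay of
// 8704 ticks this is too small, so 16 * 3072 = 49152 ticks are added before
// the delay is subtracted, giving 3909 + 49152 - 8704 = 44357 ticks.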

// Both the producer and consumer of the queue run on the same IEEE 1394 bus clock.
// Additionally, the sequence of tx packets is strictly checked for any discontinuity
// before filling entries in the queue. The calculation is therefore safe against
// overrun, even if it looks fragile.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	unsigned int cycles = s->ctx_data.tx.cache.pos;

	if (cycles < head)
		cycles += cache_size;
	cycles -= head;

	return cycles;
}

static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
{
	const unsigned int transfer_delay = s->transfer_delay;
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	struct seq_desc *cache = s->ctx_data.tx.cache.descs;
	unsigned int cache_pos = s->ctx_data.tx.cache.pos;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < desc_count; ++i) {
		struct seq_desc *dst = cache + cache_pos;

		if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
		dst->data_blocks = src->data_blocks;

		cache_pos = (cache_pos + 1) % cache_size;
		src = amdtp_stream_next_packet_desc(s, src);
	}

	s->ctx_data.tx.cache.pos = cache_pos;
}

static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
				 unsigned int pos, unsigned int count)
{
	pool_ideal_syt_offsets(s, descs, size, pos, count);

	if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, size, pos, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
}

static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			      unsigned int pos, unsigned int count)
{
	struct amdtp_stream *target = s->ctx_data.rx.replay_target;
	const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
	const unsigned int cache_size = target->ctx_data.tx.cache.size;
	unsigned int cache_pos = s->ctx_data.rx.cache_pos;
	int i;

	for (i = 0; i < count; ++i) {
		descs[pos] = cache[cache_pos];
		cache_pos = (cache_pos + 1) % cache_size;
		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.cache_pos = cache_pos;
}

static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			   unsigned int pos, unsigned int count)
{
	struct amdtp_domain *d = s->domain;
	void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			       unsigned int pos, unsigned int count);

	if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
		pool_seq_descs = pool_ideal_seq_descs;
	} else {
		if (!d->replay.on_the_fly) {
			pool_seq_descs = pool_replayed_seq;
		} else {
			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
			const unsigned int cache_size = tx->ctx_data.tx.cache.size;
			const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
			unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);

			if (cached_cycles > count && cached_cycles > cache_size / 2)
				pool_seq_descs = pool_replayed_seq;
			else
				pool_seq_descs = pool_ideal_seq_descs;
		}
	}

	pool_seq_descs(s, descs, size, pos, count);
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;

		// When no period wakeup is requested, the program in the user process should
		// periodically check the status of the intermediate buffer associated to the
		// PCM substream to process PCM frames in the buffer, instead of receiving
		// notification of period elapsed by poll wait.
		if (!pcm->runtime->no_period_wakeup) {
			if (in_softirq()) {
				// In software IRQ context for 1394 OHCI.
				snd_pcm_period_elapsed(pcm);
			} else {
				// In process context of ALSA PCM application under acquired lock of
				// PCM substream.
				snd_pcm_period_elapsed_under_stream_lock(pcm);
			}
		}
	}
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				    (s->data_block_quadlets << CIP_DBS_SHIFT) |
				    ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				    data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
				    ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				    ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				    (syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params, unsigned int header_length,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index, u32 curr_cycle_time)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (header_length > 0) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
			   data_block_counter, s->packet_index, index, curr_cycle_time);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
			if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
				dbc_interval = s->ctx_data.tx.dbc_interval;
			else
				dbc_interval = *data_blocks;
		} else {
			dbc_interval = payload_length / sizeof(__be32);
		}

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index,
			       u32 curr_cycle_time)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle as if an empty packet arrived.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index, curr_cycle_time);

	return 0;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second
// field. On the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to
// represent it. Thus, via the Linux firewire subsystem, we can get the 3 bits for
// the second.
static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
{
	return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
}
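
// For example, a tstamp of 0x60c8 carries second bits (0x60c8 >> 13) & 0x07 = 3
// and cycle field 0x60c8 & 0x1fff = 200, so it maps to cycle count
// 3 * 8000 + 200 = 24200 within the 8-second OHCI modulus.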

static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return compute_ohci_iso_ctx_cycle_count(tstamp);
}

static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return cycle;
}

static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
{
	if (minuend < subtrahend)
		minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;

	return minuend - subtrahend;
}

static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}

// Align to the actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous cycles as the queue size to
// skip isochronous cycles, therefore it's OK to just increment the cycle by the
// queue size for the scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
					unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
	return increment_ohci_cycle_count(cycle, queue_size);
}

static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				    const __be32 *ctx_header, unsigned int packet_count,
				    unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	u32 curr_cycle_time = 0;
	int i;
	int err;

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	*desc_count = 0;
	for (i = 0; i < packet_count; ++i) {
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle corresponding
				// to empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description for the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					desc = amdtp_stream_next_packet_desc(s, desc);
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// asynchronous transaction, which makes sequence replay
				// impossible.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i, curr_cycle_time);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		desc = amdtp_stream_next_packet_desc(s, desc);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}

static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}
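
// An illustrative evaluation (made-up numbers): syt_offset 1024 plus a
// transfer delay of 8704 ticks is 9728 ticks, i.e. 3 cycles and 512 ticks.
// For cycle 100 this yields ((100 + 3) << 12) | 512 = 0x67200, masked by
// CIP_SYT_MASK to 0x7200: the low four bits of cycle 103 in bits 15..12 and
// the 512-tick offset in the low twelve bits.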

static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				     const __be32 *ctx_header, unsigned int packet_count)
{
	struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
	unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int seq_pos = s->ctx_data.rx.seq.pos;
	unsigned int dbc = s->data_block_counter;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);

	for (i = 0; i < packet_count; ++i) {
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_pos;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
			desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
		else
			desc->syt = CIP_SYT_NO_INFO;

		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_pos = (seq_pos + 1) % seq_size;
		desc = amdtp_stream_next_packet_desc(s, desc);

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.pos = seq_pos;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_softirq())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
						 const struct pkt_desc *desc, unsigned int count)
{
	unsigned int data_block_count = 0;
	u32 latest_cycle;
	u32 cycle_time;
	u32 curr_cycle;
	u32 cycle_gap;
	int i, err;

	if (count == 0)
		goto end;

	// Forward to the latest record.
	for (i = 0; i < count - 1; ++i)
		desc = amdtp_stream_next_packet_desc(s, desc);
	latest_cycle = desc->cycle;

	err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
	if (err < 0)
		goto end;

	// Compute cycle count with the lower 3 bits of the second field and the cycle field,
	// like the timestamp format of 1394 OHCI isochronous context.
	curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: The AMDTP packet descriptor should be for a past isochronous cycle
		// since it corresponds to an arrived isochronous packet.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);

		// NOTE: estimate delay by recent history of arrived AMDTP packets. The estimated
		// value expectedly corresponds to a few packets (0-2) since the packet arrived at
		// the most recent isochronous cycle has already been processed.
		for (i = 0; i < cycle_gap; ++i) {
			desc = amdtp_stream_next_packet_desc(s, desc);
			data_block_count += desc->data_blocks;
		}
	} else {
		// NOTE: The AMDTP packet descriptor should be for a future isochronous cycle
		// since it was already scheduled.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);

		// NOTE: use history of scheduled packets.
		for (i = 0; i < cycle_gap; ++i) {
			data_block_count += desc->data_blocks;
			desc = prev_packet_desc(s, desc);
		}
	}
end:
	return data_block_count * s->pcm_frame_multiplier;
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *desc,
				 unsigned int count)
{
	struct snd_pcm_substream *pcm;
	int i;

	pcm = READ_ONCE(s->pcm);
	s->process_ctx_payloads(s, desc, count, pcm);

	if (pcm) {
		unsigned int data_block_count = 0;

		pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);

		for (i = 0; i < count; ++i) {
			data_block_count += desc->data_blocks;
			desc = amdtp_stream_next_packet_desc(s, desc);
		}

		update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
	}
}

static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int pkt_header_length;
	unsigned int packets;
	u32 curr_cycle_time;
	bool need_hw_irq;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_rx_packet_descs(s, desc, ctx_header, packets);

	process_ctx_payloads(s, desc, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	if (s == d->irq_target) {
		// In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
		// the tasks of the user process operating the ALSA PCM character device via
		// ioctl(2) requests, instead of by the scheduled hardware IRQ of an IT context.
		struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
		need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
	} else {
		need_hw_irq = false;
	}

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	for (i = 0; i < packets; ++i) {
		struct {
			struct fw_iso_packet params;
			__be32 header[CIP_HEADER_QUADLETS];
		} template = { {0}, {0} };
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i, curr_cycle_time);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = need_hw_irq;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}

		desc = amdtp_stream_next_packet_desc(s, desc);
	}

	s->ctx_data.rx.event_count = event_count;
	s->packet_descs_cursor = desc;
}

static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data);

static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		if (d->replay.enable)
			s->ctx_data.rx.cache_pos = 0;

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}

static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int packet_count;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packet_count = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		struct amdtp_domain *d = s->domain;

		process_ctx_payloads(s, desc, desc_count);

		if (d->replay.enable)
			cache_seq(s, desc, desc_count);

		for (i = 0; i < desc_count; ++i)
			desc = amdtp_stream_next_packet_desc(s, desc);
		s->packet_descs_cursor = desc;
	}

	for (i = 0; i < packet_count; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}

static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// A NODATA packet can include data blocks, but they are
					// not available as events.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count to begin processing content of packet in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}

static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	bool ready_to_start;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	if (d->replay.enable && !d->replay.on_the_fly) {
		unsigned int rx_count = 0;
		unsigned int rx_ready_count = 0;
		struct amdtp_stream *rx;

		list_for_each_entry(rx, &d->streams, list) {
			struct amdtp_stream *tx;
			unsigned int cached_cycles;

			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;

			tx = rx->ctx_data.rx.replay_target;
			cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}

		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}

	// Decide the cycle count to begin processing content of packet in IT contexts. All of IT
	// contexts are expected to start and get callback when reaching here.
	if (ready_to_start) {
		unsigned int cycle = s->next_cycle;
		list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;

			if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
				cycle = s->next_cycle;

			if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}

		d->processing_cycle.rx_start = cycle;
	}
}

// This is executed once. For an in-stream, the first packet has arrived. For an out-stream,
// it is prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}
1639
1640/**
1641 * amdtp_stream_start - start transferring packets
1642 * @s: the AMDTP stream to start
1643 * @channel: the isochronous channel on the bus
1644 * @speed: firewire speed code
1645 * @queue_size: The number of packets in the queue.
1646 * @idle_irq_interval: the interval to queue packet during initial state.
1647 *
1648 * The stream cannot be started until it has been configured with
1649 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
1650 * device can be started.
1651 */
1652static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
1653 unsigned int queue_size, unsigned int idle_irq_interval)
1654{
1655 bool is_irq_target = (s == s->domain->irq_target);
1656 unsigned int ctx_header_size;
1657 unsigned int max_ctx_payload_size;
1658 enum dma_data_direction dir;
1659 struct pkt_desc *descs;
1660 int i, type, tag, err;
1661
1662 mutex_lock(&s->mutex);
1663
1664 if (WARN_ON(amdtp_stream_running(s) ||
1665 (s->data_block_quadlets < 1))) {
1666 err = -EBADFD;
1667 goto err_unlock;
1668 }
1669
1670 if (s->direction == AMDTP_IN_STREAM) {
1671 // NOTE: IT context should be used for constant IRQ.
1672 if (is_irq_target) {
1673 err = -EINVAL;
1674 goto err_unlock;
1675 }
1676
1677 s->data_block_counter = UINT_MAX;
1678 } else {
1679 s->data_block_counter = 0;
1680 }
1681
1682 // initialize packet buffer.
1683 if (s->direction == AMDTP_IN_STREAM) {
1684 dir = DMA_FROM_DEVICE;
1685 type = FW_ISO_CONTEXT_RECEIVE;
1686 if (!(s->flags & CIP_NO_HEADER))
1687 ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
1688 else
1689 ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
1690 } else {
1691 dir = DMA_TO_DEVICE;
1692 type = FW_ISO_CONTEXT_TRANSMIT;
1693 ctx_header_size = 0; // No effect for IT context.
1694 }
1695 max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
1696
1697 err = iso_packets_buffer_init(b: &s->buffer, unit: s->unit, count: queue_size, packet_size: max_ctx_payload_size, direction: dir);
1698 if (err < 0)
1699 goto err_unlock;
1700 s->queue_size = queue_size;
1701
1702 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
1703 type, channel, speed, header_size: ctx_header_size,
1704 callback: amdtp_stream_first_callback, callback_data: s);
1705 if (IS_ERR(ptr: s->context)) {
1706 err = PTR_ERR(ptr: s->context);
1707 if (err == -EBUSY)
1708 dev_err(&s->unit->device,
1709 "no free stream on this controller\n");
1710 goto err_buffer;
1711 }
1712
1713 amdtp_stream_update(s);
1714
1715 if (s->direction == AMDTP_IN_STREAM) {
1716 s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1717 s->ctx_data.tx.ctx_header_size = ctx_header_size;
1718 s->ctx_data.tx.event_starts = false;
1719
1720 if (s->domain->replay.enable) {
1721 // struct fw_iso_context.drop_overflow_headers is false therefore it's
1722 // possible to cache much unexpectedly.
1723 s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
1724 queue_size * 3 / 2);
1725 s->ctx_data.tx.cache.pos = 0;
1726 s->ctx_data.tx.cache.descs = kcalloc(n: s->ctx_data.tx.cache.size,
1727 size: sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
1728 if (!s->ctx_data.tx.cache.descs) {
1729 err = -ENOMEM;
1730 goto err_context;
1731 }
1732 }
1733 } else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]  = {  4, 3072 },
			[CIP_SFC_48000]  = {  6, 1024 },
			[CIP_SFC_96000]  = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]  = {  0,   67 },
			[CIP_SFC_88200]  = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.pos = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	// NOTE: When operating without hardIRQ/softIRQ, applications tend to call the ioctl
	// requests for the runtime of the PCM substream at an interval equivalent to the size
	// of the PCM buffer. This can wrap around the queue of AMDTP packet descriptors and
	// lose a small amount of history. To be safe, keep 8 extra elements in the queue,
	// equivalent to 1 msec.
	descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
	if (!descs) {
		err = -ENOMEM;
		goto err_context;
	}
	s->packet_descs = descs;

	INIT_LIST_HEAD(&s->packet_descs_list);
	for (i = 0; i < s->queue_size + 8; ++i) {
		INIT_LIST_HEAD(&descs->link);
		list_add_tail(&descs->link, &s->packet_descs_list);
		++descs;
	}
	s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);

	s->packet_index = 0;
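	// Queue packets until the index wraps around to zero, i.e. fill the
	// whole ring buffer once before starting the context.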
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This only affects the IR context. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->packet_descs);
	s->packet_descs = NULL;
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets queued up to the most recent isochronous cycle to
	// handle PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// In software IRQ context, the call would cause a deadlock because it
		// disables the tasklet synchronously.
		if (!in_softirq())
			fw_iso_context_flush_completions(irq_target->context);
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
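
/*
 * Usage sketch (illustrative only; "struct example_card" and its members are
 * hypothetical): a driver typically wires this helper into the .pointer
 * callback of its PCM operations, e.g.:
 *
 *	static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
 *	{
 *		struct example_card *card = substream->private_data;
 *
 *		return amdtp_domain_stream_pcm_pointer(&card->domain, &card->stream);
 *	}
 */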

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Always returns zero.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for the most recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precompute the SID field for the CIP headers of packets to transfer. */
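	// The field occupies bits 29..24 of the first quadlet; e.g. a node_id
	// of 0xffc2 yields (0xffc2 << 24) & 0x3f000000 = 0x02000000.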
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->packet_descs);
	s->packet_descs = NULL;

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
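
/*
 * Usage sketch (illustrative only; the variable names are hypothetical): on a
 * fatal stream error or bus reset, a driver typically aborts the PCM runtime
 * before tearing the streams down:
 *
 *	amdtp_stream_pcm_abort(&stream);
 *	amdtp_domain_stop(&domain);
 */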

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

// Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
// is less than the number of rx streams, the first tx stream is selected.
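// For example, with streams (tx0, rx0, rx1) registered to the domain, rx0 is
// associated with tx0, and rx1 falls back to tx0 as well since no second tx
// stream exists.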
static int make_association(struct amdtp_domain *d)
{
	unsigned int dst_index = 0;
	struct amdtp_stream *rx;

	// Make association to replay target.
	list_for_each_entry(rx, &d->streams, list) {
		if (rx->direction == AMDTP_OUT_STREAM) {
			unsigned int src_index = 0;
			struct amdtp_stream *tx = NULL;
			struct amdtp_stream *s;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction == AMDTP_IN_STREAM) {
					if (dst_index == src_index) {
						tx = s;
						break;
					}

					++src_index;
				}
			}
			if (!tx) {
				// Select the first entry.
				list_for_each_entry(s, &d->streams, list) {
					if (s->direction == AMDTP_IN_STREAM) {
						tx = s;
						break;
					}
				}
				// No target is available to replay sequence.
				if (!tx)
					return -EINVAL;
			}

			rx->ctx_data.rx.replay_target = tx;

			++dst_index;
		}
	}

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of IR
 *			 contexts.
 * @replay_seq: whether to replay the sequence of packets in IR contexts for the sequence of
 *		packets in IT contexts.
 * @replay_on_the_fly: transfer rx packets according to the nominal frequency at first, then begin
 *		       to replay according to the arrival of events in tx packets.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
		       bool replay_on_the_fly)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	bool found = false;
	int err;

	if (replay_seq) {
		err = make_association(d);
		if (err < 0)
			return err;
	}
	d->replay.enable = replay_seq;
	d->replay.on_the_fly = replay_on_the_fly;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is the case in which AMDTP streams in the domain run just for MIDI
	// substreams. Use the number of events equivalent to 10 msec as the
	// interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);
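	// For example, at 48.0 kHz with the defaults above, events_per_period
	// becomes 48000 / 100 = 480 events (10 msec), events_per_buffer becomes
	// 1440 events, and queue_size becomes DIV_ROUND_UP(8000 * 1440, 48000)
	// = 240 packets.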

	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}
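		// e.g. 480 events at 48.0 kHz yield an IRQ every
		// DIV_ROUND_UP(8000 * 480, 48000) = 80 packets (10 msec).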

		// The context starts immediately, but the DMA transfer actually begins
		// several hundred cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
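
/*
 * Usage sketch (illustrative only; error handling omitted and the variable
 * names are hypothetical): a typical driver builds up and starts a domain as
 * follows:
 *
 *	err = amdtp_domain_init(&domain);
 *	err = amdtp_domain_add_stream(&domain, &tx_stream, tx_channel, speed);
 *	err = amdtp_domain_add_stream(&domain, &rx_stream, rx_channel, speed);
 *	err = amdtp_domain_start(&domain, 0, false, false);
 *	...
 *	amdtp_domain_stop(&domain);
 *	amdtp_domain_destroy(&domain);
 */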

/**
 * amdtp_domain_stop - stop sending packets for isoc contexts in the domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
