1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | // |
3 | // Copyright(c) 2021 Intel Corporation. All rights reserved. |
4 | // |
5 | // Authors: Cezary Rojewski <cezary.rojewski@intel.com> |
6 | // Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com> |
7 | // |
8 | |
9 | #include <sound/intel-nhlt.h> |
10 | #include <sound/pcm_params.h> |
11 | #include <sound/soc.h> |
12 | #include "avs.h" |
13 | #include "control.h" |
14 | #include "path.h" |
15 | #include "topology.h" |
16 | |
17 | /* Must be called with adev->comp_list_mutex held. */ |
18 | static struct avs_tplg * |
19 | avs_path_find_tplg(struct avs_dev *adev, const char *name) |
20 | { |
21 | struct avs_soc_component *acomp; |
22 | |
23 | list_for_each_entry(acomp, &adev->comp_list, node) |
24 | if (!strcmp(acomp->tplg->name, name)) |
25 | return acomp->tplg; |
26 | return NULL; |
27 | } |
28 | |
29 | static struct avs_path_module * |
30 | avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id) |
31 | { |
32 | struct avs_path_module *mod; |
33 | |
34 | list_for_each_entry(mod, &ppl->mod_list, node) |
35 | if (mod->template->id == template_id) |
36 | return mod; |
37 | return NULL; |
38 | } |
39 | |
40 | static struct avs_path_pipeline * |
41 | avs_path_find_pipeline(struct avs_path *path, u32 template_id) |
42 | { |
43 | struct avs_path_pipeline *ppl; |
44 | |
45 | list_for_each_entry(ppl, &path->ppl_list, node) |
46 | if (ppl->template->id == template_id) |
47 | return ppl; |
48 | return NULL; |
49 | } |
50 | |
51 | static struct avs_path * |
52 | avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id) |
53 | { |
54 | struct avs_tplg_path_template *pos, *template = NULL; |
55 | struct avs_tplg *tplg; |
56 | struct avs_path *path; |
57 | |
58 | tplg = avs_path_find_tplg(adev, name); |
59 | if (!tplg) |
60 | return NULL; |
61 | |
62 | list_for_each_entry(pos, &tplg->path_tmpl_list, node) { |
63 | if (pos->id == template_id) { |
64 | template = pos; |
65 | break; |
66 | } |
67 | } |
68 | if (!template) |
69 | return NULL; |
70 | |
71 | spin_lock(lock: &adev->path_list_lock); |
72 | /* Only one variant of given path template may be instantiated at a time. */ |
73 | list_for_each_entry(path, &adev->path_list, node) { |
74 | if (path->template->owner == template) { |
75 | spin_unlock(lock: &adev->path_list_lock); |
76 | return path; |
77 | } |
78 | } |
79 | |
80 | spin_unlock(lock: &adev->path_list_lock); |
81 | return NULL; |
82 | } |
83 | |
84 | static bool avs_test_hw_params(struct snd_pcm_hw_params *params, |
85 | struct avs_audio_format *fmt) |
86 | { |
87 | return (params_rate(p: params) == fmt->sampling_freq && |
88 | params_channels(p: params) == fmt->num_channels && |
89 | params_physical_width(p: params) == fmt->bit_depth && |
90 | snd_pcm_hw_params_bits(p: params) == fmt->valid_bit_depth); |
91 | } |
92 | |
93 | static struct avs_tplg_path * |
94 | avs_path_find_variant(struct avs_dev *adev, |
95 | struct avs_tplg_path_template *template, |
96 | struct snd_pcm_hw_params *fe_params, |
97 | struct snd_pcm_hw_params *be_params) |
98 | { |
99 | struct avs_tplg_path *variant; |
100 | |
101 | list_for_each_entry(variant, &template->path_list, node) { |
102 | dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n" , |
103 | variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels, |
104 | variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth); |
105 | dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n" , |
106 | variant->be_fmt->sampling_freq, variant->be_fmt->num_channels, |
107 | variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth); |
108 | |
109 | if (variant->fe_fmt && avs_test_hw_params(params: fe_params, fmt: variant->fe_fmt) && |
110 | variant->be_fmt && avs_test_hw_params(params: be_params, fmt: variant->be_fmt)) |
111 | return variant; |
112 | } |
113 | |
114 | return NULL; |
115 | } |
116 | |
117 | __maybe_unused |
118 | static bool avs_dma_type_is_host(u32 dma_type) |
119 | { |
120 | return dma_type == AVS_DMA_HDA_HOST_OUTPUT || |
121 | dma_type == AVS_DMA_HDA_HOST_INPUT; |
122 | } |
123 | |
124 | __maybe_unused |
125 | static bool avs_dma_type_is_link(u32 dma_type) |
126 | { |
127 | return !avs_dma_type_is_host(dma_type); |
128 | } |
129 | |
130 | __maybe_unused |
131 | static bool avs_dma_type_is_output(u32 dma_type) |
132 | { |
133 | return dma_type == AVS_DMA_HDA_HOST_OUTPUT || |
134 | dma_type == AVS_DMA_HDA_LINK_OUTPUT || |
135 | dma_type == AVS_DMA_I2S_LINK_OUTPUT; |
136 | } |
137 | |
138 | __maybe_unused |
139 | static bool avs_dma_type_is_input(u32 dma_type) |
140 | { |
141 | return !avs_dma_type_is_output(dma_type); |
142 | } |
143 | |
/*
 * Build and send the copier module's configuration payload.
 *
 * Gateway (DMA) specifics depend on the copier's dma_type: I2S and DMIC
 * gateways embed an NHLT endpoint blob in the config, HDA HOST/LINK gateways
 * only need a virtual index derived from the stream's DMA id. Unknown types
 * get an invalid node id, i.e. no gateway at all.
 */
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct nhlt_acpi_table *nhlt = adev->nhlt;
	struct avs_tplg_module *t = mod->template;
	struct avs_copier_cfg *cfg;
	struct nhlt_specific_cfg *ep_blob;
	union avs_connector_node_id node_id = {0};
	size_t cfg_size, data_size = 0;
	void *data = NULL;
	u32 dma_type;
	int ret;

	dma_type = t->cfg_ext->copier.dma_type;
	node_id.dma_type = dma_type;

	switch (dma_type) {
	/* Shared by the case labels below; valid C, if unusual placement. */
	struct avs_audio_format *fmt;
	int direction;

	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		if (avs_dma_type_is_input(dma_type))
			direction = SNDRV_PCM_STREAM_CAPTURE;
		else
			direction = SNDRV_PCM_STREAM_PLAYBACK;

		/* Prefer explicitly-specified blob format, else derive from direction. */
		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else if (direction == SNDRV_PCM_STREAM_CAPTURE)
			fmt = t->in_fmt;
		else
			fmt = t->cfg_ext->copier.out_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(dev: adev->dev,
			nhlt, bus_id: t->cfg_ext->copier.vindex.i2s.instance,
			link_type: NHLT_LINK_SSP, vbps: fmt->valid_bit_depth, bps: fmt->bit_depth,
			num_ch: fmt->num_channels, rate: fmt->sampling_freq, dir: direction,
			dev_type: NHLT_DEVICE_I2S);
		if (!ep_blob) {
			dev_err(adev->dev, "no I2S ep_blob found\n" );
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* I2S gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		/* DMIC is capture-only. */
		direction = SNDRV_PCM_STREAM_CAPTURE;

		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else
			fmt = t->in_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(dev: adev->dev, nhlt, bus_id: 0,
			link_type: NHLT_LINK_DMIC, vbps: fmt->valid_bit_depth,
			bps: fmt->bit_depth, num_ch: fmt->num_channels,
			rate: fmt->sampling_freq, dir: direction, dev_type: NHLT_DEVICE_DMIC);
		if (!ep_blob) {
			dev_err(adev->dev, "no DMIC ep_blob found\n" );
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* DMIC gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* HOST gateway's vindex is dynamically assigned with DMA id */
		node_id.vindex = mod->owner->owner->dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		/* LINK gateways combine the static topology index with the DMA id. */
		node_id.vindex = t->cfg_ext->copier.vindex.val |
				 mod->owner->owner->dma_id;
		break;

	case INVALID_OBJECT_ID:
	default:
		node_id = INVALID_NODE_ID;
		break;
	}

	cfg_size = sizeof(*cfg) + data_size;
	/* Every config-BLOB contains gateway attributes. */
	if (data_size)
		cfg_size -= sizeof(cfg->gtw_cfg.config.attrs);
	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	/* Assemble the payload in the preallocated mailbox-sized scratch buffer. */
	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
	cfg->feature_mask = t->cfg_ext->copier.feature_mask;
	cfg->gtw_cfg.node_id = node_id;
	cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
	/* config_length in DWORDs */
	cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
	if (data)
		memcpy(&cfg->gtw_cfg.config, data, data_size);

	/* Remember gateway attributes for later runtime updates. */
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id,
				  core_id: t->core_id, domain: t->domain, param: cfg, param_size: cfg_size,
				  instance_id: &mod->instance_id);
	return ret;
}
266 | |
267 | static struct avs_control_data *avs_get_module_control(struct avs_path_module *mod) |
268 | { |
269 | struct avs_tplg_module *t = mod->template; |
270 | struct avs_tplg_path_template *path_tmpl; |
271 | struct snd_soc_dapm_widget *w; |
272 | int i; |
273 | |
274 | path_tmpl = t->owner->owner->owner; |
275 | w = path_tmpl->w; |
276 | |
277 | for (i = 0; i < w->num_kcontrols; i++) { |
278 | struct avs_control_data *ctl_data; |
279 | struct soc_mixer_control *mc; |
280 | |
281 | mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value; |
282 | ctl_data = (struct avs_control_data *)mc->dobj.private; |
283 | if (ctl_data->id == t->ctl_id) |
284 | return ctl_data; |
285 | } |
286 | |
287 | return NULL; |
288 | } |
289 | |
290 | static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod) |
291 | { |
292 | struct avs_tplg_module *t = mod->template; |
293 | struct avs_control_data *ctl_data; |
294 | struct avs_peakvol_cfg *cfg; |
295 | int volume = S32_MAX; |
296 | size_t cfg_size; |
297 | int ret; |
298 | |
299 | ctl_data = avs_get_module_control(mod); |
300 | if (ctl_data) |
301 | volume = ctl_data->volume; |
302 | |
303 | /* As 2+ channels controls are unsupported, have a single block for all channels. */ |
304 | cfg_size = struct_size(cfg, vols, 1); |
305 | if (cfg_size > AVS_MAILBOX_SIZE) |
306 | return -EINVAL; |
307 | |
308 | cfg = adev->modcfg_buf; |
309 | memset(cfg, 0, cfg_size); |
310 | cfg->base.cpc = t->cfg_base->cpc; |
311 | cfg->base.ibs = t->cfg_base->ibs; |
312 | cfg->base.obs = t->cfg_base->obs; |
313 | cfg->base.is_pages = t->cfg_base->is_pages; |
314 | cfg->base.audio_fmt = *t->in_fmt; |
315 | cfg->vols[0].target_volume = volume; |
316 | cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK; |
317 | cfg->vols[0].curve_type = AVS_AUDIO_CURVE_NONE; |
318 | cfg->vols[0].curve_duration = 0; |
319 | |
320 | ret = avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, core_id: t->core_id, |
321 | domain: t->domain, param: cfg, param_size: cfg_size, instance_id: &mod->instance_id); |
322 | |
323 | return ret; |
324 | } |
325 | |
326 | static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod) |
327 | { |
328 | struct avs_tplg_module *t = mod->template; |
329 | struct avs_updown_mixer_cfg cfg; |
330 | int i; |
331 | |
332 | cfg.base.cpc = t->cfg_base->cpc; |
333 | cfg.base.ibs = t->cfg_base->ibs; |
334 | cfg.base.obs = t->cfg_base->obs; |
335 | cfg.base.is_pages = t->cfg_base->is_pages; |
336 | cfg.base.audio_fmt = *t->in_fmt; |
337 | cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config; |
338 | cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select; |
339 | for (i = 0; i < AVS_CHANNELS_MAX; i++) |
340 | cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i]; |
341 | cfg.channel_map = t->cfg_ext->updown_mix.channel_map; |
342 | |
343 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
344 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
345 | instance_id: &mod->instance_id); |
346 | } |
347 | |
348 | static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod) |
349 | { |
350 | struct avs_tplg_module *t = mod->template; |
351 | struct avs_src_cfg cfg; |
352 | |
353 | cfg.base.cpc = t->cfg_base->cpc; |
354 | cfg.base.ibs = t->cfg_base->ibs; |
355 | cfg.base.obs = t->cfg_base->obs; |
356 | cfg.base.is_pages = t->cfg_base->is_pages; |
357 | cfg.base.audio_fmt = *t->in_fmt; |
358 | cfg.out_freq = t->cfg_ext->src.out_freq; |
359 | |
360 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
361 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
362 | instance_id: &mod->instance_id); |
363 | } |
364 | |
365 | static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod) |
366 | { |
367 | struct avs_tplg_module *t = mod->template; |
368 | struct avs_asrc_cfg cfg; |
369 | |
370 | cfg.base.cpc = t->cfg_base->cpc; |
371 | cfg.base.ibs = t->cfg_base->ibs; |
372 | cfg.base.obs = t->cfg_base->obs; |
373 | cfg.base.is_pages = t->cfg_base->is_pages; |
374 | cfg.base.audio_fmt = *t->in_fmt; |
375 | cfg.out_freq = t->cfg_ext->asrc.out_freq; |
376 | cfg.mode = t->cfg_ext->asrc.mode; |
377 | cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer; |
378 | |
379 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
380 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
381 | instance_id: &mod->instance_id); |
382 | } |
383 | |
384 | static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod) |
385 | { |
386 | struct avs_tplg_module *t = mod->template; |
387 | struct avs_aec_cfg cfg; |
388 | |
389 | cfg.base.cpc = t->cfg_base->cpc; |
390 | cfg.base.ibs = t->cfg_base->ibs; |
391 | cfg.base.obs = t->cfg_base->obs; |
392 | cfg.base.is_pages = t->cfg_base->is_pages; |
393 | cfg.base.audio_fmt = *t->in_fmt; |
394 | cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt; |
395 | cfg.out_fmt = *t->cfg_ext->aec.out_fmt; |
396 | cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode; |
397 | |
398 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
399 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
400 | instance_id: &mod->instance_id); |
401 | } |
402 | |
403 | static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod) |
404 | { |
405 | struct avs_tplg_module *t = mod->template; |
406 | struct avs_mux_cfg cfg; |
407 | |
408 | cfg.base.cpc = t->cfg_base->cpc; |
409 | cfg.base.ibs = t->cfg_base->ibs; |
410 | cfg.base.obs = t->cfg_base->obs; |
411 | cfg.base.is_pages = t->cfg_base->is_pages; |
412 | cfg.base.audio_fmt = *t->in_fmt; |
413 | cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt; |
414 | cfg.out_fmt = *t->cfg_ext->mux.out_fmt; |
415 | |
416 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
417 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
418 | instance_id: &mod->instance_id); |
419 | } |
420 | |
421 | static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod) |
422 | { |
423 | struct avs_tplg_module *t = mod->template; |
424 | struct avs_wov_cfg cfg; |
425 | |
426 | cfg.base.cpc = t->cfg_base->cpc; |
427 | cfg.base.ibs = t->cfg_base->ibs; |
428 | cfg.base.obs = t->cfg_base->obs; |
429 | cfg.base.is_pages = t->cfg_base->is_pages; |
430 | cfg.base.audio_fmt = *t->in_fmt; |
431 | cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode; |
432 | |
433 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
434 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
435 | instance_id: &mod->instance_id); |
436 | } |
437 | |
438 | static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod) |
439 | { |
440 | struct avs_tplg_module *t = mod->template; |
441 | struct avs_micsel_cfg cfg; |
442 | |
443 | cfg.base.cpc = t->cfg_base->cpc; |
444 | cfg.base.ibs = t->cfg_base->ibs; |
445 | cfg.base.obs = t->cfg_base->obs; |
446 | cfg.base.is_pages = t->cfg_base->is_pages; |
447 | cfg.base.audio_fmt = *t->in_fmt; |
448 | cfg.out_fmt = *t->cfg_ext->micsel.out_fmt; |
449 | |
450 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
451 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
452 | instance_id: &mod->instance_id); |
453 | } |
454 | |
455 | static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod) |
456 | { |
457 | struct avs_tplg_module *t = mod->template; |
458 | struct avs_modcfg_base cfg; |
459 | |
460 | cfg.cpc = t->cfg_base->cpc; |
461 | cfg.ibs = t->cfg_base->ibs; |
462 | cfg.obs = t->cfg_base->obs; |
463 | cfg.is_pages = t->cfg_base->is_pages; |
464 | cfg.audio_fmt = *t->in_fmt; |
465 | |
466 | return avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
467 | core_id: t->core_id, domain: t->domain, param: &cfg, param_size: sizeof(cfg), |
468 | instance_id: &mod->instance_id); |
469 | } |
470 | |
471 | static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod) |
472 | { |
473 | struct avs_tplg_module *t = mod->template; |
474 | struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext; |
475 | struct avs_modcfg_ext *cfg; |
476 | size_t cfg_size, num_pins; |
477 | int ret, i; |
478 | |
479 | num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins; |
480 | cfg_size = struct_size(cfg, pin_fmts, num_pins); |
481 | |
482 | if (cfg_size > AVS_MAILBOX_SIZE) |
483 | return -EINVAL; |
484 | |
485 | cfg = adev->modcfg_buf; |
486 | memset(cfg, 0, cfg_size); |
487 | cfg->base.cpc = t->cfg_base->cpc; |
488 | cfg->base.ibs = t->cfg_base->ibs; |
489 | cfg->base.obs = t->cfg_base->obs; |
490 | cfg->base.is_pages = t->cfg_base->is_pages; |
491 | cfg->base.audio_fmt = *t->in_fmt; |
492 | cfg->num_input_pins = tcfg->generic.num_input_pins; |
493 | cfg->num_output_pins = tcfg->generic.num_output_pins; |
494 | |
495 | /* configure pin formats */ |
496 | for (i = 0; i < num_pins; i++) { |
497 | struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i]; |
498 | struct avs_pin_format *pin = &cfg->pin_fmts[i]; |
499 | |
500 | pin->pin_index = tpin->pin_index; |
501 | pin->iobs = tpin->iobs; |
502 | pin->audio_fmt = *tpin->fmt; |
503 | } |
504 | |
505 | ret = avs_dsp_init_module(adev, module_id: mod->module_id, ppl_instance_id: mod->owner->instance_id, |
506 | core_id: t->core_id, domain: t->domain, param: cfg, param_size: cfg_size, |
507 | instance_id: &mod->instance_id); |
508 | return ret; |
509 | } |
510 | |
511 | static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod) |
512 | { |
513 | dev_err(adev->dev, "Probe module can't be instantiated by topology" ); |
514 | return -EINVAL; |
515 | } |
516 | |
/* Maps a module-type GUID to the routine that builds its config payload. */
struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};
521 | |
/*
 * Dispatch table for well-known module types; anything not listed here
 * falls back to the generic avs_modext_create().
 */
static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
};
538 | |
539 | static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod) |
540 | { |
541 | const guid_t *type = &mod->template->cfg_ext->type; |
542 | |
543 | for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++) |
544 | if (guid_equal(u1: type, u2: avs_module_create[i].guid)) |
545 | return avs_module_create[i].create(adev, mod); |
546 | |
547 | return avs_modext_create(adev, mod); |
548 | } |
549 | |
550 | static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod) |
551 | { |
552 | struct avs_soc_component *acomp; |
553 | |
554 | acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp); |
555 | |
556 | u32 num_ids = mod->template->num_config_ids; |
557 | u32 *ids = mod->template->config_ids; |
558 | |
559 | for (int i = 0; i < num_ids; i++) { |
560 | struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]]; |
561 | size_t len = config->length; |
562 | void *data = config->data; |
563 | u32 param = config->param; |
564 | int ret; |
565 | |
566 | ret = avs_ipc_set_large_config(adev, module_id: mod->module_id, instance_id: mod->instance_id, |
567 | param_id: param, request: data, request_size: len); |
568 | if (ret) { |
569 | dev_err(adev->dev, "send initial module config failed: %d\n" , ret); |
570 | return AVS_IPC_RET(ret); |
571 | } |
572 | } |
573 | |
574 | return 0; |
575 | } |
576 | |
/* Release host-side bookkeeping only; DSP-side module deletion is separate. */
static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(objp: mod);
}
581 | |
582 | static struct avs_path_module * |
583 | avs_path_module_create(struct avs_dev *adev, |
584 | struct avs_path_pipeline *owner, |
585 | struct avs_tplg_module *template) |
586 | { |
587 | struct avs_path_module *mod; |
588 | int module_id, ret; |
589 | |
590 | module_id = avs_get_module_id(adev, uuid: &template->cfg_ext->type); |
591 | if (module_id < 0) |
592 | return ERR_PTR(error: module_id); |
593 | |
594 | mod = kzalloc(size: sizeof(*mod), GFP_KERNEL); |
595 | if (!mod) |
596 | return ERR_PTR(error: -ENOMEM); |
597 | |
598 | mod->template = template; |
599 | mod->module_id = module_id; |
600 | mod->owner = owner; |
601 | INIT_LIST_HEAD(list: &mod->node); |
602 | |
603 | ret = avs_path_module_type_create(adev, mod); |
604 | if (ret) { |
605 | dev_err(adev->dev, "module-type create failed: %d\n" , ret); |
606 | kfree(objp: mod); |
607 | return ERR_PTR(error: ret); |
608 | } |
609 | |
610 | ret = avs_path_module_send_init_configs(adev, mod); |
611 | if (ret) { |
612 | kfree(objp: mod); |
613 | return ERR_PTR(error: ret); |
614 | } |
615 | |
616 | return mod; |
617 | } |
618 | |
619 | static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding) |
620 | { |
621 | struct avs_path_module *this_mod, *target_mod; |
622 | struct avs_path_pipeline *target_ppl; |
623 | struct avs_path *target_path; |
624 | struct avs_tplg_binding *t; |
625 | |
626 | t = binding->template; |
627 | this_mod = avs_path_find_module(ppl: binding->owner, |
628 | template_id: t->mod_id); |
629 | if (!this_mod) { |
630 | dev_err(adev->dev, "path mod %d not found\n" , t->mod_id); |
631 | return -EINVAL; |
632 | } |
633 | |
634 | /* update with target_tplg_name too */ |
635 | target_path = avs_path_find_path(adev, name: t->target_tplg_name, |
636 | template_id: t->target_path_tmpl_id); |
637 | if (!target_path) { |
638 | dev_err(adev->dev, "target path %s:%d not found\n" , |
639 | t->target_tplg_name, t->target_path_tmpl_id); |
640 | return -EINVAL; |
641 | } |
642 | |
643 | target_ppl = avs_path_find_pipeline(path: target_path, |
644 | template_id: t->target_ppl_id); |
645 | if (!target_ppl) { |
646 | dev_err(adev->dev, "target ppl %d not found\n" , t->target_ppl_id); |
647 | return -EINVAL; |
648 | } |
649 | |
650 | target_mod = avs_path_find_module(ppl: target_ppl, template_id: t->target_mod_id); |
651 | if (!target_mod) { |
652 | dev_err(adev->dev, "target mod %d not found\n" , t->target_mod_id); |
653 | return -EINVAL; |
654 | } |
655 | |
656 | if (t->is_sink) { |
657 | binding->sink = this_mod; |
658 | binding->sink_pin = t->mod_pin; |
659 | binding->source = target_mod; |
660 | binding->source_pin = t->target_mod_pin; |
661 | } else { |
662 | binding->sink = target_mod; |
663 | binding->sink_pin = t->target_mod_pin; |
664 | binding->source = this_mod; |
665 | binding->source_pin = t->mod_pin; |
666 | } |
667 | |
668 | return 0; |
669 | } |
670 | |
/* Counterpart of avs_path_binding_create(); host-side state only. */
static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(objp: binding);
}
675 | |
676 | static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev, |
677 | struct avs_path_pipeline *owner, |
678 | struct avs_tplg_binding *t) |
679 | { |
680 | struct avs_path_binding *binding; |
681 | |
682 | binding = kzalloc(size: sizeof(*binding), GFP_KERNEL); |
683 | if (!binding) |
684 | return ERR_PTR(error: -ENOMEM); |
685 | |
686 | binding->template = t; |
687 | binding->owner = owner; |
688 | INIT_LIST_HEAD(list: &binding->node); |
689 | |
690 | return binding; |
691 | } |
692 | |
693 | static int avs_path_pipeline_arm(struct avs_dev *adev, |
694 | struct avs_path_pipeline *ppl) |
695 | { |
696 | struct avs_path_module *mod; |
697 | |
698 | list_for_each_entry(mod, &ppl->mod_list, node) { |
699 | struct avs_path_module *source, *sink; |
700 | int ret; |
701 | |
702 | /* |
703 | * Only one module (so it's implicitly last) or it is the last |
704 | * one, either way we don't have next module to bind it to. |
705 | */ |
706 | if (mod == list_last_entry(&ppl->mod_list, |
707 | struct avs_path_module, node)) |
708 | break; |
709 | |
710 | /* bind current module to next module on list */ |
711 | source = mod; |
712 | sink = list_next_entry(mod, node); |
713 | if (!source || !sink) |
714 | return -EINVAL; |
715 | |
716 | ret = avs_ipc_bind(adev, module_id: source->module_id, instance_id: source->instance_id, |
717 | dst_module_id: sink->module_id, dst_instance_id: sink->instance_id, dst_queue: 0, src_queue: 0); |
718 | if (ret) |
719 | return AVS_IPC_RET(ret); |
720 | } |
721 | |
722 | return 0; |
723 | } |
724 | |
/*
 * Tear down a pipeline: drop host-side bindings, delete the DSP pipeline,
 * then delete and free every module it owned. Order matters - the DSP
 * pipeline must be gone before its modules are unloaded.
 */
static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(entry: &binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, instance_id: ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, module_id: mod->module_id, instance_id: mod->instance_id,
				      ppl_instance_id: mod->owner->instance_id,
				      core_id: mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(entry: &ppl->node);
	kfree(objp: ppl);
}
749 | |
/*
 * Instantiate a pipeline on the DSP from its topology template: create the
 * DSP pipeline, then all its modules, then host-side binding placeholders.
 * On any failure the partially-built pipeline is fully unwound via
 * avs_path_pipeline_free(). Returns the new pipeline or an ERR_PTR.
 */
static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(size: sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(error: -ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	/* Lists must be initialized before any error path may walk them. */
	INIT_LIST_HEAD(list: &ppl->binding_list);
	INIT_LIST_HEAD(list: &ppl->mod_list);
	INIT_LIST_HEAD(list: &ppl->node);

	ret = avs_dsp_create_pipeline(adev, req_size: cfg->req_size, priority: cfg->priority,
				      lp: cfg->lp, attributes: cfg->attributes,
				      instance_id: &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n" , ret);
		kfree(objp: ppl);
		return ERR_PTR(error: ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, owner: ppl, template: tmod);
		if (IS_ERR(ptr: mod)) {
			ret = PTR_ERR(ptr: mod);
			dev_err(adev->dev, "error creating module %d\n" , ret);
			goto init_err;
		}

		list_add_tail(new: &mod->node, head: &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, owner: ppl, t: template->bindings[i]);
		if (IS_ERR(ptr: binding)) {
			ret = PTR_ERR(ptr: binding);
			dev_err(adev->dev, "error creating binding %d\n" , ret);
			goto init_err;
		}

		list_add_tail(new: &binding->node, head: &ppl->binding_list);
	}

	return ppl;

init_err:
	/* Frees everything added to ppl so far, including the DSP pipeline. */
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(error: ret);
}
810 | |
811 | static int avs_path_init(struct avs_dev *adev, struct avs_path *path, |
812 | struct avs_tplg_path *template, u32 dma_id) |
813 | { |
814 | struct avs_tplg_pipeline *tppl; |
815 | |
816 | path->owner = adev; |
817 | path->template = template; |
818 | path->dma_id = dma_id; |
819 | INIT_LIST_HEAD(list: &path->ppl_list); |
820 | INIT_LIST_HEAD(list: &path->node); |
821 | |
822 | /* create all the pipelines */ |
823 | list_for_each_entry(tppl, &template->ppl_list, node) { |
824 | struct avs_path_pipeline *ppl; |
825 | |
826 | ppl = avs_path_pipeline_create(adev, owner: path, template: tppl); |
827 | if (IS_ERR(ptr: ppl)) |
828 | return PTR_ERR(ptr: ppl); |
829 | |
830 | list_add_tail(new: &ppl->node, head: &path->ppl_list); |
831 | } |
832 | |
833 | spin_lock(lock: &adev->path_list_lock); |
834 | list_add_tail(new: &path->node, head: &adev->path_list); |
835 | spin_unlock(lock: &adev->path_list_lock); |
836 | |
837 | return 0; |
838 | } |
839 | |
840 | static int avs_path_arm(struct avs_dev *adev, struct avs_path *path) |
841 | { |
842 | struct avs_path_pipeline *ppl; |
843 | struct avs_path_binding *binding; |
844 | int ret; |
845 | |
846 | list_for_each_entry(ppl, &path->ppl_list, node) { |
847 | /* |
848 | * Arm all ppl bindings before binding internal modules |
849 | * as it costs no IPCs which isn't true for the latter. |
850 | */ |
851 | list_for_each_entry(binding, &ppl->binding_list, node) { |
852 | ret = avs_path_binding_arm(adev, binding); |
853 | if (ret < 0) |
854 | return ret; |
855 | } |
856 | |
857 | ret = avs_path_pipeline_arm(adev, ppl); |
858 | if (ret < 0) |
859 | return ret; |
860 | } |
861 | |
862 | return 0; |
863 | } |
864 | |
865 | static void avs_path_free_unlocked(struct avs_path *path) |
866 | { |
867 | struct avs_path_pipeline *ppl, *save; |
868 | |
869 | spin_lock(lock: &path->owner->path_list_lock); |
870 | list_del(entry: &path->node); |
871 | spin_unlock(lock: &path->owner->path_list_lock); |
872 | |
873 | list_for_each_entry_safe(ppl, save, &path->ppl_list, node) |
874 | avs_path_pipeline_free(adev: path->owner, ppl); |
875 | |
876 | kfree(objp: path); |
877 | } |
878 | |
879 | static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id, |
880 | struct avs_tplg_path *template) |
881 | { |
882 | struct avs_path *path; |
883 | int ret; |
884 | |
885 | path = kzalloc(size: sizeof(*path), GFP_KERNEL); |
886 | if (!path) |
887 | return ERR_PTR(error: -ENOMEM); |
888 | |
889 | ret = avs_path_init(adev, path, template, dma_id); |
890 | if (ret < 0) |
891 | goto err; |
892 | |
893 | ret = avs_path_arm(adev, path); |
894 | if (ret < 0) |
895 | goto err; |
896 | |
897 | path->state = AVS_PPL_STATE_INVALID; |
898 | return path; |
899 | err: |
900 | avs_path_free_unlocked(path); |
901 | return ERR_PTR(error: ret); |
902 | } |
903 | |
904 | void avs_path_free(struct avs_path *path) |
905 | { |
906 | struct avs_dev *adev = path->owner; |
907 | |
908 | mutex_lock(&adev->path_mutex); |
909 | avs_path_free_unlocked(path); |
910 | mutex_unlock(lock: &adev->path_mutex); |
911 | } |
912 | |
913 | struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id, |
914 | struct avs_tplg_path_template *template, |
915 | struct snd_pcm_hw_params *fe_params, |
916 | struct snd_pcm_hw_params *be_params) |
917 | { |
918 | struct avs_tplg_path *variant; |
919 | struct avs_path *path; |
920 | |
921 | variant = avs_path_find_variant(adev, template, fe_params, be_params); |
922 | if (!variant) { |
923 | dev_err(adev->dev, "no matching variant found\n" ); |
924 | return ERR_PTR(error: -ENOENT); |
925 | } |
926 | |
927 | /* Serialize path and its components creation. */ |
928 | mutex_lock(&adev->path_mutex); |
929 | /* Satisfy needs of avs_path_find_tplg(). */ |
930 | mutex_lock(&adev->comp_list_mutex); |
931 | |
932 | path = avs_path_create_unlocked(adev, dma_id, template: variant); |
933 | |
934 | mutex_unlock(lock: &adev->comp_list_mutex); |
935 | mutex_unlock(lock: &adev->path_mutex); |
936 | |
937 | return path; |
938 | } |
939 | |
940 | static int avs_path_bind_prepare(struct avs_dev *adev, |
941 | struct avs_path_binding *binding) |
942 | { |
943 | const struct avs_audio_format *src_fmt, *sink_fmt; |
944 | struct avs_tplg_module *tsource = binding->source->template; |
945 | struct avs_path_module *source = binding->source; |
946 | int ret; |
947 | |
948 | /* |
949 | * only copier modules about to be bound |
950 | * to output pin other than 0 need preparation |
951 | */ |
952 | if (!binding->source_pin) |
953 | return 0; |
954 | if (!guid_equal(u1: &tsource->cfg_ext->type, u2: &AVS_COPIER_MOD_UUID)) |
955 | return 0; |
956 | |
957 | src_fmt = tsource->in_fmt; |
958 | sink_fmt = binding->sink->template->in_fmt; |
959 | |
960 | ret = avs_ipc_copier_set_sink_format(adev, module_id: source->module_id, |
961 | instance_id: source->instance_id, sink_id: binding->source_pin, |
962 | src_fmt, sink_fmt); |
963 | if (ret) { |
964 | dev_err(adev->dev, "config copier failed: %d\n" , ret); |
965 | return AVS_IPC_RET(ret); |
966 | } |
967 | |
968 | return 0; |
969 | } |
970 | |
971 | int avs_path_bind(struct avs_path *path) |
972 | { |
973 | struct avs_path_pipeline *ppl; |
974 | struct avs_dev *adev = path->owner; |
975 | int ret; |
976 | |
977 | list_for_each_entry(ppl, &path->ppl_list, node) { |
978 | struct avs_path_binding *binding; |
979 | |
980 | list_for_each_entry(binding, &ppl->binding_list, node) { |
981 | struct avs_path_module *source, *sink; |
982 | |
983 | source = binding->source; |
984 | sink = binding->sink; |
985 | |
986 | ret = avs_path_bind_prepare(adev, binding); |
987 | if (ret < 0) |
988 | return ret; |
989 | |
990 | ret = avs_ipc_bind(adev, module_id: source->module_id, |
991 | instance_id: source->instance_id, dst_module_id: sink->module_id, |
992 | dst_instance_id: sink->instance_id, dst_queue: binding->sink_pin, |
993 | src_queue: binding->source_pin); |
994 | if (ret) { |
995 | dev_err(adev->dev, "bind path failed: %d\n" , ret); |
996 | return AVS_IPC_RET(ret); |
997 | } |
998 | } |
999 | } |
1000 | |
1001 | return 0; |
1002 | } |
1003 | |
1004 | int avs_path_unbind(struct avs_path *path) |
1005 | { |
1006 | struct avs_path_pipeline *ppl; |
1007 | struct avs_dev *adev = path->owner; |
1008 | int ret; |
1009 | |
1010 | list_for_each_entry(ppl, &path->ppl_list, node) { |
1011 | struct avs_path_binding *binding; |
1012 | |
1013 | list_for_each_entry(binding, &ppl->binding_list, node) { |
1014 | struct avs_path_module *source, *sink; |
1015 | |
1016 | source = binding->source; |
1017 | sink = binding->sink; |
1018 | |
1019 | ret = avs_ipc_unbind(adev, module_id: source->module_id, |
1020 | instance_id: source->instance_id, dst_module_id: sink->module_id, |
1021 | dst_instance_id: sink->instance_id, dst_queue: binding->sink_pin, |
1022 | src_queue: binding->source_pin); |
1023 | if (ret) { |
1024 | dev_err(adev->dev, "unbind path failed: %d\n" , ret); |
1025 | return AVS_IPC_RET(ret); |
1026 | } |
1027 | } |
1028 | } |
1029 | |
1030 | return 0; |
1031 | } |
1032 | |
1033 | int avs_path_reset(struct avs_path *path) |
1034 | { |
1035 | struct avs_path_pipeline *ppl; |
1036 | struct avs_dev *adev = path->owner; |
1037 | int ret; |
1038 | |
1039 | if (path->state == AVS_PPL_STATE_RESET) |
1040 | return 0; |
1041 | |
1042 | list_for_each_entry(ppl, &path->ppl_list, node) { |
1043 | ret = avs_ipc_set_pipeline_state(adev, instance_id: ppl->instance_id, |
1044 | state: AVS_PPL_STATE_RESET); |
1045 | if (ret) { |
1046 | dev_err(adev->dev, "reset path failed: %d\n" , ret); |
1047 | path->state = AVS_PPL_STATE_INVALID; |
1048 | return AVS_IPC_RET(ret); |
1049 | } |
1050 | } |
1051 | |
1052 | path->state = AVS_PPL_STATE_RESET; |
1053 | return 0; |
1054 | } |
1055 | |
1056 | int avs_path_pause(struct avs_path *path) |
1057 | { |
1058 | struct avs_path_pipeline *ppl; |
1059 | struct avs_dev *adev = path->owner; |
1060 | int ret; |
1061 | |
1062 | if (path->state == AVS_PPL_STATE_PAUSED) |
1063 | return 0; |
1064 | |
1065 | list_for_each_entry_reverse(ppl, &path->ppl_list, node) { |
1066 | ret = avs_ipc_set_pipeline_state(adev, instance_id: ppl->instance_id, |
1067 | state: AVS_PPL_STATE_PAUSED); |
1068 | if (ret) { |
1069 | dev_err(adev->dev, "pause path failed: %d\n" , ret); |
1070 | path->state = AVS_PPL_STATE_INVALID; |
1071 | return AVS_IPC_RET(ret); |
1072 | } |
1073 | } |
1074 | |
1075 | path->state = AVS_PPL_STATE_PAUSED; |
1076 | return 0; |
1077 | } |
1078 | |
1079 | int avs_path_run(struct avs_path *path, int trigger) |
1080 | { |
1081 | struct avs_path_pipeline *ppl; |
1082 | struct avs_dev *adev = path->owner; |
1083 | int ret; |
1084 | |
1085 | if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO) |
1086 | return 0; |
1087 | |
1088 | list_for_each_entry(ppl, &path->ppl_list, node) { |
1089 | if (ppl->template->cfg->trigger != trigger) |
1090 | continue; |
1091 | |
1092 | ret = avs_ipc_set_pipeline_state(adev, instance_id: ppl->instance_id, |
1093 | state: AVS_PPL_STATE_RUNNING); |
1094 | if (ret) { |
1095 | dev_err(adev->dev, "run path failed: %d\n" , ret); |
1096 | path->state = AVS_PPL_STATE_INVALID; |
1097 | return AVS_IPC_RET(ret); |
1098 | } |
1099 | } |
1100 | |
1101 | path->state = AVS_PPL_STATE_RUNNING; |
1102 | return 0; |
1103 | } |
1104 | |