1 /*
2  *  skl-topology.c - Implements Platform component ALSA controls/widget
3  *  handlers.
4  *
5  *  Copyright (C) 2014-2015 Intel Corp
6  *  Author: Jeeja KP <jeeja.kp@intel.com>
7  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/firmware.h>
22 #include <linux/uuid.h>
23 #include <sound/soc.h>
24 #include <sound/soc-topology.h>
25 #include <uapi/sound/snd_sst_tokens.h>
26 #include <uapi/sound/skl-tplg-interface.h>
27 #include "skl-sst-dsp.h"
28 #include "skl-sst-ipc.h"
29 #include "skl-topology.h"
30 #include "skl.h"
31 #include "../common/sst-dsp.h"
32 #include "../common/sst-dsp-priv.h"
33 
34 #define SKL_CH_FIXUP_MASK		(1 << 0)
35 #define SKL_RATE_FIXUP_MASK		(1 << 1)
36 #define SKL_FMT_FIXUP_MASK		(1 << 2)
37 #define SKL_IN_DIR_BIT_MASK		BIT(0)
38 #define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
39 
40 static const int mic_mono_list[] = {
41 	0, 1, 2, 3,
42 };
43 static const int mic_stereo_list[][SKL_CH_STEREO] = {
44 	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
45 };
46 static const int mic_trio_list[][SKL_CH_TRIO] = {
47 	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
48 };
49 static const int mic_quatro_list[][SKL_CH_QUATRO] = {
50 	{0, 1, 2, 3},
51 };
52 
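/* true only when channel count, sample rate and bit depth all match the given params */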
53 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
54 	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))
55 
56 void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
57 {
58 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
59 
60 	switch (caps) {
61 	case SKL_D0I3_NONE:
62 		d0i3->non_d0i3++;
63 		break;
64 
65 	case SKL_D0I3_STREAMING:
66 		d0i3->streaming++;
67 		break;
68 
69 	case SKL_D0I3_NON_STREAMING:
70 		d0i3->non_streaming++;
71 		break;
72 	}
73 }
74 
75 void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
76 {
77 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
78 
79 	switch (caps) {
80 	case SKL_D0I3_NONE:
81 		d0i3->non_d0i3--;
82 		break;
83 
84 	case SKL_D0I3_STREAMING:
85 		d0i3->streaming--;
86 		break;
87 
88 	case SKL_D0I3_NON_STREAMING:
89 		d0i3->non_streaming--;
90 		break;
91 	}
92 }
93 
94 /*
95  * The SKL DSP driver models only a few DAPM widget types and ignores the
96  * rest. This helper checks whether the SKL driver handles a given widget.
97  */
98 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
99 				  struct device *dev)
100 {
101 	if (w->dapm->dev != dev)
102 		return false;
103 
104 	switch (w->id) {
105 	case snd_soc_dapm_dai_link:
106 	case snd_soc_dapm_dai_in:
107 	case snd_soc_dapm_aif_in:
108 	case snd_soc_dapm_aif_out:
109 	case snd_soc_dapm_dai_out:
110 	case snd_soc_dapm_switch:
111 	case snd_soc_dapm_output:
112 	case snd_soc_dapm_mux:
113 
114 		return false;
115 	default:
116 		return true;
117 	}
118 }
119 
120 /*
121  * Each pipeline needs memory to be allocated. Check if we have free memory
122  * available in the pool.
123  */
124 static bool skl_is_pipe_mem_avail(struct skl *skl,
125 				struct skl_module_cfg *mconfig)
126 {
127 	struct skl_sst *ctx = skl->skl_sst;
128 
129 	if (skl->resource.mem + mconfig->pipe->memory_pages >
130 				skl->resource.max_mem) {
131 		dev_err(ctx->dev,
132 				"%s: module_id %d instance %d\n", __func__,
133 				mconfig->id.module_id,
134 				mconfig->id.instance_id);
135 		dev_err(ctx->dev,
136 				"exceeds ppl memory available %d mem %d\n",
137 				skl->resource.max_mem, skl->resource.mem);
138 		return false;
139 	} else {
140 		return true;
141 	}
142 }
143 
144 /*
145  * Add the mem to the mem pool. This is freed when the pipe is deleted.
146  * Note: the DSP does the actual memory management; we only keep track of
147  * the overall pool usage.
148  */
149 static void skl_tplg_alloc_pipe_mem(struct skl *skl,
150 				struct skl_module_cfg *mconfig)
151 {
152 	skl->resource.mem += mconfig->pipe->memory_pages;
153 }
154 
155 /*
156  * A pipeline needs DSP CPU resources for computation; this is quantified
157  * in MCPS (Million Clocks Per Second) required for the module/pipe.
158  *
159  * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
160  * pipe.
161  */
162 
163 static bool skl_is_pipe_mcps_avail(struct skl *skl,
164 				struct skl_module_cfg *mconfig)
165 {
166 	struct skl_sst *ctx = skl->skl_sst;
167 	u8 res_idx = mconfig->res_idx;
168 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
169 
170 	if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
171 		dev_err(ctx->dev,
172 			"%s: module_id %d instance %d\n", __func__,
173 			mconfig->id.module_id, mconfig->id.instance_id);
174 		dev_err(ctx->dev,
175 			"exceeds ppl mcps available %d > mem %d\n",
176 			skl->resource.max_mcps, skl->resource.mcps);
177 		return false;
178 	} else {
179 		return true;
180 	}
181 }
182 
183 static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
184 				struct skl_module_cfg *mconfig)
185 {
186 	u8 res_idx = mconfig->res_idx;
187 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
188 
189 	skl->resource.mcps += res->cps;
190 }
191 
192 /*
193  * Free the mcps when tearing down
194  */
195 static void
196 skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
197 {
198 	u8 res_idx = mconfig->res_idx;
199 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
200 
201 	skl->resource.mcps -= res->cps;
202 }
203 
204 /*
205  * Free the memory when tearing down
206  */
207 static void
208 skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
209 {
210 	skl->resource.mem -= mconfig->pipe->memory_pages;
211 }
212 
213 
214 static void skl_dump_mconfig(struct skl_sst *ctx,
215 					struct skl_module_cfg *mcfg)
216 {
217 	struct skl_module_iface *iface = &mcfg->module->formats[0];
218 
219 	dev_dbg(ctx->dev, "Dumping config\n");
220 	dev_dbg(ctx->dev, "Input Format:\n");
221 	dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
222 	dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
223 	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
224 	dev_dbg(ctx->dev, "valid bit depth = %d\n",
225 				iface->inputs[0].fmt.valid_bit_depth);
226 	dev_dbg(ctx->dev, "Output Format:\n");
227 	dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
228 	dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
229 	dev_dbg(ctx->dev, "valid bit depth = %d\n",
230 				iface->outputs[0].fmt.valid_bit_depth);
231 	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
232 }
233 
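/*
 * Build a 4-bit-per-channel slot map: each of the first "chs" channels is
 * assigned slot i, and the remaining nibbles keep the 0xF (unused) marker.
 */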
234 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
235 {
236 	int slot_map = 0xFFFFFFFF;
237 	int start_slot = 0;
238 	int i;
239 
240 	for (i = 0; i < chs; i++) {
241 		/*
242 		 * For 2 channels with starting slot as 0, slot map will
243 		 * look like 0xFFFFFF10.
244 		 */
245 		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
246 		start_slot++;
247 	}
248 	fmt->ch_map = slot_map;
249 }
250 
251 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
252 			struct skl_pipe_params *params, int fixup)
253 {
254 	if (fixup & SKL_RATE_FIXUP_MASK)
255 		fmt->s_freq = params->s_freq;
256 	if (fixup & SKL_CH_FIXUP_MASK) {
257 		fmt->channels = params->ch;
258 		skl_tplg_update_chmap(fmt, fmt->channels);
259 	}
260 	if (fixup & SKL_FMT_FIXUP_MASK) {
261 		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
262 
263 		/*
264 		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
265 		 * container so update bit depth accordingly
266 		 */
267 		switch (fmt->valid_bit_depth) {
268 		case SKL_DEPTH_16BIT:
269 			fmt->bit_depth = fmt->valid_bit_depth;
270 			break;
271 
272 		default:
273 			fmt->bit_depth = SKL_DEPTH_32BIT;
274 			break;
275 		}
276 	}
277 
278 }
279 
280 /*
281  * A pipeline may have modules which impact the pcm parameters, like SRC,
282  * channel converter or format converter.
283  * We need to calculate the output params by applying the 'fixup'.
284  * Topology tells the driver which type of fixup is to be applied by
285  * supplying the fixup mask, and based on that we calculate the output.
286  *
287  * For an FE, the pcm hw_params is the source/target format. The same is
288  * applicable for a BE when its hw_params is invoked.
289  * Here, based on the FE/BE pipeline and the direction, we calculate the
290  * input and output fixups and then apply them to the module.
291  */
292 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
293 		struct skl_pipe_params *params, bool is_fe)
294 {
295 	int in_fixup, out_fixup;
296 	struct skl_module_fmt *in_fmt, *out_fmt;
297 
298 	/* Fixups will be applied to pin 0 only */
299 	in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
300 	out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
301 
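	/*
	 * params_fixup selects which attributes to overwrite from hw_params;
	 * bits also set in the converter mask are skipped on the pin opposite
	 * to the hw_params side, since the module converts those itself.
	 */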
302 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
303 		if (is_fe) {
304 			in_fixup = m_cfg->params_fixup;
305 			out_fixup = (~m_cfg->converter) &
306 					m_cfg->params_fixup;
307 		} else {
308 			out_fixup = m_cfg->params_fixup;
309 			in_fixup = (~m_cfg->converter) &
310 					m_cfg->params_fixup;
311 		}
312 	} else {
313 		if (is_fe) {
314 			out_fixup = m_cfg->params_fixup;
315 			in_fixup = (~m_cfg->converter) &
316 					m_cfg->params_fixup;
317 		} else {
318 			in_fixup = m_cfg->params_fixup;
319 			out_fixup = (~m_cfg->converter) &
320 					m_cfg->params_fixup;
321 		}
322 	}
323 
324 	skl_tplg_update_params(in_fmt, params, in_fixup);
325 	skl_tplg_update_params(out_fmt, params, out_fixup);
326 }
327 
328 /*
329  * A module needs input and output buffers, which are dependent upon pcm
330  * params; so once we have calculated the params, we need the buffer
331  * calculation as well.
332  */
333 static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
334 				struct skl_module_cfg *mcfg)
335 {
336 	int multiplier = 1;
337 	struct skl_module_fmt *in_fmt, *out_fmt;
338 	struct skl_module_res *res;
339 
340 	/* Since fixups are applied to pin 0 only, ibs and obs need to
341 	 * change for pin 0 only
342 	 */
343 	res = &mcfg->module->resources[0];
344 	in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
345 	out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
346 
347 	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
348 		multiplier = 5;
349 
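	/* ibs/obs hold 1ms of audio: frames per ms * channels * bytes per sample (x5 for SRC) */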
350 	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
351 			in_fmt->channels * (in_fmt->bit_depth >> 3) *
352 			multiplier;
353 
354 	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
355 			out_fmt->channels * (out_fmt->bit_depth >> 3) *
356 			multiplier;
357 }
358 
359 static u8 skl_tplg_be_dev_type(int dev_type)
360 {
361 	int ret;
362 
363 	switch (dev_type) {
364 	case SKL_DEVICE_BT:
365 		ret = NHLT_DEVICE_BT;
366 		break;
367 
368 	case SKL_DEVICE_DMIC:
369 		ret = NHLT_DEVICE_DMIC;
370 		break;
371 
372 	case SKL_DEVICE_I2S:
373 		ret = NHLT_DEVICE_I2S;
374 		break;
375 
376 	default:
377 		ret = NHLT_DEVICE_INVALID;
378 		break;
379 	}
380 
381 	return ret;
382 }
383 
384 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
385 						struct skl_sst *ctx)
386 {
387 	struct skl_module_cfg *m_cfg = w->priv;
388 	int link_type, dir;
389 	u32 ch, s_freq, s_fmt;
390 	struct nhlt_specific_cfg *cfg;
391 	struct skl *skl = get_skl_ctx(ctx->dev);
392 	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
393 	int fmt_idx = m_cfg->fmt_idx;
394 	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];
395 
396 	/* check if we already have blob */
397 	if (m_cfg->formats_config.caps_size > 0)
398 		return 0;
399 
400 	dev_dbg(ctx->dev, "Applying default cfg blob\n");
401 	switch (m_cfg->dev_type) {
402 	case SKL_DEVICE_DMIC:
403 		link_type = NHLT_LINK_DMIC;
404 		dir = SNDRV_PCM_STREAM_CAPTURE;
405 		s_freq = m_iface->inputs[0].fmt.s_freq;
406 		s_fmt = m_iface->inputs[0].fmt.bit_depth;
407 		ch = m_iface->inputs[0].fmt.channels;
408 		break;
409 
410 	case SKL_DEVICE_I2S:
411 		link_type = NHLT_LINK_SSP;
412 		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
413 			dir = SNDRV_PCM_STREAM_PLAYBACK;
414 			s_freq = m_iface->outputs[0].fmt.s_freq;
415 			s_fmt = m_iface->outputs[0].fmt.bit_depth;
416 			ch = m_iface->outputs[0].fmt.channels;
417 		} else {
418 			dir = SNDRV_PCM_STREAM_CAPTURE;
419 			s_freq = m_iface->inputs[0].fmt.s_freq;
420 			s_fmt = m_iface->inputs[0].fmt.bit_depth;
421 			ch = m_iface->inputs[0].fmt.channels;
422 		}
423 		break;
424 
425 	default:
426 		return -EINVAL;
427 	}
428 
429 	/* update the blob based on virtual bus_id and default params */
430 	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
431 					s_fmt, ch, s_freq, dir, dev_type);
432 	if (cfg) {
433 		m_cfg->formats_config.caps_size = cfg->size;
434 		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
435 	} else {
436 		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
437 					m_cfg->vbus_id, link_type, dir);
438 		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
439 					ch, s_freq, s_fmt);
440 		return -EIO;
441 	}
442 
443 	return 0;
444 }
445 
446 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
447 							struct skl_sst *ctx)
448 {
449 	struct skl_module_cfg *m_cfg = w->priv;
450 	struct skl_pipe_params *params = m_cfg->pipe->p_params;
451 	int p_conn_type = m_cfg->pipe->conn_type;
452 	bool is_fe;
453 
454 	if (!m_cfg->params_fixup)
455 		return;
456 
457 	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
458 				w->name);
459 
460 	skl_dump_mconfig(ctx, m_cfg);
461 
462 	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
463 		is_fe = true;
464 	else
465 		is_fe = false;
466 
467 	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
468 	skl_tplg_update_buffer_size(ctx, m_cfg);
469 
470 	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
471 				w->name);
472 
473 	skl_dump_mconfig(ctx, m_cfg);
474 }
475 
476 /*
477  * Some modules can have multiple params set from user controls and these
478  * need to be set after the module is initialized. If the set_param flag
479  * is set, the module params are sent after the module is initialised.
480  */
481 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
482 						struct skl_sst *ctx)
483 {
484 	int i, ret;
485 	struct skl_module_cfg *mconfig = w->priv;
486 	const struct snd_kcontrol_new *k;
487 	struct soc_bytes_ext *sb;
488 	struct skl_algo_data *bc;
489 	struct skl_specific_cfg *sp_cfg;
490 
491 	if (mconfig->formats_config.caps_size > 0 &&
492 		mconfig->formats_config.set_params == SKL_PARAM_SET) {
493 		sp_cfg = &mconfig->formats_config;
494 		ret = skl_set_module_params(ctx, sp_cfg->caps,
495 					sp_cfg->caps_size,
496 					sp_cfg->param_id, mconfig);
497 		if (ret < 0)
498 			return ret;
499 	}
500 
501 	for (i = 0; i < w->num_kcontrols; i++) {
502 		k = &w->kcontrol_news[i];
503 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
504 			sb = (void *) k->private_value;
505 			bc = (struct skl_algo_data *)sb->dobj.private;
506 
507 			if (bc->set_params == SKL_PARAM_SET) {
508 				ret = skl_set_module_params(ctx,
509 						(u32 *)bc->params, bc->size,
510 						bc->param_id, mconfig);
511 				if (ret < 0)
512 					return ret;
513 			}
514 		}
515 	}
516 
517 	return 0;
518 }
519 
520 /*
521  * Some module params can be set from user controls and are required
522  * when the module is initialized. A param that is needed at init is
523  * identified by the SKL_PARAM_INIT set_params flag; such a parameter
524  * is sent as part of the module init data.
525  */
526 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
527 {
528 	const struct snd_kcontrol_new *k;
529 	struct soc_bytes_ext *sb;
530 	struct skl_algo_data *bc;
531 	struct skl_module_cfg *mconfig = w->priv;
532 	int i;
533 
534 	for (i = 0; i < w->num_kcontrols; i++) {
535 		k = &w->kcontrol_news[i];
536 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
537 			sb = (struct soc_bytes_ext *)k->private_value;
538 			bc = (struct skl_algo_data *)sb->dobj.private;
539 
540 			if (bc->set_params != SKL_PARAM_INIT)
541 				continue;
542 
543 			mconfig->formats_config.caps = (u32 *)bc->params;
544 			mconfig->formats_config.caps_size = bc->size;
545 
546 			break;
547 		}
548 	}
549 
550 	return 0;
551 }
552 
553 static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
554 		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
555 {
556 	switch (mcfg->dev_type) {
557 	case SKL_DEVICE_HDAHOST:
558 		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
559 
560 	case SKL_DEVICE_HDALINK:
561 		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
562 	}
563 
564 	return 0;
565 }
566 
567 /*
568  * Inside a pipe instance, we can have various modules. These modules need
569  * to be instantiated in the DSP by invoking the INIT_MODULE IPC, done by
570  * the skl_init_module() routine, so invoke that for all modules in a pipe
571  */
572 static int
573 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
574 {
575 	struct skl_pipe_module *w_module;
576 	struct snd_soc_dapm_widget *w;
577 	struct skl_module_cfg *mconfig;
578 	struct skl_sst *ctx = skl->skl_sst;
579 	u8 cfg_idx;
580 	int ret = 0;
581 
582 	list_for_each_entry(w_module, &pipe->w_list, node) {
583 		uuid_le *uuid_mod;
584 		w = w_module->w;
585 		mconfig = w->priv;
586 
587 		/* check if module ids are populated */
588 		if (mconfig->id.module_id < 0) {
589 			dev_err(skl->skl_sst->dev,
590 					"module %pUL id not populated\n",
591 					(uuid_le *)mconfig->guid);
592 			return -EIO;
593 		}
594 
595 		cfg_idx = mconfig->pipe->cur_config_idx;
596 		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
597 		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
598 
599 		/* check resource available */
600 		if (!skl_is_pipe_mcps_avail(skl, mconfig))
601 			return -ENOMEM;
602 
603 		if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) {
604 			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
605 				mconfig->id.module_id, mconfig->guid);
606 			if (ret < 0)
607 				return ret;
608 
609 			mconfig->m_state = SKL_MODULE_LOADED;
610 		}
611 
612 		/* prepare the DMA if the module is gateway cpr */
613 		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
614 		if (ret < 0)
615 			return ret;
616 
617 		/* for a BE, update the blob with default values if it is NULL */
618 		skl_tplg_update_be_blob(w, ctx);
619 
620 		/*
621 		 * apply fix/conversion to module params based on
622 		 * FE/BE params
623 		 */
624 		skl_tplg_update_module_params(w, ctx);
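		/* allocate a private (DSP) instance id for this module instance */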
625 		uuid_mod = (uuid_le *)mconfig->guid;
626 		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
627 						mconfig->id.instance_id);
628 		if (mconfig->id.pvt_id < 0)
629 			return ret;
630 		skl_tplg_set_module_init_data(w);
631 
632 		ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id);
633 		if (ret < 0) {
634 			dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n",
635 						mconfig->core_id, ret);
636 			return ret;
637 		}
638 
639 		ret = skl_init_module(ctx, mconfig);
640 		if (ret < 0) {
641 			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
642 			goto err;
643 		}
644 		skl_tplg_alloc_pipe_mcps(skl, mconfig);
645 		ret = skl_tplg_set_module_params(w, ctx);
646 		if (ret < 0)
647 			goto err;
648 	}
649 
650 	return 0;
651 err:
652 	skl_dsp_put_core(ctx->dsp, mconfig->core_id);
653 	return ret;
654 }
655 
656 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
657 	 struct skl_pipe *pipe)
658 {
659 	int ret = 0;
660 	struct skl_pipe_module *w_module = NULL;
661 	struct skl_module_cfg *mconfig = NULL;
662 
663 	list_for_each_entry(w_module, &pipe->w_list, node) {
664 		uuid_le *uuid_mod;
665 		mconfig  = w_module->w->priv;
666 		uuid_mod = (uuid_le *)mconfig->guid;
667 
668 		if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod &&
669 			mconfig->m_state > SKL_MODULE_UNINIT) {
670 			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
671 						mconfig->id.module_id);
672 			if (ret < 0)
673 				return -EIO;
674 		}
675 		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
676 
677 		ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id);
678 		if (ret < 0) {
679 			/* don't return; continue with other modules */
680 			dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n",
681 				mconfig->core_id, ret);
682 		}
683 	}
684 
685 	/* no modules to unload in this path, so return */
686 	return ret;
687 }
688 
689 /*
690  * Here, we select pipe format based on the pipe type and pipe
691  * direction to determine the current config index for the pipeline.
692  * The config index is then used to select proper module resources.
693  * Intermediate pipes currently have a fixed format hence we select the
694  * 0th configuration by default for such pipes.
695  */
696 static int
697 skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
698 {
699 	struct skl_sst *ctx = skl->skl_sst;
700 	struct skl_pipe *pipe = mconfig->pipe;
701 	struct skl_pipe_params *params = pipe->p_params;
702 	struct skl_path_config *pconfig = &pipe->configs[0];
703 	struct skl_pipe_fmt *fmt = NULL;
704 	bool in_fmt = false;
705 	int i;
706 
707 	if (pipe->nr_cfgs == 0) {
708 		pipe->cur_config_idx = 0;
709 		return 0;
710 	}
711 
712 	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
713 		dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
714 		pipe->cur_config_idx = 0;
715 		pipe->memory_pages = pconfig->mem_pages;
716 
717 		return 0;
718 	}
719 
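	/* FE playback and BE capture pipes are matched on the pipe input format, others on the output format */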
720 	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
721 	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
722 	     (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
723 	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
724 		in_fmt = true;
725 
726 	for (i = 0; i < pipe->nr_cfgs; i++) {
727 		pconfig = &pipe->configs[i];
728 		if (in_fmt)
729 			fmt = &pconfig->in_fmt;
730 		else
731 			fmt = &pconfig->out_fmt;
732 
733 		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
734 				    fmt->channels, fmt->freq, fmt->bps)) {
735 			pipe->cur_config_idx = i;
736 			pipe->memory_pages = pconfig->mem_pages;
737 			dev_dbg(ctx->dev, "Using pipe config: %d\n", i);
738 
739 			return 0;
740 		}
741 	}
742 
743 	dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
744 		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
745 	return -EINVAL;
746 }
747 
748 /*
749  * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
750  * need to create the pipeline. So we do the following:
751  *   - check the resources
752  *   - Create the pipeline
753  *   - Initialize the modules in pipeline
754  *   - finally bind all modules together
755  */
756 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
757 							struct skl *skl)
758 {
759 	int ret;
760 	struct skl_module_cfg *mconfig = w->priv;
761 	struct skl_pipe_module *w_module;
762 	struct skl_pipe *s_pipe = mconfig->pipe;
763 	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
764 	struct skl_sst *ctx = skl->skl_sst;
765 	struct skl_module_deferred_bind *modules;
766 
767 	ret = skl_tplg_get_pipe_config(skl, mconfig);
768 	if (ret < 0)
769 		return ret;
770 
771 	/* check resource available */
772 	if (!skl_is_pipe_mcps_avail(skl, mconfig))
773 		return -EBUSY;
774 
775 	if (!skl_is_pipe_mem_avail(skl, mconfig))
776 		return -ENOMEM;
777 
778 	/*
779 	 * Create a list of modules for pipe.
780 	 * This list contains modules from source to sink
781 	 */
782 	ret = skl_create_pipeline(ctx, mconfig->pipe);
783 	if (ret < 0)
784 		return ret;
785 
786 	skl_tplg_alloc_pipe_mem(skl, mconfig);
787 	skl_tplg_alloc_pipe_mcps(skl, mconfig);
788 
789 	/* Init all pipe modules from source to sink */
790 	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
791 	if (ret < 0)
792 		return ret;
793 
794 	/* Bind modules from source to sink */
795 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
796 		dst_module = w_module->w->priv;
797 
798 		if (src_module == NULL) {
799 			src_module = dst_module;
800 			continue;
801 		}
802 
803 		ret = skl_bind_modules(ctx, src_module, dst_module);
804 		if (ret < 0)
805 			return ret;
806 
807 		src_module = dst_module;
808 	}
809 
810 	/*
811 	 * When the destination module is initialized, check for these modules
812 	 * in deferred bind list. If found, bind them.
813 	 */
814 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
815 		if (list_empty(&skl->bind_list))
816 			break;
817 
818 		list_for_each_entry(modules, &skl->bind_list, node) {
819 			module = w_module->w->priv;
820 			if (modules->dst == module)
821 				skl_bind_modules(ctx, modules->src,
822 							modules->dst);
823 		}
824 	}
825 
826 	return 0;
827 }
828 
829 static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
830 				int size, struct skl_module_cfg *mcfg)
831 {
832 	int i, pvt_id;
833 
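	/* KPB post-bind params carry module/instance pairs: remap each instance to its DSP pvt_id */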
834 	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
835 		struct skl_kpb_params *kpb_params =
836 				(struct skl_kpb_params *)params;
837 		struct skl_mod_inst_map *inst = kpb_params->u.map;
838 
839 		for (i = 0; i < kpb_params->num_modules; i++) {
840 			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
841 								inst->inst_id);
842 			if (pvt_id < 0)
843 				return -EINVAL;
844 
845 			inst->inst_id = pvt_id;
846 			inst++;
847 		}
848 	}
849 
850 	return 0;
851 }
852 /*
853  * Some modules require params to be set after the module is bound to
854  * all of its connected pins.
855  *
856  * The module provider initializes set_param flag for such modules and we
857  * send params after binding
858  */
859 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
860 			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
861 {
862 	int i, ret;
863 	struct skl_module_cfg *mconfig = w->priv;
864 	const struct snd_kcontrol_new *k;
865 	struct soc_bytes_ext *sb;
866 	struct skl_algo_data *bc;
867 	struct skl_specific_cfg *sp_cfg;
868 	u32 *params;
869 
870 	/*
871 	 * check that all out/in pins are in the bind state;
872 	 * if so, set the module params
873 	 */
874 	for (i = 0; i < mcfg->module->max_output_pins; i++) {
875 		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
876 			return 0;
877 	}
878 
879 	for (i = 0; i < mcfg->module->max_input_pins; i++) {
880 		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
881 			return 0;
882 	}
883 
884 	if (mconfig->formats_config.caps_size > 0 &&
885 		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
886 		sp_cfg = &mconfig->formats_config;
887 		ret = skl_set_module_params(ctx, sp_cfg->caps,
888 					sp_cfg->caps_size,
889 					sp_cfg->param_id, mconfig);
890 		if (ret < 0)
891 			return ret;
892 	}
893 
894 	for (i = 0; i < w->num_kcontrols; i++) {
895 		k = &w->kcontrol_news[i];
896 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
897 			sb = (void *) k->private_value;
898 			bc = (struct skl_algo_data *)sb->dobj.private;
899 
900 			if (bc->set_params == SKL_PARAM_BIND) {
901 				params = kzalloc(bc->max, GFP_KERNEL);
902 				if (!params)
903 					return -ENOMEM;
904 
905 				memcpy(params, bc->params, bc->max);
906 				skl_fill_sink_instance_id(ctx, params, bc->max,
907 								mconfig);
908 
909 				ret = skl_set_module_params(ctx, params,
910 						bc->max, bc->param_id, mconfig);
911 				kfree(params);
912 
913 				if (ret < 0)
914 					return ret;
915 			}
916 		}
917 	}
918 
919 	return 0;
920 }
921 
922 static int skl_get_module_id(struct skl_sst *ctx, uuid_le *uuid)
923 {
924 	struct uuid_module *module;
925 
926 	list_for_each_entry(module, &ctx->uuid_list, list) {
927 		if (uuid_le_cmp(*uuid, module->uuid) == 0)
928 			return module->id;
929 	}
930 
931 	return -EINVAL;
932 }
933 
934 static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
935 					const struct snd_kcontrol_new *k)
936 {
937 	struct soc_bytes_ext *sb = (void *) k->private_value;
938 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
939 	struct skl_kpb_params *uuid_params, *params;
940 	struct hdac_bus *bus = skl_to_bus(skl);
941 	int i, size, module_id;
942 
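	/*
	 * The topology supplies UUID-based maps; rebuild them as module-id
	 * based maps, which is what the firmware-facing params expect.
	 */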
943 	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
944 		uuid_params = (struct skl_kpb_params *)bc->params;
945 		size = uuid_params->num_modules *
946 			sizeof(struct skl_mod_inst_map) +
947 			sizeof(uuid_params->num_modules);
948 
949 		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
950 		if (!params)
951 			return -ENOMEM;
952 
953 		params->num_modules = uuid_params->num_modules;
954 
955 		for (i = 0; i < uuid_params->num_modules; i++) {
956 			module_id = skl_get_module_id(skl->skl_sst,
957 				&uuid_params->u.map_uuid[i].mod_uuid);
958 			if (module_id < 0) {
959 				devm_kfree(bus->dev, params);
960 				return -EINVAL;
961 			}
962 
963 			params->u.map[i].mod_id = module_id;
964 			params->u.map[i].inst_id =
965 				uuid_params->u.map_uuid[i].inst_id;
966 		}
967 
968 		devm_kfree(bus->dev, bc->params);
969 		bc->params = (char *)params;
970 		bc->max = size;
971 	}
972 
973 	return 0;
974 }
975 
976 /*
977  * Retrieve the module id from UUID mentioned in the
978  * post bind params
979  */
980 void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
981 				struct snd_soc_dapm_widget *w)
982 {
983 	struct skl_module_cfg *mconfig = w->priv;
984 	int i;
985 
986 	/*
987 	 * Post bind params are used only for KPB
988 	 * to set copier instances to drain the data
989 	 * in fast mode
990 	 */
991 	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
992 		return;
993 
994 	for (i = 0; i < w->num_kcontrols; i++)
995 		if ((w->kcontrol_news[i].access &
996 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
997 			(skl_tplg_find_moduleid_from_uuid(skl,
998 			&w->kcontrol_news[i]) < 0))
999 			dev_err(skl->skl_sst->dev,
1000 				"%s: invalid kpb post bind params\n",
1001 				__func__);
1002 }
1003 
1004 static int skl_tplg_module_add_deferred_bind(struct skl *skl,
1005 	struct skl_module_cfg *src, struct skl_module_cfg *dst)
1006 {
1007 	struct skl_module_deferred_bind *m_list, *modules;
1008 	int i;
1009 
1010 	/* only supported for module with static pin connection */
1011 	for (i = 0; i < dst->module->max_input_pins; i++) {
1012 		struct skl_module_pin *pin = &dst->m_in_pin[i];
1013 
1014 		if (pin->is_dynamic)
1015 			continue;
1016 
1017 		if ((pin->id.module_id  == src->id.module_id) &&
1018 			(pin->id.instance_id  == src->id.instance_id)) {
1019 
1020 			if (!list_empty(&skl->bind_list)) {
1021 				list_for_each_entry(modules, &skl->bind_list, node) {
1022 					if (modules->src == src && modules->dst == dst)
1023 						return 0;
1024 				}
1025 			}
1026 
1027 			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
1028 			if (!m_list)
1029 				return -ENOMEM;
1030 
1031 			m_list->src = src;
1032 			m_list->dst = dst;
1033 
1034 			list_add(&m_list->node, &skl->bind_list);
1035 		}
1036 	}
1037 
1038 	return 0;
1039 }
1040 
1041 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
1042 				struct skl *skl,
1043 				struct snd_soc_dapm_widget *src_w,
1044 				struct skl_module_cfg *src_mconfig)
1045 {
1046 	struct snd_soc_dapm_path *p;
1047 	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
1048 	struct skl_module_cfg *sink_mconfig;
1049 	struct skl_sst *ctx = skl->skl_sst;
1050 	int ret;
1051 
1052 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1053 		if (!p->connect)
1054 			continue;
1055 
1056 		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
1057 		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
1058 
1059 		next_sink = p->sink;
1060 
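		/* not an SKL DSP widget: recurse further down this sink path */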
1061 		if (!is_skl_dsp_widget_type(p->sink, ctx->dev))
1062 			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
1063 
1064 		/*
1065 		 * Here we check widgets in the sink pipelines; these can be
1066 		 * of any widget type and we are only interested in the ones
1067 		 * used by SKL, so check that first
1068 		 */
1069 		if ((p->sink->priv != NULL) &&
1070 				is_skl_dsp_widget_type(p->sink, ctx->dev)) {
1071 
1072 			sink = p->sink;
1073 			sink_mconfig = sink->priv;
1074 
1075 			/*
1076 			 * Modules other than the PGA leaf can be connected
1077 			 * directly or via a switch to a module in another
1078 			 * pipeline, e.g. a reference path.
1079 			 * When the path is enabled, the dst module that needs
1080 			 * to be bound may not be initialized yet. If so, add
1081 			 * these modules to the deferred bind list and, once
1082 			 * the dst module is initialised, bind this module to
1083 			 * the dst_module from the deferred list.
1084 			 */
1085 			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
1086 				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
1087 
1088 				ret = skl_tplg_module_add_deferred_bind(skl,
1089 						src_mconfig, sink_mconfig);
1090 
1091 				if (ret < 0)
1092 					return ret;
1093 
1094 			}
1095 
1096 
1097 			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
1098 				sink_mconfig->m_state == SKL_MODULE_UNINIT)
1099 				continue;
1100 
1101 			/* Bind source to sink, mixin is always source */
1102 			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1103 			if (ret)
1104 				return ret;
1105 
1106 			/* set module params after bind */
1107 			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
1108 			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1109 
1110 			/* Start sinks pipe first */
1111 			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
1112 				if (sink_mconfig->pipe->conn_type !=
1113 							SKL_PIPE_CONN_TYPE_FE)
1114 					ret = skl_run_pipe(ctx,
1115 							sink_mconfig->pipe);
1116 				if (ret)
1117 					return ret;
1118 			}
1119 		}
1120 	}
1121 
1122 	if (!sink && next_sink)
1123 		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
1124 
1125 	return 0;
1126 }
1127 
1128 /*
1129  * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
1130  * we need to do the following:
1131  *   - Bind to sink pipeline
1132  *      Since the sink pipes can be running and we don't get mixer event on
1133  *      connect for already running mixer, we need to find the sink pipes
1134  *      here and bind to them. This way dynamic connect works.
1135  *   - Start sink pipeline, if not running
1136  *   - Then run current pipe
1137  */
1138 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
1139 								struct skl *skl)
1140 {
1141 	struct skl_module_cfg *src_mconfig;
1142 	struct skl_sst *ctx = skl->skl_sst;
1143 	int ret = 0;
1144 
1145 	src_mconfig = w->priv;
1146 
1147 	/*
1148 	 * find which sink it is connected to, bind with the sink,
1149 	 * if sink is not started, start sink pipe first, then start
1150 	 * this pipe
1151 	 */
1152 	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
1153 	if (ret)
1154 		return ret;
1155 
1156 	/* Start source pipe last after starting all sinks */
1157 	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1158 		return skl_run_pipe(ctx, src_mconfig->pipe);
1159 
1160 	return 0;
1161 }
1162 
1163 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
1164 		struct snd_soc_dapm_widget *w, struct skl *skl)
1165 {
1166 	struct snd_soc_dapm_path *p;
1167 	struct snd_soc_dapm_widget *src_w = NULL;
1168 	struct skl_sst *ctx = skl->skl_sst;
1169 
1170 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1171 		src_w = p->source;
1172 		if (!p->connect)
1173 			continue;
1174 
1175 		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
1176 		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
1177 
1178 		/*
1179 		 * Here we check widgets in the source pipelines; these can
1180 		 * be of any widget type and we are only interested in the
1181 		 * ones used by SKL, so check that first
1182 		 */
1183 		if ((p->source->priv != NULL) &&
1184 				is_skl_dsp_widget_type(p->source, ctx->dev)) {
1185 			return p->source;
1186 		}
1187 	}
1188 
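	/* no DSP widget found among the direct sources; walk further up via the last source path */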
1189 	if (src_w != NULL)
1190 		return skl_get_src_dsp_widget(src_w, skl);
1191 
1192 	return NULL;
1193 }
1194 
1195 /*
1196  * in the Post-PMU event of mixer we need to do the following:
1197  *   - Check if this pipe is running
1198  *   - if not, then
1199  *	- bind this pipeline to its source pipeline
1200  *	  if source pipe is already running, this means it is a dynamic
1201  *	  connection and we need to bind only to that pipe
1202  *	- start this pipeline
1203  */
1204 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1205 							struct skl *skl)
1206 {
1207 	int ret = 0;
1208 	struct snd_soc_dapm_widget *source, *sink;
1209 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1210 	struct skl_sst *ctx = skl->skl_sst;
1211 	int src_pipe_started = 0;
1212 
1213 	sink = w;
1214 	sink_mconfig = sink->priv;
1215 
1216 	/*
1217 	 * If the source pipe is already started, the source was driving
1218 	 * another sink before this sink got connected. Since the source is
1219 	 * started, bind this sink to the source and start this pipe.
1220 	 */
1221 	source = skl_get_src_dsp_widget(w, skl);
1222 	if (source != NULL) {
1223 		src_mconfig = source->priv;
1224 		sink_mconfig = sink->priv;
1225 		src_pipe_started = 1;
1226 
1227 		/*
1228 		 * check the pipe state; if it is not already started,
1229 		 * there is no need to bind or start the pipe here
1230 		 */
1231 		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1232 			src_pipe_started = 0;
1233 	}
1234 
1235 	if (src_pipe_started) {
1236 		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1237 		if (ret)
1238 			return ret;
1239 
1240 		/* set module params after bind */
1241 		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
1242 		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1243 
1244 		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1245 			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
1246 	}
1247 
1248 	return ret;
1249 }
1250 
1251 /*
1252  * in the Pre-PMD event of mixer we need to do the following:
1253  *   - Stop the pipe
1254  *   - find the source connections and remove them from the dapm_path_list
1255  *   - unbind from the source pipelines if still connected
1256  */
1257 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1258 							struct skl *skl)
1259 {
1260 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1261 	int ret = 0, i;
1262 	struct skl_sst *ctx = skl->skl_sst;
1263 
1264 	sink_mconfig = w->priv;
1265 
1266 	/* Stop the pipe */
1267 	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
1268 	if (ret)
1269 		return ret;
1270 
1271 	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
1272 		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1273 			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1274 			if (!src_mconfig)
1275 				continue;
1276 
1277 			ret = skl_unbind_modules(ctx,
1278 						src_mconfig, sink_mconfig);
1279 		}
1280 	}
1281 
1282 	return ret;
1283 }
1284 
1285 /*
1286  * in the Post-PMD event of mixer we need to do the following:
1287  *   - Free the mcps used
1288  *   - Free the mem used
1289  *   - Unbind the modules within the pipeline
1290  *   - Delete the pipeline (modules are not required to be explicitly
1291  *     deleted; deleting the pipeline is enough here)
1292  */
1293 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1294 							struct skl *skl)
1295 {
1296 	struct skl_module_cfg *mconfig = w->priv;
1297 	struct skl_pipe_module *w_module;
1298 	struct skl_module_cfg *src_module = NULL, *dst_module;
1299 	struct skl_sst *ctx = skl->skl_sst;
1300 	struct skl_pipe *s_pipe = mconfig->pipe;
1301 	struct skl_module_deferred_bind *modules, *tmp;
1302 
1303 	if (s_pipe->state == SKL_PIPE_INVALID)
1304 		return -EINVAL;
1305 
1306 	skl_tplg_free_pipe_mcps(skl, mconfig);
1307 	skl_tplg_free_pipe_mem(skl, mconfig);
1308 
1309 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1310 		if (list_empty(&skl->bind_list))
1311 			break;
1312 
1313 		src_module = w_module->w->priv;
1314 
1315 		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1316 			/*
1317 			 * When the destination module is deleted, Unbind the
1318 			 * modules from deferred bind list.
1319 			 */
1320 			if (modules->dst == src_module) {
1321 				skl_unbind_modules(ctx, modules->src,
1322 						modules->dst);
1323 			}
1324 
1325 			/*
1326 			 * When the source module is deleted, remove this entry
1327 			 * from the deferred bind list.
1328 			 */
1329 			if (modules->src == src_module) {
1330 				list_del(&modules->node);
1331 				modules->src = NULL;
1332 				modules->dst = NULL;
1333 				kfree(modules);
1334 			}
1335 		}
1336 	}
1337 
1338 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1339 		dst_module = w_module->w->priv;
1340 
1341 		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
1342 			skl_tplg_free_pipe_mcps(skl, dst_module);
1343 		if (src_module == NULL) {
1344 			src_module = dst_module;
1345 			continue;
1346 		}
1347 
1348 		skl_unbind_modules(ctx, src_module, dst_module);
1349 		src_module = dst_module;
1350 	}
1351 
1352 	skl_delete_pipe(ctx, mconfig->pipe);
1353 
1354 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1355 		src_module = w_module->w->priv;
1356 		src_module->m_state = SKL_MODULE_UNINIT;
1357 	}
1358 
1359 	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1360 }
1361 
1362 /*
1363  * in the Post-PMD event of PGA we need to do the following:
1364  *   - Free the mcps used
1365  *   - Stop the pipeline
1366  *   - If a source pipe is connected, unbind from the source pipelines
1367  */
1368 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1369 								struct skl *skl)
1370 {
1371 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1372 	int ret = 0, i;
1373 	struct skl_sst *ctx = skl->skl_sst;
1374 
1375 	src_mconfig = w->priv;
1376 
1377 	/* Stop the pipe since this is a mixin module */
1378 	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1379 	if (ret)
1380 		return ret;
1381 
1382 	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
1383 		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1384 			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1385 			if (!sink_mconfig)
1386 				continue;
1387 			/*
1388 			 * This is a connector and if a path is found it means
1389 			 * the unbind between source and sink has not happened yet
1390 			 */
1391 			ret = skl_unbind_modules(ctx, src_mconfig,
1392 							sink_mconfig);
1393 		}
1394 	}
1395 
1396 	return ret;
1397 }
1398 
1399 /*
1400  * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1401  * second one is required, it is created as another pipe entity.
1402  * The mixer is responsible for pipe management and represents a pipeline
1403  * instance
1404  */
1405 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1406 				struct snd_kcontrol *k, int event)
1407 {
1408 	struct snd_soc_dapm_context *dapm = w->dapm;
1409 	struct skl *skl = get_skl_ctx(dapm->dev);
1410 
1411 	switch (event) {
1412 	case SND_SOC_DAPM_PRE_PMU:
1413 		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1414 
1415 	case SND_SOC_DAPM_POST_PMU:
1416 		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1417 
1418 	case SND_SOC_DAPM_PRE_PMD:
1419 		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1420 
1421 	case SND_SOC_DAPM_POST_PMD:
1422 		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1423 	}
1424 
1425 	return 0;
1426 }
1427 
1428 /*
1429  * In modelling, we assume the rest of the modules in a pipeline are PGAs.
1430  * But we are only interested in the last PGA (leaf PGA) in a pipeline, to
1431  * disconnect from the sink while it is running (two FEs to one BE or one
1432  * FE to two BEs scenarios)
1433  */
1434 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1435 			struct snd_kcontrol *k, int event)
1436 
1437 {
1438 	struct snd_soc_dapm_context *dapm = w->dapm;
1439 	struct skl *skl = get_skl_ctx(dapm->dev);
1440 
1441 	switch (event) {
1442 	case SND_SOC_DAPM_PRE_PMU:
1443 		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1444 
1445 	case SND_SOC_DAPM_POST_PMD:
1446 		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1447 	}
1448 
1449 	return 0;
1450 }
1451 
1452 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1453 			unsigned int __user *data, unsigned int size)
1454 {
1455 	struct soc_bytes_ext *sb =
1456 			(struct soc_bytes_ext *)kcontrol->private_value;
1457 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1458 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1459 	struct skl_module_cfg *mconfig = w->priv;
1460 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1461 
1462 	if (w->power)
1463 		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1464 				      bc->size, bc->param_id, mconfig);
1465 
1466 	/* decrement size for TLV header */
1467 	size -= 2 * sizeof(u32);
1468 
1469 	/* check size as we don't want to send kernel data */
1470 	if (size > bc->max)
1471 		size = bc->max;
1472 
1473 	if (bc->params) {
1474 		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1475 			return -EFAULT;
1476 		if (copy_to_user(data + 1, &size, sizeof(u32)))
1477 			return -EFAULT;
1478 		if (copy_to_user(data + 2, bc->params, size))
1479 			return -EFAULT;
1480 	}
1481 
1482 	return 0;
1483 }
1484 
1485 #define SKL_PARAM_VENDOR_ID 0xff
1486 
1487 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1488 			const unsigned int __user *data, unsigned int size)
1489 {
1490 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1491 	struct skl_module_cfg *mconfig = w->priv;
1492 	struct soc_bytes_ext *sb =
1493 			(struct soc_bytes_ext *)kcontrol->private_value;
1494 	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1495 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1496 
1497 	if (ac->params) {
1498 		if (size > ac->max)
1499 			return -EINVAL;
1500 
1501 		ac->size = size;
1502 		/*
1503 		 * if the param_id is of type Vendor, firmware expects the actual
1504 		 * parameter id and size from the control.
1505 		 */
1506 		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1507 			if (copy_from_user(ac->params, data, size))
1508 				return -EFAULT;
1509 		} else {
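			/* skip the two-u32 TLV header (param id and size) sent from user space */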
1510 			if (copy_from_user(ac->params,
1511 					   data + 2, size))
1512 				return -EFAULT;
1513 		}
1514 
1515 		if (w->power)
1516 			return skl_set_module_params(skl->skl_sst,
1517 						(u32 *)ac->params, ac->size,
1518 						ac->param_id, mconfig);
1519 	}
1520 
1521 	return 0;
1522 }
1523 
1524 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
1525 		struct snd_ctl_elem_value *ucontrol)
1526 {
1527 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1528 	struct skl_module_cfg *mconfig = w->priv;
1529 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1530 	u32 ch_type = *((u32 *)ec->dobj.private);
1531 
1532 	if (mconfig->dmic_ch_type == ch_type)
1533 		ucontrol->value.enumerated.item[0] =
1534 					mconfig->dmic_ch_combo_index;
1535 	else
1536 		ucontrol->value.enumerated.item[0] = 0;
1537 
1538 	return 0;
1539 }
1540 
1541 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
1542 	struct skl_mic_sel_config *mic_cfg, struct device *dev)
1543 {
1544 	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;
1545 
1546 	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
1547 	sp_cfg->set_params = SKL_PARAM_SET;
1548 	sp_cfg->param_id = 0x00;
1549 	if (!sp_cfg->caps) {
1550 		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
1551 		if (!sp_cfg->caps)
1552 			return -ENOMEM;
1553 	}
1554 
1555 	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
1556 	mic_cfg->flags = 0;
1557 	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
1558 
1559 	return 0;
1560 }
1561 
1562 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
1563 			struct snd_ctl_elem_value *ucontrol)
1564 {
1565 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1566 	struct skl_module_cfg *mconfig = w->priv;
1567 	struct skl_mic_sel_config mic_cfg = {0};
1568 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1569 	u32 ch_type = *((u32 *)ec->dobj.private);
1570 	const int *list;
1571 	u8 in_ch, out_ch, index;
1572 
1573 	mconfig->dmic_ch_type = ch_type;
1574 	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
1575 
1576 	/* enum control index 0 is INVALID, so no channels to be set */
1577 	if (mconfig->dmic_ch_combo_index == 0)
1578 		return 0;
1579 
1580 	/* No valid channel selection map for index 0, so offset by 1 */
1581 	index = mconfig->dmic_ch_combo_index - 1;
1582 
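	/* pick the channel-combination table matching the requested channel type */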
1583 	switch (ch_type) {
1584 	case SKL_CH_MONO:
1585 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
1586 			return -EINVAL;
1587 
1588 		list = &mic_mono_list[index];
1589 		break;
1590 
1591 	case SKL_CH_STEREO:
1592 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
1593 			return -EINVAL;
1594 
1595 		list = mic_stereo_list[index];
1596 		break;
1597 
1598 	case SKL_CH_TRIO:
1599 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
1600 			return -EINVAL;
1601 
1602 		list = mic_trio_list[index];
1603 		break;
1604 
1605 	case SKL_CH_QUATRO:
1606 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
1607 			return -EINVAL;
1608 
1609 		list = mic_quatro_list[index];
1610 		break;
1611 
1612 	default:
1613 		dev_err(w->dapm->dev,
1614 				"Invalid channel %d for mic_select module\n",
1615 				ch_type);
1616 		return -EINVAL;
1617 
1618 	}
1619 
1620 	/* the channel type enum maps to the number of channels for that type */
1621 	for (out_ch = 0; out_ch < ch_type; out_ch++) {
1622 		in_ch = list[out_ch];
1623 		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
1624 	}
1625 
1626 	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
1627 }
1628 
1629 /*
1630  * Fill the dma id for host and link. In case of a passthrough
1631  * pipeline, both host and link are in the same pipeline, so we
1632  * need to copy the link or host params based on dev_type
1633  */
1634 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1635 				struct skl_pipe_params *params)
1636 {
1637 	struct skl_pipe *pipe = mcfg->pipe;
1638 
1639 	if (pipe->passthru) {
1640 		switch (mcfg->dev_type) {
1641 		case SKL_DEVICE_HDALINK:
1642 			pipe->p_params->link_dma_id = params->link_dma_id;
1643 			pipe->p_params->link_index = params->link_index;
1644 			pipe->p_params->link_bps = params->link_bps;
1645 			break;
1646 
1647 		case SKL_DEVICE_HDAHOST:
1648 			pipe->p_params->host_dma_id = params->host_dma_id;
1649 			pipe->p_params->host_bps = params->host_bps;
1650 			break;
1651 
1652 		default:
1653 			break;
1654 		}
1655 		pipe->p_params->s_fmt = params->s_fmt;
1656 		pipe->p_params->ch = params->ch;
1657 		pipe->p_params->s_freq = params->s_freq;
1658 		pipe->p_params->stream = params->stream;
1659 		pipe->p_params->format = params->format;
1660 
1661 	} else {
1662 		memcpy(pipe->p_params, params, sizeof(*params));
1663 	}
1664 }
1665 
1666 /*
1667  * The FE params are passed by the hw_params of the DAI.
1668  * On hw_params, the params are stored in the gateway module of the FE and
1669  * we need to calculate the format for the DSP module configuration; that
1670  * conversion is done here
1671  */
1672 int skl_tplg_update_pipe_params(struct device *dev,
1673 			struct skl_module_cfg *mconfig,
1674 			struct skl_pipe_params *params)
1675 {
1676 	struct skl_module_res *res = &mconfig->module->resources[0];
1677 	struct skl *skl = get_skl_ctx(dev);
1678 	struct skl_module_fmt *format = NULL;
1679 	u8 cfg_idx = mconfig->pipe->cur_config_idx;
1680 
1681 	skl_tplg_fill_dma_id(mconfig, params);
1682 	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
1683 	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
1684 
1685 	if (skl->nr_modules)
1686 		return 0;
1687 
1688 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1689 		format = &mconfig->module->formats[0].inputs[0].fmt;
1690 	else
1691 		format = &mconfig->module->formats[0].outputs[0].fmt;
1692 
1693 	/* set the hw_params */
1694 	format->s_freq = params->s_freq;
1695 	format->channels = params->ch;
1696 	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1697 
1698 	/*
1699 	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1700 	 * container so update bit depth accordingly
1701 	 */
1702 	switch (format->valid_bit_depth) {
1703 	case SKL_DEPTH_16BIT:
1704 		format->bit_depth = format->valid_bit_depth;
1705 		break;
1706 
1707 	case SKL_DEPTH_24BIT:
1708 	case SKL_DEPTH_32BIT:
1709 		format->bit_depth = SKL_DEPTH_32BIT;
1710 		break;
1711 
1712 	default:
1713 		dev_err(dev, "Invalid bit depth %x for pipe\n",
1714 				format->valid_bit_depth);
1715 		return -EINVAL;
1716 	}
1717 
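	/* buffer size for 1ms of audio: playback sets the input buffer, capture the output buffer */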
1718 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1719 		res->ibs = (format->s_freq / 1000) *
1720 				(format->channels) *
1721 				(format->bit_depth >> 3);
1722 	} else {
1723 		res->obs = (format->s_freq / 1000) *
1724 				(format->channels) *
1725 				(format->bit_depth >> 3);
1726 	}
1727 
1728 	return 0;
1729 }
1730 
1731 /*
1732  * Query the module config for the FE DAI
1733  * This is used to find the hw_params set for that DAI and apply to FE
1734  * pipeline
1735  */
1736 struct skl_module_cfg *
1737 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1738 {
1739 	struct snd_soc_dapm_widget *w;
1740 	struct snd_soc_dapm_path *p = NULL;
1741 
1742 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1743 		w = dai->playback_widget;
1744 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
1745 			if (p->connect && p->sink->power &&
1746 				!is_skl_dsp_widget_type(p->sink, dai->dev))
1747 				continue;
1748 
1749 			if (p->sink->priv) {
1750 				dev_dbg(dai->dev, "set params for %s\n",
1751 						p->sink->name);
1752 				return p->sink->priv;
1753 			}
1754 		}
1755 	} else {
1756 		w = dai->capture_widget;
1757 		snd_soc_dapm_widget_for_each_source_path(w, p) {
1758 			if (p->connect && p->source->power &&
1759 				!is_skl_dsp_widget_type(p->source, dai->dev))
1760 				continue;
1761 
1762 			if (p->source->priv) {
1763 				dev_dbg(dai->dev, "set params for %s\n",
1764 						p->source->name);
1765 				return p->source->priv;
1766 			}
1767 		}
1768 	}
1769 
1770 	return NULL;
1771 }
1772 
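/*
 * Walk the source paths of a BE playback widget recursively and return
 * the module config of the copier that feeds an AIF-out widget.
 */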
1773 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1774 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1775 {
1776 	struct snd_soc_dapm_path *p;
1777 	struct skl_module_cfg *mconfig = NULL;
1778 
1779 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1780 		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1781 			if (p->connect &&
1782 				    (p->sink->id == snd_soc_dapm_aif_out) &&
1783 				    p->source->priv) {
1784 				mconfig = p->source->priv;
1785 				return mconfig;
1786 			}
1787 			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1788 			if (mconfig)
1789 				return mconfig;
1790 		}
1791 	}
1792 	return mconfig;
1793 }
1794 
1795 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1796 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1797 {
1798 	struct snd_soc_dapm_path *p;
1799 	struct skl_module_cfg *mconfig = NULL;
1800 
1801 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1802 		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1803 			if (p->connect &&
1804 				    (p->source->id == snd_soc_dapm_aif_in) &&
1805 				    p->sink->priv) {
1806 				mconfig = p->sink->priv;
1807 				return mconfig;
1808 			}
1809 			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1810 			if (mconfig)
1811 				return mconfig;
1812 		}
1813 	}
1814 	return mconfig;
1815 }
1816 
1817 struct skl_module_cfg *
1818 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1819 {
1820 	struct snd_soc_dapm_widget *w;
1821 	struct skl_module_cfg *mconfig;
1822 
1823 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1824 		w = dai->playback_widget;
1825 		mconfig = skl_get_mconfig_pb_cpr(dai, w);
1826 	} else {
1827 		w = dai->capture_widget;
1828 		mconfig = skl_get_mconfig_cap_cpr(dai, w);
1829 	}
1830 	return mconfig;
1831 }
1832 
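/* Map the topology device type to the NHLT link type used for blob lookup */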
1833 static u8 skl_tplg_be_link_type(int dev_type)
1834 {
1835 	int ret;
1836 
1837 	switch (dev_type) {
1838 	case SKL_DEVICE_BT:
1839 		ret = NHLT_LINK_SSP;
1840 		break;
1841 
1842 	case SKL_DEVICE_DMIC:
1843 		ret = NHLT_LINK_DMIC;
1844 		break;
1845 
1846 	case SKL_DEVICE_I2S:
1847 		ret = NHLT_LINK_SSP;
1848 		break;
1849 
1850 	case SKL_DEVICE_HDALINK:
1851 		ret = NHLT_LINK_HDA;
1852 		break;
1853 
1854 	default:
1855 		ret = NHLT_LINK_INVALID;
1856 		break;
1857 	}
1858 
1859 	return ret;
1860 }
1861 
1862 /*
1863  * Fill the BE gateway parameters
1864  * The BE gateway expects a blob of parameters which are kept in the ACPI
1865  * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1866  * The port can have multiple settings so pick based on the PCM
1867  * parameters
1868  */
1869 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1870 				struct skl_module_cfg *mconfig,
1871 				struct skl_pipe_params *params)
1872 {
1873 	struct nhlt_specific_cfg *cfg;
1874 	struct skl *skl = get_skl_ctx(dai->dev);
1875 	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1876 	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
1877 
1878 	skl_tplg_fill_dma_id(mconfig, params);
1879 
1880 	if (link_type == NHLT_LINK_HDA)
1881 		return 0;
1882 
1883 	/* update the blob based on virtual bus_id */
1884 	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1885 					params->s_fmt, params->ch,
1886 					params->s_freq, params->stream,
1887 					dev_type);
1888 	if (cfg) {
1889 		mconfig->formats_config.caps_size = cfg->size;
1890 		mconfig->formats_config.caps = (u32 *) &cfg->caps;
1891 	} else {
1892 		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1893 					mconfig->vbus_id, link_type,
1894 					params->stream);
1895 		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1896 				 params->ch, params->s_freq, params->s_fmt);
1897 		return -EINVAL;
1898 	}
1899 
1900 	return 0;
1901 }
1902 
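/*
 * Walk the source paths of the BE widget recursively and fill the BE
 * gateway params for every connected SKL DSP widget that is found.
 */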
1903 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1904 				struct snd_soc_dapm_widget *w,
1905 				struct skl_pipe_params *params)
1906 {
1907 	struct snd_soc_dapm_path *p;
1908 	int ret = -EIO;
1909 
1910 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1911 		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
1912 						p->source->priv) {
1913 
1914 			ret = skl_tplg_be_fill_pipe_params(dai,
1915 						p->source->priv, params);
1916 			if (ret < 0)
1917 				return ret;
1918 		} else {
1919 			ret = skl_tplg_be_set_src_pipe_params(dai,
1920 						p->source, params);
1921 			if (ret < 0)
1922 				return ret;
1923 		}
1924 	}
1925 
1926 	return ret;
1927 }
1928 
1929 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1930 	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1931 {
1932 	struct snd_soc_dapm_path *p = NULL;
1933 	int ret = -EIO;
1934 
1935 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1936 		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
1937 						p->sink->priv) {
1938 
1939 			ret = skl_tplg_be_fill_pipe_params(dai,
1940 						p->sink->priv, params);
1941 			if (ret < 0)
1942 				return ret;
1943 		} else {
1944 			ret = skl_tplg_be_set_sink_pipe_params(
1945 						dai, p->sink, params);
1946 			if (ret < 0)
1947 				return ret;
1948 		}
1949 	}
1950 
1951 	return ret;
1952 }
1953 
1954 /*
1955  * BE hw_params can be source parameters (capture) or sink parameters
1956  * (playback). Based on the direction we need to walk either the source
1957  * list or the sink list and set the pipeline parameters
1958  */
1959 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1960 				struct skl_pipe_params *params)
1961 {
1962 	struct snd_soc_dapm_widget *w;
1963 
1964 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1965 		w = dai->playback_widget;
1966 
1967 		return skl_tplg_be_set_src_pipe_params(dai, w, params);
1968 
1969 	} else {
1970 		w = dai->capture_widget;
1971 
1972 		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1973 	}
1974 
1975 	return 0;
1976 }
1977 
1978 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1979 	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
1980 	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1981 	{SKL_PGA_EVENT, skl_tplg_pga_event},
1982 };
1983 
1984 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1985 	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1986 					skl_tplg_tlv_control_set},
1987 };
1988 
1989 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1990 	{
1991 		.id = SKL_CONTROL_TYPE_MIC_SELECT,
1992 		.get = skl_tplg_mic_control_get,
1993 		.put = skl_tplg_mic_control_set,
1994 	},
1995 };
1996 
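/*
 * Fill one entry of the pipe configuration table: the per-direction
 * format (freq, channels, bps) and the memory pages for that config.
 */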
1997 static int skl_tplg_fill_pipe_cfg(struct device *dev,
1998 			struct skl_pipe *pipe, u32 tkn,
1999 			u32 tkn_val, int conf_idx, int dir)
2000 {
2001 	struct skl_pipe_fmt *fmt;
2002 	struct skl_path_config *config;
2003 
2004 	switch (dir) {
2005 	case SKL_DIR_IN:
2006 		fmt = &pipe->configs[conf_idx].in_fmt;
2007 		break;
2008 
2009 	case SKL_DIR_OUT:
2010 		fmt = &pipe->configs[conf_idx].out_fmt;
2011 		break;
2012 
2013 	default:
2014 		dev_err(dev, "Invalid direction: %d\n", dir);
2015 		return -EINVAL;
2016 	}
2017 
2018 	config = &pipe->configs[conf_idx];
2019 
2020 	switch (tkn) {
2021 	case SKL_TKN_U32_CFG_FREQ:
2022 		fmt->freq = tkn_val;
2023 		break;
2024 
2025 	case SKL_TKN_U8_CFG_CHAN:
2026 		fmt->channels = tkn_val;
2027 		break;
2028 
2029 	case SKL_TKN_U8_CFG_BPS:
2030 		fmt->bps = tkn_val;
2031 		break;
2032 
2033 	case SKL_TKN_U32_PATH_MEM_PGS:
2034 		config->mem_pages = tkn_val;
2035 		break;
2036 
2037 	default:
2038 		dev_err(dev, "Invalid token config: %d\n", tkn);
2039 		return -EINVAL;
2040 	}
2041 
2042 	return 0;
2043 }
2044 
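/*
 * Fill a generic pipe attribute (connection type, priority, memory
 * pages, power mode, direction or number of configs) from one token.
 */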
2045 static int skl_tplg_fill_pipe_tkn(struct device *dev,
2046 			struct skl_pipe *pipe, u32 tkn,
2047 			u32 tkn_val)
2048 {
2049 
2050 	switch (tkn) {
2051 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2052 		pipe->conn_type = tkn_val;
2053 		break;
2054 
2055 	case SKL_TKN_U32_PIPE_PRIORITY:
2056 		pipe->pipe_priority = tkn_val;
2057 		break;
2058 
2059 	case SKL_TKN_U32_PIPE_MEM_PGS:
2060 		pipe->memory_pages = tkn_val;
2061 		break;
2062 
2063 	case SKL_TKN_U32_PMODE:
2064 		pipe->lp_mode = tkn_val;
2065 		break;
2066 
2067 	case SKL_TKN_U32_PIPE_DIRECTION:
2068 		pipe->direction = tkn_val;
2069 		break;
2070 
2071 	case SKL_TKN_U32_NUM_CONFIGS:
2072 		pipe->nr_cfgs = tkn_val;
2073 		break;
2074 
2075 	default:
2076 		dev_err(dev, "Token not handled %d\n", tkn);
2077 		return -EINVAL;
2078 	}
2079 
2080 	return 0;
2081 }
2082 
2083 /*
2084  * Add pipeline by parsing the relevant tokens
2085  * Return an existing pipe if the pipe already exists.
2086  */
2087 static int skl_tplg_add_pipe(struct device *dev,
2088 		struct skl_module_cfg *mconfig, struct skl *skl,
2089 		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
2090 {
2091 	struct skl_pipeline *ppl;
2092 	struct skl_pipe *pipe;
2093 	struct skl_pipe_params *params;
2094 
2095 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2096 		if (ppl->pipe->ppl_id == tkn_elem->value) {
2097 			mconfig->pipe = ppl->pipe;
2098 			return -EEXIST;
2099 		}
2100 	}
2101 
2102 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2103 	if (!ppl)
2104 		return -ENOMEM;
2105 
2106 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2107 	if (!pipe)
2108 		return -ENOMEM;
2109 
2110 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2111 	if (!params)
2112 		return -ENOMEM;
2113 
2114 	pipe->p_params = params;
2115 	pipe->ppl_id = tkn_elem->value;
2116 	INIT_LIST_HEAD(&pipe->w_list);
2117 
2118 	ppl->pipe = pipe;
2119 	list_add(&ppl->node, &skl->ppl_list);
2120 
2121 	mconfig->pipe = pipe;
2122 	mconfig->pipe->state = SKL_PIPE_INVALID;
2123 
2124 	return 0;
2125 }
2126 
2127 static int skl_tplg_get_uuid(struct device *dev, u8 *guid,
2128 	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2129 {
2130 	if (uuid_tkn->token == SKL_TKN_UUID) {
2131 		memcpy(guid, &uuid_tkn->uuid, 16);
2132 		return 0;
2133 	}
2134 
2135 	dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);
2136 
2137 	return -EINVAL;
2138 }
2139 
2140 static int skl_tplg_fill_pin(struct device *dev,
2141 			struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2142 			struct skl_module_pin *m_pin,
2143 			int pin_index)
2144 {
2145 	int ret;
2146 
2147 	switch (tkn_elem->token) {
2148 	case SKL_TKN_U32_PIN_MOD_ID:
2149 		m_pin[pin_index].id.module_id = tkn_elem->value;
2150 		break;
2151 
2152 	case SKL_TKN_U32_PIN_INST_ID:
2153 		m_pin[pin_index].id.instance_id = tkn_elem->value;
2154 		break;
2155 
2156 	case SKL_TKN_UUID:
2157 		ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b,
2158 			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
2159 		if (ret < 0)
2160 			return ret;
2161 
2162 		break;
2163 
2164 	default:
2165 		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
2166 		return -EINVAL;
2167 	}
2168 
2169 	return 0;
2170 }
2171 
2172 /*
2173  * Parse for pin config specific tokens to fill up the
2174  * module private data
2175  */
2176 static int skl_tplg_fill_pins_info(struct device *dev,
2177 		struct skl_module_cfg *mconfig,
2178 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2179 		int dir, int pin_count)
2180 {
2181 	int ret;
2182 	struct skl_module_pin *m_pin;
2183 
2184 	switch (dir) {
2185 	case SKL_DIR_IN:
2186 		m_pin = mconfig->m_in_pin;
2187 		break;
2188 
2189 	case SKL_DIR_OUT:
2190 		m_pin = mconfig->m_out_pin;
2191 		break;
2192 
2193 	default:
2194 		dev_err(dev, "Invalid direction value\n");
2195 		return -EINVAL;
2196 	}
2197 
2198 	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
2199 	if (ret < 0)
2200 		return ret;
2201 
2202 	m_pin[pin_count].in_use = false;
2203 	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2204 
2205 	return 0;
2206 }
2207 
2208 /*
2209  * Fill up input/output module config format based
2210  * on the direction
2211  */
2212 static int skl_tplg_fill_fmt(struct device *dev,
2213 		struct skl_module_fmt *dst_fmt,
2214 		u32 tkn, u32 value)
2215 {
2216 	switch (tkn) {
2217 	case SKL_TKN_U32_FMT_CH:
2218 		dst_fmt->channels  = value;
2219 		break;
2220 
2221 	case SKL_TKN_U32_FMT_FREQ:
2222 		dst_fmt->s_freq = value;
2223 		break;
2224 
2225 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2226 		dst_fmt->bit_depth = value;
2227 		break;
2228 
2229 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2230 		dst_fmt->valid_bit_depth = value;
2231 		break;
2232 
2233 	case SKL_TKN_U32_FMT_CH_CONFIG:
2234 		dst_fmt->ch_cfg = value;
2235 		break;
2236 
2237 	case SKL_TKN_U32_FMT_INTERLEAVE:
2238 		dst_fmt->interleaving_style = value;
2239 		break;
2240 
2241 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2242 		dst_fmt->sample_type = value;
2243 		break;
2244 
2245 	case SKL_TKN_U32_FMT_CH_MAP:
2246 		dst_fmt->ch_map = value;
2247 		break;
2248 
2249 	default:
2250 		dev_err(dev, "Invalid token %d\n", tkn);
2251 		return -EINVAL;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 static int skl_tplg_widget_fill_fmt(struct device *dev,
2258 		struct skl_module_iface *fmt,
2259 		u32 tkn, u32 val, u32 dir, int fmt_idx)
2260 {
2261 	struct skl_module_fmt *dst_fmt;
2262 
2263 	if (!fmt)
2264 		return -EINVAL;
2265 
2266 	switch (dir) {
2267 	case SKL_DIR_IN:
2268 		dst_fmt = &fmt->inputs[fmt_idx].fmt;
2269 		break;
2270 
2271 	case SKL_DIR_OUT:
2272 		dst_fmt = &fmt->outputs[fmt_idx].fmt;
2273 		break;
2274 
2275 	default:
2276 		dev_err(dev, "Invalid direction: %d\n", dir);
2277 		return -EINVAL;
2278 	}
2279 
2280 	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2281 }
2282 
2283 static void skl_tplg_fill_pin_dynamic_val(
2284 		struct skl_module_pin *mpin, u32 pin_count, u32 value)
2285 {
2286 	int i;
2287 
2288 	for (i = 0; i < pin_count; i++)
2289 		mpin[i].is_dynamic = value;
2290 }
2291 
2292 /*
2293  * Resource table in the manifest has pin specific resources
2294  * like the pin id and the pin buffer size
2295  */
2296 static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
2297 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2298 		struct skl_module_res *res, int pin_idx, int dir)
2299 {
2300 	struct skl_module_pin_resources *m_pin;
2301 
2302 	switch (dir) {
2303 	case SKL_DIR_IN:
2304 		m_pin = &res->input[pin_idx];
2305 		break;
2306 
2307 	case SKL_DIR_OUT:
2308 		m_pin = &res->output[pin_idx];
2309 		break;
2310 
2311 	default:
2312 		dev_err(dev, "Invalid pin direction: %d\n", dir);
2313 		return -EINVAL;
2314 	}
2315 
2316 	switch (tkn_elem->token) {
2317 	case SKL_TKN_MM_U32_RES_PIN_ID:
2318 		m_pin->pin_index = tkn_elem->value;
2319 		break;
2320 
2321 	case SKL_TKN_MM_U32_PIN_BUF:
2322 		m_pin->buf_size = tkn_elem->value;
2323 		break;
2324 
2325 	default:
2326 		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2327 		return -EINVAL;
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 /*
2334  * Fill module specific resources from the manifest's resource
2335  * table like CPS, DMA size, mem_pages.
2336  */
2337 static int skl_tplg_fill_res_tkn(struct device *dev,
2338 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2339 		struct skl_module_res *res,
2340 		int pin_idx, int dir)
2341 {
2342 	int ret, tkn_count = 0;
2343 
2344 	if (!res)
2345 		return -EINVAL;
2346 
2347 	switch (tkn_elem->token) {
2348 	case SKL_TKN_MM_U32_CPS:
2349 		res->cps = tkn_elem->value;
2350 		break;
2351 
2352 	case SKL_TKN_MM_U32_DMA_SIZE:
2353 		res->dma_buffer_size = tkn_elem->value;
2354 		break;
2355 
2356 	case SKL_TKN_MM_U32_CPC:
2357 		res->cpc = tkn_elem->value;
2358 		break;
2359 
2360 	case SKL_TKN_U32_MEM_PAGES:
2361 		res->is_pages = tkn_elem->value;
2362 		break;
2363 
2364 	case SKL_TKN_U32_OBS:
2365 		res->obs = tkn_elem->value;
2366 		break;
2367 
2368 	case SKL_TKN_U32_IBS:
2369 		res->ibs = tkn_elem->value;
2370 		break;
2371 
2372 	case SKL_TKN_U32_MAX_MCPS:
2373 		res->cps = tkn_elem->value;
2374 		break;
2375 
2376 	case SKL_TKN_MM_U32_RES_PIN_ID:
2377 	case SKL_TKN_MM_U32_PIN_BUF:
2378 		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
2379 						    pin_idx, dir);
2380 		if (ret < 0)
2381 			return ret;
2382 		break;
2383 
2384 	default:
2385 		dev_err(dev, "Not a res type token: %d", tkn_elem->token);
2386 		return -EINVAL;
2387 
2388 	}
2389 	tkn_count++;
2390 
2391 	return tkn_count;
2392 }
2393 
2394 /*
2395  * Parse tokens to fill up the module private data
2396  */
2397 static int skl_tplg_get_token(struct device *dev,
2398 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2399 		struct skl *skl, struct skl_module_cfg *mconfig)
2400 {
2401 	int tkn_count = 0;
2402 	int ret;
2403 	static int is_pipe_exists;
2404 	static int pin_index, dir, conf_idx;
2405 	struct skl_module_iface *iface = NULL;
2406 	struct skl_module_res *res = NULL;
2407 	int res_idx = mconfig->res_idx;
2408 	int fmt_idx = mconfig->fmt_idx;
2409 
2410 	/*
2411 	 * If the manifest structure contains no modules, fill all
2412 	 * the module data at index 0.
2413 	 * res_idx and fmt_idx default to 0.
2414 	 */
2415 	if (skl->nr_modules == 0) {
2416 		res = &mconfig->module->resources[res_idx];
2417 		iface = &mconfig->module->formats[fmt_idx];
2418 	}
2419 
2420 	if (tkn_elem->token > SKL_TKN_MAX)
2421 		return -EINVAL;
2422 
2423 	switch (tkn_elem->token) {
2424 	case SKL_TKN_U8_IN_QUEUE_COUNT:
2425 		mconfig->module->max_input_pins = tkn_elem->value;
2426 		break;
2427 
2428 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
2429 		mconfig->module->max_output_pins = tkn_elem->value;
2430 		break;
2431 
2432 	case SKL_TKN_U8_DYN_IN_PIN:
2433 		if (!mconfig->m_in_pin)
2434 			mconfig->m_in_pin =
2435 				devm_kcalloc(dev, MAX_IN_QUEUE,
2436 					     sizeof(*mconfig->m_in_pin),
2437 					     GFP_KERNEL);
2438 		if (!mconfig->m_in_pin)
2439 			return -ENOMEM;
2440 
2441 		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
2442 					      tkn_elem->value);
2443 		break;
2444 
2445 	case SKL_TKN_U8_DYN_OUT_PIN:
2446 		if (!mconfig->m_out_pin)
2447 			mconfig->m_out_pin =
2448 				devm_kcalloc(dev, MAX_OUT_QUEUE,
2449 					     sizeof(*mconfig->m_out_pin),
2450 					     GFP_KERNEL);
2451 		if (!mconfig->m_out_pin)
2452 			return -ENOMEM;
2453 
2454 		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
2455 					      tkn_elem->value);
2456 		break;
2457 
2458 	case SKL_TKN_U8_TIME_SLOT:
2459 		mconfig->time_slot = tkn_elem->value;
2460 		break;
2461 
2462 	case SKL_TKN_U8_CORE_ID:
2463 		mconfig->core_id = tkn_elem->value;
2464 		break;
2465 
2466 	case SKL_TKN_U8_MOD_TYPE:
2467 		mconfig->m_type = tkn_elem->value;
2468 		break;
2469 
2470 	case SKL_TKN_U8_DEV_TYPE:
2471 		mconfig->dev_type = tkn_elem->value;
2472 		break;
2473 
2474 	case SKL_TKN_U8_HW_CONN_TYPE:
2475 		mconfig->hw_conn_type = tkn_elem->value;
2476 		break;
2477 
2478 	case SKL_TKN_U16_MOD_INST_ID:
2479 		mconfig->id.instance_id =
2480 		tkn_elem->value;
2481 		break;
2482 
2483 	case SKL_TKN_U32_MEM_PAGES:
2484 	case SKL_TKN_U32_MAX_MCPS:
2485 	case SKL_TKN_U32_OBS:
2486 	case SKL_TKN_U32_IBS:
2487 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
2488 		if (ret < 0)
2489 			return ret;
2490 
2491 		break;
2492 
2493 	case SKL_TKN_U32_VBUS_ID:
2494 		mconfig->vbus_id = tkn_elem->value;
2495 		break;
2496 
2497 	case SKL_TKN_U32_PARAMS_FIXUP:
2498 		mconfig->params_fixup = tkn_elem->value;
2499 		break;
2500 
2501 	case SKL_TKN_U32_CONVERTER:
2502 		mconfig->converter = tkn_elem->value;
2503 		break;
2504 
2505 	case SKL_TKN_U32_D0I3_CAPS:
2506 		mconfig->d0i3_caps = tkn_elem->value;
2507 		break;
2508 
2509 	case SKL_TKN_U32_PIPE_ID:
2510 		ret = skl_tplg_add_pipe(dev,
2511 				mconfig, skl, tkn_elem);
2512 
2513 		if (ret < 0) {
2514 			if (ret == -EEXIST) {
2515 				is_pipe_exists = 1;
2516 				break;
2517 			}
2518 			return is_pipe_exists;
2519 		}
2520 
2521 		break;
2522 
2523 	case SKL_TKN_U32_PIPE_CONFIG_ID:
2524 		conf_idx = tkn_elem->value;
2525 		break;
2526 
2527 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2528 	case SKL_TKN_U32_PIPE_PRIORITY:
2529 	case SKL_TKN_U32_PIPE_MEM_PGS:
2530 	case SKL_TKN_U32_PMODE:
2531 	case SKL_TKN_U32_PIPE_DIRECTION:
2532 	case SKL_TKN_U32_NUM_CONFIGS:
2533 		if (is_pipe_exists) {
2534 			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2535 					tkn_elem->token, tkn_elem->value);
2536 			if (ret < 0)
2537 				return ret;
2538 		}
2539 
2540 		break;
2541 
2542 	case SKL_TKN_U32_PATH_MEM_PGS:
2543 	case SKL_TKN_U32_CFG_FREQ:
2544 	case SKL_TKN_U8_CFG_CHAN:
2545 	case SKL_TKN_U8_CFG_BPS:
2546 		if (mconfig->pipe->nr_cfgs) {
2547 			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
2548 					tkn_elem->token, tkn_elem->value,
2549 					conf_idx, dir);
2550 			if (ret < 0)
2551 				return ret;
2552 		}
2553 		break;
2554 
2555 	case SKL_TKN_CFG_MOD_RES_ID:
2556 		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
2557 		break;
2558 
2559 	case SKL_TKN_CFG_MOD_FMT_ID:
2560 		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
2561 		break;
2562 
2563 	/*
2564 	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
2565 	 * direction and the pin count. The lower nibble carries the
2566 	 * direction and the upper nibble the pin count.
2567 	 */
2568 	case SKL_TKN_U32_DIR_PIN_COUNT:
2569 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2570 		pin_index = (tkn_elem->value &
2571 			SKL_PIN_COUNT_MASK) >> 4;
2572 
2573 		break;
2574 
2575 	case SKL_TKN_U32_FMT_CH:
2576 	case SKL_TKN_U32_FMT_FREQ:
2577 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2578 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2579 	case SKL_TKN_U32_FMT_CH_CONFIG:
2580 	case SKL_TKN_U32_FMT_INTERLEAVE:
2581 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2582 	case SKL_TKN_U32_FMT_CH_MAP:
2583 		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
2584 				tkn_elem->value, dir, pin_index);
2585 
2586 		if (ret < 0)
2587 			return ret;
2588 
2589 		break;
2590 
2591 	case SKL_TKN_U32_PIN_MOD_ID:
2592 	case SKL_TKN_U32_PIN_INST_ID:
2593 	case SKL_TKN_UUID:
2594 		ret = skl_tplg_fill_pins_info(dev,
2595 				mconfig, tkn_elem, dir,
2596 				pin_index);
2597 		if (ret < 0)
2598 			return ret;
2599 
2600 		break;
2601 
2602 	case SKL_TKN_U32_CAPS_SIZE:
2603 		mconfig->formats_config.caps_size =
2604 			tkn_elem->value;
2605 
2606 		break;
2607 
2608 	case SKL_TKN_U32_CAPS_SET_PARAMS:
2609 		mconfig->formats_config.set_params =
2610 				tkn_elem->value;
2611 		break;
2612 
2613 	case SKL_TKN_U32_CAPS_PARAMS_ID:
2614 		mconfig->formats_config.param_id =
2615 				tkn_elem->value;
2616 		break;
2617 
2618 	case SKL_TKN_U32_PROC_DOMAIN:
2619 		mconfig->domain =
2620 			tkn_elem->value;
2621 
2622 		break;
2623 
2624 	case SKL_TKN_U32_DMA_BUF_SIZE:
2625 		mconfig->dma_buffer_size = tkn_elem->value;
2626 		break;
2627 
2628 	case SKL_TKN_U8_IN_PIN_TYPE:
2629 	case SKL_TKN_U8_OUT_PIN_TYPE:
2630 	case SKL_TKN_U8_CONN_TYPE:
2631 		break;
2632 
2633 	default:
2634 		dev_err(dev, "Token %d not handled\n",
2635 				tkn_elem->token);
2636 		return -EINVAL;
2637 	}
2638 
2639 	tkn_count++;
2640 
2641 	return tkn_count;
2642 }
2643 
2644 /*
2645  * Parse the vendor array for specific tokens to construct
2646  * module private data
2647  */
2648 static int skl_tplg_get_tokens(struct device *dev,
2649 		char *pvt_data,	struct skl *skl,
2650 		struct skl_module_cfg *mconfig, int block_size)
2651 {
2652 	struct snd_soc_tplg_vendor_array *array;
2653 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2654 	int tkn_count = 0, ret;
2655 	int off = 0, tuple_size = 0;
2656 	bool is_module_guid = true;
2657 
2658 	if (block_size <= 0)
2659 		return -EINVAL;
2660 
2661 	while (tuple_size < block_size) {
2662 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2663 
2664 		off += array->size;
2665 
2666 		switch (array->type) {
2667 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2668 			dev_warn(dev, "no string tokens expected for skl tplg\n");
2669 			continue;
2670 
2671 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2672 			if (is_module_guid) {
2673 				ret = skl_tplg_get_uuid(dev, mconfig->guid,
2674 							array->uuid);
2675 				is_module_guid = false;
2676 			} else {
2677 				ret = skl_tplg_get_token(dev, array->value, skl,
2678 							 mconfig);
2679 			}
2680 
2681 			if (ret < 0)
2682 				return ret;
2683 
2684 			tuple_size += sizeof(*array->uuid);
2685 
2686 			continue;
2687 
2688 		default:
2689 			tkn_elem = array->value;
2690 			tkn_count = 0;
2691 			break;
2692 		}
2693 
2694 		while (tkn_count <= (array->num_elems - 1)) {
2695 			ret = skl_tplg_get_token(dev, tkn_elem,
2696 					skl, mconfig);
2697 
2698 			if (ret < 0)
2699 				return ret;
2700 
2701 			tkn_count = tkn_count + ret;
2702 			tkn_elem++;
2703 		}
2704 
2705 		tuple_size += tkn_count * sizeof(*tkn_elem);
2706 	}
2707 
2708 	return off;
2709 }
2710 
2711 /*
2712  * Every data block is preceded by a descriptor giving the number
2713  * of data blocks, the type of the block and its size
2714  */
2715 static int skl_tplg_get_desc_blocks(struct device *dev,
2716 		struct snd_soc_tplg_vendor_array *array)
2717 {
2718 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2719 
2720 	tkn_elem = array->value;
2721 
2722 	switch (tkn_elem->token) {
2723 	case SKL_TKN_U8_NUM_BLOCKS:
2724 	case SKL_TKN_U8_BLOCK_TYPE:
2725 	case SKL_TKN_U16_BLOCK_SIZE:
2726 		return tkn_elem->value;
2727 
2728 	default:
2729 		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2730 		break;
2731 	}
2732 
2733 	return -EINVAL;
2734 }
2735 
2736 /* Functions to parse private data from configuration file format v4 */
2737 
2738 /*
2739  * Add pipeline from topology binary into driver pipeline list
2740  *
2741  * If already added we return that instance
2742  * Otherwise we create a new instance and add into driver list
2743  */
2744 static int skl_tplg_add_pipe_v4(struct device *dev,
2745 				struct skl_module_cfg *mconfig, struct skl *skl,
2746 				struct skl_dfw_v4_pipe *dfw_pipe)
2747 {
2748 	struct skl_pipeline *ppl;
2749 	struct skl_pipe *pipe;
2750 	struct skl_pipe_params *params;
2751 
2752 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2753 		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
2754 			mconfig->pipe = ppl->pipe;
2755 			return 0;
2756 		}
2757 	}
2758 
2759 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2760 	if (!ppl)
2761 		return -ENOMEM;
2762 
2763 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2764 	if (!pipe)
2765 		return -ENOMEM;
2766 
2767 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2768 	if (!params)
2769 		return -ENOMEM;
2770 
2771 	pipe->ppl_id = dfw_pipe->pipe_id;
2772 	pipe->memory_pages = dfw_pipe->memory_pages;
2773 	pipe->pipe_priority = dfw_pipe->pipe_priority;
2774 	pipe->conn_type = dfw_pipe->conn_type;
2775 	pipe->state = SKL_PIPE_INVALID;
2776 	pipe->p_params = params;
2777 	INIT_LIST_HEAD(&pipe->w_list);
2778 
2779 	ppl->pipe = pipe;
2780 	list_add(&ppl->node, &skl->ppl_list);
2781 
2782 	mconfig->pipe = pipe;
2783 
2784 	return 0;
2785 }
2786 
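/*
 * Copy the module/instance ids from the v4 firmware pin descriptors
 * and reset the run-time state of each pin.
 */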
2787 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
2788 					struct skl_module_pin *m_pin,
2789 					bool is_dynamic, int max_pin)
2790 {
2791 	int i;
2792 
2793 	for (i = 0; i < max_pin; i++) {
2794 		m_pin[i].id.module_id = dfw_pin[i].module_id;
2795 		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
2796 		m_pin[i].in_use = false;
2797 		m_pin[i].is_dynamic = is_dynamic;
2798 		m_pin[i].pin_state = SKL_PIN_UNBIND;
2799 	}
2800 }
2801 
2802 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
2803 				 struct skl_dfw_v4_module_fmt *src_fmt,
2804 				 int pins)
2805 {
2806 	int i;
2807 
2808 	for (i = 0; i < pins; i++) {
2809 		dst_fmt[i].fmt.channels  = src_fmt[i].channels;
2810 		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
2811 		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
2812 		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
2813 		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
2814 		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
2815 		dst_fmt[i].fmt.interleaving_style =
2816 						src_fmt[i].interleaving_style;
2817 		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
2818 	}
2819 }
2820 
2821 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
2822 				    struct skl *skl, struct device *dev,
2823 				    struct skl_module_cfg *mconfig)
2824 {
2825 	struct skl_dfw_v4_module *dfw =
2826 				(struct skl_dfw_v4_module *)tplg_w->priv.data;
2827 	int ret;
2828 
2829 	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
2830 
2831 	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
2832 	if (ret)
2833 		return ret;
2834 	mconfig->id.module_id = -1;
2835 	mconfig->id.instance_id = dfw->instance_id;
2836 	mconfig->module->resources[0].cps = dfw->max_mcps;
2837 	mconfig->module->resources[0].ibs = dfw->ibs;
2838 	mconfig->module->resources[0].obs = dfw->obs;
2839 	mconfig->core_id = dfw->core_id;
2840 	mconfig->module->max_input_pins = dfw->max_in_queue;
2841 	mconfig->module->max_output_pins = dfw->max_out_queue;
2842 	mconfig->module->loadable = dfw->is_loadable;
2843 	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
2844 			     MAX_IN_QUEUE);
2845 	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
2846 			     MAX_OUT_QUEUE);
2847 
2848 	mconfig->params_fixup = dfw->params_fixup;
2849 	mconfig->converter = dfw->converter;
2850 	mconfig->m_type = dfw->module_type;
2851 	mconfig->vbus_id = dfw->vbus_id;
2852 	mconfig->module->resources[0].is_pages = dfw->mem_pages;
2853 
2854 	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
2855 	if (ret)
2856 		return ret;
2857 
2858 	mconfig->dev_type = dfw->dev_type;
2859 	mconfig->hw_conn_type = dfw->hw_conn_type;
2860 	mconfig->time_slot = dfw->time_slot;
2861 	mconfig->formats_config.caps_size = dfw->caps.caps_size;
2862 
2863 	mconfig->m_in_pin = devm_kcalloc(dev,
2864 				MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
2865 				GFP_KERNEL);
2866 	if (!mconfig->m_in_pin)
2867 		return -ENOMEM;
2868 
2869 	mconfig->m_out_pin = devm_kcalloc(dev,
2870 				MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
2871 				GFP_KERNEL);
2872 	if (!mconfig->m_out_pin)
2873 		return -ENOMEM;
2874 
2875 	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
2876 				    dfw->is_dynamic_in_pin,
2877 				    mconfig->module->max_input_pins);
2878 	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
2879 				    dfw->is_dynamic_out_pin,
2880 				    mconfig->module->max_output_pins);
2881 
2882 	if (mconfig->formats_config.caps_size) {
2883 		mconfig->formats_config.set_params = dfw->caps.set_params;
2884 		mconfig->formats_config.param_id = dfw->caps.param_id;
2885 		mconfig->formats_config.caps =
2886 		devm_kzalloc(dev, mconfig->formats_config.caps_size,
2887 			     GFP_KERNEL);
2888 		if (!mconfig->formats_config.caps)
2889 			return -ENOMEM;
2890 		memcpy(mconfig->formats_config.caps, dfw->caps.caps,
2891 		       dfw->caps.caps_size);
2892 	}
2893 
2894 	return 0;
2895 }
2896 
2897 /*
2898  * Parse the private data for the token and corresponding value.
2899  * The private data can have multiple data blocks. So, a data block
2900  * is preceded by a descriptor for number of blocks and a descriptor
2901  * for the type and size of the succeeding data block.
2902  */
2903 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2904 				struct skl *skl, struct device *dev,
2905 				struct skl_module_cfg *mconfig)
2906 {
2907 	struct snd_soc_tplg_vendor_array *array;
2908 	int num_blocks, block_size = 0, block_type, off = 0;
2909 	char *data;
2910 	int ret;
2911 
2912 	/*
2913 	 * v4 configuration files have a valid UUID at the start of
2914 	 * the widget's private data.
2915 	 */
2916 	if (uuid_is_valid((char *)tplg_w->priv.data))
2917 		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);
2918 
2919 	/* Read the NUM_DATA_BLOCKS descriptor */
2920 	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2921 	ret = skl_tplg_get_desc_blocks(dev, array);
2922 	if (ret < 0)
2923 		return ret;
2924 	num_blocks = ret;
2925 
2926 	off += array->size;
2927 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2928 	while (num_blocks > 0) {
2929 		array = (struct snd_soc_tplg_vendor_array *)
2930 				(tplg_w->priv.data + off);
2931 
2932 		ret = skl_tplg_get_desc_blocks(dev, array);
2933 
2934 		if (ret < 0)
2935 			return ret;
2936 		block_type = ret;
2937 		off += array->size;
2938 
2939 		array = (struct snd_soc_tplg_vendor_array *)
2940 			(tplg_w->priv.data + off);
2941 
2942 		ret = skl_tplg_get_desc_blocks(dev, array);
2943 
2944 		if (ret < 0)
2945 			return ret;
2946 		block_size = ret;
2947 		off += array->size;
2948 
2949 		array = (struct snd_soc_tplg_vendor_array *)
2950 			(tplg_w->priv.data + off);
2951 
2952 		data = (tplg_w->priv.data + off);
2953 
2954 		if (block_type == SKL_TYPE_TUPLE) {
2955 			ret = skl_tplg_get_tokens(dev, data,
2956 					skl, mconfig, block_size);
2957 
2958 			if (ret < 0)
2959 				return ret;
2960 
2961 			--num_blocks;
2962 		} else {
2963 			if (mconfig->formats_config.caps_size > 0)
2964 				memcpy(mconfig->formats_config.caps, data,
2965 					mconfig->formats_config.caps_size);
2966 			--num_blocks;
2967 			ret = mconfig->formats_config.caps_size;
2968 		}
2969 		off += ret;
2970 	}
2971 
2972 	return 0;
2973 }
2974 
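/*
 * Reset the run-time pin and pipe state of a widget so the topology can
 * be set up again; only widgets belonging to this component are touched.
 */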
2975 static void skl_clear_pin_config(struct snd_soc_component *component,
2976 				struct snd_soc_dapm_widget *w)
2977 {
2978 	int i;
2979 	struct skl_module_cfg *mconfig;
2980 	struct skl_pipe *pipe;
2981 
2982 	if (!strncmp(w->dapm->component->name, component->name,
2983 					strlen(component->name))) {
2984 		mconfig = w->priv;
2985 		pipe = mconfig->pipe;
2986 		for (i = 0; i < mconfig->module->max_input_pins; i++) {
2987 			mconfig->m_in_pin[i].in_use = false;
2988 			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2989 		}
2990 		for (i = 0; i < mconfig->module->max_output_pins; i++) {
2991 			mconfig->m_out_pin[i].in_use = false;
2992 			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2993 		}
2994 		pipe->state = SKL_PIPE_INVALID;
2995 		mconfig->m_state = SKL_MODULE_UNINIT;
2996 	}
2997 }
2998 
2999 void skl_cleanup_resources(struct skl *skl)
3000 {
3001 	struct skl_sst *ctx = skl->skl_sst;
3002 	struct snd_soc_component *soc_component = skl->component;
3003 	struct snd_soc_dapm_widget *w;
3004 	struct snd_soc_card *card;
3005 
3006 	if (soc_component == NULL)
3007 		return;
3008 
3009 	card = soc_component->card;
3010 	if (!card || !card->instantiated)
3011 		return;
3012 
3013 	skl->resource.mem = 0;
3014 	skl->resource.mcps = 0;
3015 
3016 	list_for_each_entry(w, &card->widgets, list) {
3017 		if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL)
3018 			skl_clear_pin_config(soc_component, w);
3019 	}
3020 
3021 	skl_clear_module_cnt(ctx->dsp);
3022 }
3023 
3024 /*
3025  * Topology core widget load callback
3026  *
3027  * This is used to save the private data of each widget, which gives the
3028  * driver information about the module and pipeline parameters the DSP
3029  * FW expects, such as ids, resource values, formats etc
3030  */
3031 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
3032 				struct snd_soc_dapm_widget *w,
3033 				struct snd_soc_tplg_dapm_widget *tplg_w)
3034 {
3035 	int ret;
3036 	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3037 	struct skl *skl = bus_to_skl(bus);
3038 	struct skl_module_cfg *mconfig;
3039 
3040 	if (!tplg_w->priv.size)
3041 		goto bind_event;
3042 
3043 	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
3044 
3045 	if (!mconfig)
3046 		return -ENOMEM;
3047 
3048 	if (skl->nr_modules == 0) {
3049 		mconfig->module = devm_kzalloc(bus->dev,
3050 				sizeof(*mconfig->module), GFP_KERNEL);
3051 		if (!mconfig->module)
3052 			return -ENOMEM;
3053 	}
3054 
3055 	w->priv = mconfig;
3056 
3057 	/*
3058 	 * module binary can be loaded later, so set the id to be queried
3059 	 * when the module is loaded for a use case
3060 	 */
3061 	mconfig->id.module_id = -1;
3062 
3063 	/* Parse private data for tuples */
3064 	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
3065 	if (ret < 0)
3066 		return ret;
3067 
3068 	skl_debug_init_module(skl->debugfs, w, mconfig);
3069 
3070 bind_event:
3071 	if (tplg_w->event_type == 0) {
3072 		dev_dbg(bus->dev, "ASoC: No event handler required\n");
3073 		return 0;
3074 	}
3075 
3076 	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
3077 					ARRAY_SIZE(skl_tplg_widget_ops),
3078 					tplg_w->event_type);
3079 
3080 	if (ret) {
3081 		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
3082 					__func__, tplg_w->event_type);
3083 		return -EINVAL;
3084 	}
3085 
3086 	return 0;
3087 }
3088 
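/*
 * Allocate the driver-side data for a bytes control and copy the
 * defaults shipped in the control's topology private data.
 */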
3089 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
3090 					struct snd_soc_tplg_bytes_control *bc)
3091 {
3092 	struct skl_algo_data *ac;
3093 	struct skl_dfw_algo_data *dfw_ac =
3094 				(struct skl_dfw_algo_data *)bc->priv.data;
3095 
3096 	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
3097 	if (!ac)
3098 		return -ENOMEM;
3099 
3100 	/* Fill private data */
3101 	ac->max = dfw_ac->max;
3102 	ac->param_id = dfw_ac->param_id;
3103 	ac->set_params = dfw_ac->set_params;
3104 	ac->size = dfw_ac->max;
3105 
3106 	if (ac->max) {
3107 		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
3108 		if (!ac->params)
3109 			return -ENOMEM;
3110 
3111 		memcpy(ac->params, dfw_ac->params, ac->max);
3112 	}
3113 
3114 	be->dobj.private  = ac;
3115 	return 0;
3116 }
3117 
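/* Stash a copy of the enum control's private data for later use */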
3118 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
3119 				struct snd_soc_tplg_enum_control *ec)
3120 {
3121 
3122 	void *data;
3123 
3124 	if (ec->priv.size) {
3125 		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
3126 		if (!data)
3127 			return -ENOMEM;
3128 		memcpy(data, ec->priv.data, ec->priv.size);
3129 		se->dobj.private = data;
3130 	}
3131 
3132 	return 0;
3133 
3134 }
3135 
3136 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
3137 				int index,
3138 				struct snd_kcontrol_new *kctl,
3139 				struct snd_soc_tplg_ctl_hdr *hdr)
3140 {
3141 	struct soc_bytes_ext *sb;
3142 	struct snd_soc_tplg_bytes_control *tplg_bc;
3143 	struct snd_soc_tplg_enum_control *tplg_ec;
3144 	struct hdac_bus *bus  = snd_soc_component_get_drvdata(cmpnt);
3145 	struct soc_enum *se;
3146 
3147 	switch (hdr->ops.info) {
3148 	case SND_SOC_TPLG_CTL_BYTES:
3149 		tplg_bc = container_of(hdr,
3150 				struct snd_soc_tplg_bytes_control, hdr);
3151 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
3152 			sb = (struct soc_bytes_ext *)kctl->private_value;
3153 			if (tplg_bc->priv.size)
3154 				return skl_init_algo_data(
3155 						bus->dev, sb, tplg_bc);
3156 		}
3157 		break;
3158 
3159 	case SND_SOC_TPLG_CTL_ENUM:
3160 		tplg_ec = container_of(hdr,
3161 				struct snd_soc_tplg_enum_control, hdr);
3162 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
3163 			se = (struct soc_enum *)kctl->private_value;
3164 			if (tplg_ec->priv.size)
3165 				return skl_init_enum_data(bus->dev, se,
3166 						tplg_ec);
3167 		}
3168 		break;
3169 
3170 	default:
3171 		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
3172 			hdr->ops.get, hdr->ops.put, hdr->ops.info);
3173 		break;
3174 	}
3175 
3176 	return 0;
3177 }
3178 
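/*
 * Parse a manifest string token; currently only library names are
 * carried as strings and they are stored in lib_info in the order
 * they appear.
 */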
3179 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3180 		struct snd_soc_tplg_vendor_string_elem *str_elem,
3181 		struct skl *skl)
3182 {
3183 	int tkn_count = 0;
3184 	static int ref_count;
3185 
3186 	switch (str_elem->token) {
3187 	case SKL_TKN_STR_LIB_NAME:
3188 		if (ref_count > skl->skl_sst->lib_count - 1) {
3189 			ref_count = 0;
3190 			return -EINVAL;
3191 		}
3192 
3193 		strncpy(skl->skl_sst->lib_info[ref_count].name,
3194 			str_elem->string,
3195 			ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
3196 		ref_count++;
3197 		break;
3198 
3199 	default:
3200 		dev_err(dev, "Not a string token %d\n", str_elem->token);
3201 		break;
3202 	}
3203 	tkn_count++;
3204 
3205 	return tkn_count;
3206 }
3207 
3208 static int skl_tplg_get_str_tkn(struct device *dev,
3209 		struct snd_soc_tplg_vendor_array *array,
3210 		struct skl *skl)
3211 {
3212 	int tkn_count = 0, ret;
3213 	struct snd_soc_tplg_vendor_string_elem *str_elem;
3214 
3215 	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
3216 	while (tkn_count < array->num_elems) {
3217 		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
3218 		str_elem++;
3219 
3220 		if (ret < 0)
3221 			return ret;
3222 
3223 		tkn_count = tkn_count + ret;
3224 	}
3225 
3226 	return tkn_count;
3227 }
3228 
3229 static int skl_tplg_manifest_fill_fmt(struct device *dev,
3230 		struct skl_module_iface *fmt,
3231 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3232 		u32 dir, int fmt_idx)
3233 {
3234 	struct skl_module_pin_fmt *dst_fmt;
3235 	struct skl_module_fmt *mod_fmt;
3236 	int ret;
3237 
3238 	if (!fmt)
3239 		return -EINVAL;
3240 
3241 	switch (dir) {
3242 	case SKL_DIR_IN:
3243 		dst_fmt = &fmt->inputs[fmt_idx];
3244 		break;
3245 
3246 	case SKL_DIR_OUT:
3247 		dst_fmt = &fmt->outputs[fmt_idx];
3248 		break;
3249 
3250 	default:
3251 		dev_err(dev, "Invalid direction: %d\n", dir);
3252 		return -EINVAL;
3253 	}
3254 
3255 	mod_fmt = &dst_fmt->fmt;
3256 
3257 	switch (tkn_elem->token) {
3258 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3259 		dst_fmt->id = tkn_elem->value;
3260 		break;
3261 
3262 	default:
3263 		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
3264 					tkn_elem->value);
3265 		if (ret < 0)
3266 			return ret;
3267 		break;
3268 	}
3269 
3270 	return 0;
3271 }
3272 
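/*
 * Fill the per-module bookkeeping from the manifest: pin types, maximum
 * pin counts and the number of resource/interface table entries.
 */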
3273 static int skl_tplg_fill_mod_info(struct device *dev,
3274 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3275 		struct skl_module *mod)
3276 {
3277 
3278 	if (!mod)
3279 		return -EINVAL;
3280 
3281 	switch (tkn_elem->token) {
3282 	case SKL_TKN_U8_IN_PIN_TYPE:
3283 		mod->input_pin_type = tkn_elem->value;
3284 		break;
3285 
3286 	case SKL_TKN_U8_OUT_PIN_TYPE:
3287 		mod->output_pin_type = tkn_elem->value;
3288 		break;
3289 
3290 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3291 		mod->max_input_pins = tkn_elem->value;
3292 		break;
3293 
3294 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3295 		mod->max_output_pins = tkn_elem->value;
3296 		break;
3297 
3298 	case SKL_TKN_MM_U8_NUM_RES:
3299 		mod->nr_resources = tkn_elem->value;
3300 		break;
3301 
3302 	case SKL_TKN_MM_U8_NUM_INTF:
3303 		mod->nr_interfaces = tkn_elem->value;
3304 		break;
3305 
3306 	default:
3307 		dev_err(dev, "Invalid mod info token %d", tkn_elem->token);
3308 		return -EINVAL;
3309 	}
3310 
3311 	return 0;
3312 }
3313 
3314 
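/*
 * Parse one integer-valued manifest token. The static indices remember
 * which module, resource and interface entry is currently being filled
 * across successive calls.
 */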
3315 static int skl_tplg_get_int_tkn(struct device *dev,
3316 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3317 		struct skl *skl)
3318 {
3319 	int tkn_count = 0, ret, size;
3320 	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
3321 	struct skl_module_res *res = NULL;
3322 	struct skl_module_iface *fmt = NULL;
3323 	struct skl_module *mod = NULL;
3324 	static struct skl_astate_param *astate_table;
3325 	static int astate_cfg_idx, count;
3326 	int i;
3327 
3328 	if (skl->modules) {
3329 		mod = skl->modules[mod_idx];
3330 		res = &mod->resources[res_val_idx];
3331 		fmt = &mod->formats[intf_val_idx];
3332 	}
3333 
3334 	switch (tkn_elem->token) {
3335 	case SKL_TKN_U32_LIB_COUNT:
3336 		skl->skl_sst->lib_count = tkn_elem->value;
3337 		break;
3338 
3339 	case SKL_TKN_U8_NUM_MOD:
3340 		skl->nr_modules = tkn_elem->value;
3341 		skl->modules = devm_kcalloc(dev, skl->nr_modules,
3342 				sizeof(*skl->modules), GFP_KERNEL);
3343 		if (!skl->modules)
3344 			return -ENOMEM;
3345 
3346 		for (i = 0; i < skl->nr_modules; i++) {
3347 			skl->modules[i] = devm_kzalloc(dev,
3348 					sizeof(struct skl_module), GFP_KERNEL);
3349 			if (!skl->modules[i])
3350 				return -ENOMEM;
3351 		}
3352 		break;
3353 
3354 	case SKL_TKN_MM_U8_MOD_IDX:
3355 		mod_idx = tkn_elem->value;
3356 		break;
3357 
3358 	case SKL_TKN_U32_ASTATE_COUNT:
3359 		if (astate_table != NULL) {
3360 			dev_err(dev, "More than one entry for A-State count");
3361 			return -EINVAL;
3362 		}
3363 
3364 		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
3365 			dev_err(dev, "Invalid A-State count %d\n",
3366 				tkn_elem->value);
3367 			return -EINVAL;
3368 		}
3369 
3370 		size = tkn_elem->value * sizeof(struct skl_astate_param) +
3371 				sizeof(count);
3372 		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
3373 		if (!skl->cfg.astate_cfg)
3374 			return -ENOMEM;
3375 
3376 		astate_table = skl->cfg.astate_cfg->astate_table;
3377 		count = skl->cfg.astate_cfg->count = tkn_elem->value;
3378 		break;
3379 
3380 	case SKL_TKN_U32_ASTATE_IDX:
3381 		if (tkn_elem->value >= count) {
3382 			dev_err(dev, "Invalid A-State index %d\n",
3383 				tkn_elem->value);
3384 			return -EINVAL;
3385 		}
3386 
3387 		astate_cfg_idx = tkn_elem->value;
3388 		break;
3389 
3390 	case SKL_TKN_U32_ASTATE_KCPS:
3391 		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
3392 		break;
3393 
3394 	case SKL_TKN_U32_ASTATE_CLK_SRC:
3395 		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
3396 		break;
3397 
3398 	case SKL_TKN_U8_IN_PIN_TYPE:
3399 	case SKL_TKN_U8_OUT_PIN_TYPE:
3400 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3401 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3402 	case SKL_TKN_MM_U8_NUM_RES:
3403 	case SKL_TKN_MM_U8_NUM_INTF:
3404 		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
3405 		if (ret < 0)
3406 			return ret;
3407 		break;
3408 
3409 	case SKL_TKN_U32_DIR_PIN_COUNT:
3410 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
3411 		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
3412 		break;
3413 
3414 	case SKL_TKN_MM_U32_RES_ID:
3415 		if (!res)
3416 			return -EINVAL;
3417 
3418 		res->id = tkn_elem->value;
3419 		res_val_idx = tkn_elem->value;
3420 		break;
3421 
3422 	case SKL_TKN_MM_U32_FMT_ID:
3423 		if (!fmt)
3424 			return -EINVAL;
3425 
3426 		fmt->fmt_idx = tkn_elem->value;
3427 		intf_val_idx = tkn_elem->value;
3428 		break;
3429 
3430 	case SKL_TKN_MM_U32_CPS:
3431 	case SKL_TKN_MM_U32_DMA_SIZE:
3432 	case SKL_TKN_MM_U32_CPC:
3433 	case SKL_TKN_U32_MEM_PAGES:
3434 	case SKL_TKN_U32_OBS:
3435 	case SKL_TKN_U32_IBS:
3436 	case SKL_TKN_MM_U32_RES_PIN_ID:
3437 	case SKL_TKN_MM_U32_PIN_BUF:
3438 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
3439 		if (ret < 0)
3440 			return ret;
3441 
3442 		break;
3443 
3444 	case SKL_TKN_MM_U32_NUM_IN_FMT:
3445 		if (!res)
3446 			return -EINVAL;
3447 
3448 		res->nr_input_pins = tkn_elem->value;
3449 		break;
3450 
3451 	case SKL_TKN_MM_U32_NUM_OUT_FMT:
3452 		if (!res)
3453 			return -EINVAL;
3454 
3455 		res->nr_output_pins = tkn_elem->value;
3456 		break;
3457 
3458 	case SKL_TKN_U32_FMT_CH:
3459 	case SKL_TKN_U32_FMT_FREQ:
3460 	case SKL_TKN_U32_FMT_BIT_DEPTH:
3461 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
3462 	case SKL_TKN_U32_FMT_CH_CONFIG:
3463 	case SKL_TKN_U32_FMT_INTERLEAVE:
3464 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
3465 	case SKL_TKN_U32_FMT_CH_MAP:
3466 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3467 		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
3468 						 dir, pin_idx);
3469 		if (ret < 0)
3470 			return ret;
3471 		break;
3472 
3473 	default:
3474 		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
3475 		return -EINVAL;
3476 	}
3477 	tkn_count++;
3478 
3479 	return tkn_count;
3480 }
3481 
3482 static int skl_tplg_get_manifest_uuid(struct device *dev,
3483 				struct skl *skl,
3484 				struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
3485 {
3486 	static int ref_count;
3487 	struct skl_module *mod;
3488 
3489 	if (uuid_tkn->token == SKL_TKN_UUID) {
3490 		mod = skl->modules[ref_count];
3491 		memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid));
3492 		ref_count++;
3493 	} else {
3494 		dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
3495 		return -EINVAL;
3496 	}
3497 
3498 	return 0;
3499 }
3500 
3501 /*
3502  * Fill the manifest structure by parsing the tokens based on the
3503  * type.
3504  */
3505 static int skl_tplg_get_manifest_tkn(struct device *dev,
3506 		char *pvt_data, struct skl *skl,
3507 		int block_size)
3508 {
3509 	int tkn_count = 0, ret;
3510 	int off = 0, tuple_size = 0;
3511 	struct snd_soc_tplg_vendor_array *array;
3512 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
3513 
3514 	if (block_size <= 0)
3515 		return -EINVAL;
3516 
3517 	while (tuple_size < block_size) {
3518 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
3519 		off += array->size;
3520 		switch (array->type) {
3521 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
3522 			ret = skl_tplg_get_str_tkn(dev, array, skl);
3523 
3524 			if (ret < 0)
3525 				return ret;
3526 			tkn_count = ret;
3527 
3528 			tuple_size += tkn_count *
3529 				sizeof(struct snd_soc_tplg_vendor_string_elem);
3530 			continue;
3531 
3532 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
3533 			ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid);
3534 			if (ret < 0)
3535 				return ret;
3536 
3537 			tuple_size += sizeof(*array->uuid);
3538 			continue;
3539 
3540 		default:
3541 			tkn_elem = array->value;
3542 			tkn_count = 0;
3543 			break;
3544 		}
3545 
3546 		while (tkn_count <= array->num_elems - 1) {
3547 			ret = skl_tplg_get_int_tkn(dev,
3548 					tkn_elem, skl);
3549 			if (ret < 0)
3550 				return ret;
3551 
3552 			tkn_count = tkn_count + ret;
3553 			tkn_elem++;
3554 		}
3555 		tuple_size += (tkn_count * sizeof(*tkn_elem));
3556 		tkn_count = 0;
3557 	}
3558 
3559 	return off;
3560 }
3561 
3562 /*
3563  * Parse manifest private data for tokens. The private data block is
3564  * preceded by descriptors for type and size of data block.
3565  */
3566 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
3567 			struct device *dev, struct skl *skl)
3568 {
3569 	struct snd_soc_tplg_vendor_array *array;
3570 	int num_blocks, block_size = 0, block_type, off = 0;
3571 	char *data;
3572 	int ret;
3573 
3574 	/* Read the NUM_DATA_BLOCKS descriptor */
3575 	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
3576 	ret = skl_tplg_get_desc_blocks(dev, array);
3577 	if (ret < 0)
3578 		return ret;
3579 	num_blocks = ret;
3580 
3581 	off += array->size;
3582 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
3583 	while (num_blocks > 0) {
3584 		array = (struct snd_soc_tplg_vendor_array *)
3585 				(manifest->priv.data + off);
3586 		ret = skl_tplg_get_desc_blocks(dev, array);
3587 
3588 		if (ret < 0)
3589 			return ret;
3590 		block_type = ret;
3591 		off += array->size;
3592 
3593 		array = (struct snd_soc_tplg_vendor_array *)
3594 			(manifest->priv.data + off);
3595 
3596 		ret = skl_tplg_get_desc_blocks(dev, array);
3597 
3598 		if (ret < 0)
3599 			return ret;
3600 		block_size = ret;
3601 		off += array->size;
3602 
3603 		array = (struct snd_soc_tplg_vendor_array *)
3604 			(manifest->priv.data + off);
3605 
3606 		data = (manifest->priv.data + off);
3607 
3608 		if (block_type == SKL_TYPE_TUPLE) {
3609 			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
3610 					block_size);
3611 
3612 			if (ret < 0)
3613 				return ret;
3614 
3615 			--num_blocks;
3616 		} else {
3617 			return -EINVAL;
3618 		}
3619 		off += ret;
3620 	}
3621 
3622 	return 0;
3623 }
3624 
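/*
 * Example (illustrative) of the manifest private data consumed above.
 * Each descriptor is itself a small vendor tuple array decoded by
 * skl_tplg_get_desc_blocks():
 *
 *	SKL_TKN_U8_NUM_BLOCKS  = 1
 *	SKL_TKN_U8_BLOCK_TYPE  = SKL_TYPE_TUPLE
 *	SKL_TKN_U16_BLOCK_SIZE = <bytes of tuple data>
 *	<tuple data>           -> skl_tplg_get_manifest_tkn()
 *
 * Only tuple blocks are accepted in the manifest, so any other block
 * type is rejected with -EINVAL.
 */
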
3625 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
3626 				struct snd_soc_tplg_manifest *manifest)
3627 {
3628 	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3629 	struct skl *skl = bus_to_skl(bus);
3630 
3631 	/* proceed only if we have private data defined */
3632 	if (manifest->priv.size == 0)
3633 		return 0;
3634 
3635 	skl_tplg_get_manifest_data(manifest, bus->dev, skl);
3636 
3637 	if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
3638 		dev_err(bus->dev, "Exceeding max library count. Got: %d\n",
3639 					skl->skl_sst->lib_count);
3640 		return  -EINVAL;
3641 	}
3642 
3643 	return 0;
3644 }
3645 
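/*
 * Note (informational): skl->skl_sst->lib_count is incremented while the
 * manifest string tokens (SKL_TKN_STR_LIB_NAME) are parsed, so the check
 * above simply guards the fixed-size library table against a malformed
 * topology file.
 */
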
3646 static struct snd_soc_tplg_ops skl_tplg_ops  = {
3647 	.widget_load = skl_tplg_widget_load,
3648 	.control_load = skl_tplg_control_load,
3649 	.bytes_ext_ops = skl_tlv_ops,
3650 	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
3651 	.io_ops = skl_tplg_kcontrol_ops,
3652 	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
3653 	.manifest = skl_manifest_load,
3654 	.dai_load = skl_dai_load,
3655 };
3656 
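/*
 * snd_soc_tplg_component_load() invokes these callbacks while walking
 * the topology firmware: widget_load for each DAPM widget, control_load
 * for each kcontrol, dai_load for each topology-defined DAI and manifest
 * once for the manifest section; the io/bytes_ext tables map the control
 * ops referenced by the topology file to the handlers defined earlier in
 * this file.
 */
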
3657 /*
3658  * A pipe can have multiple modules, each of which is also a DAPM widget.
3659  * While managing a pipeline we need the list of all the widgets in that
3660  * pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
3661  * collects the SKL-type widgets belonging to each pipeline.
3662  */
3663 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
3664 {
3665 	struct snd_soc_dapm_widget *w;
3666 	struct skl_module_cfg *mcfg = NULL;
3667 	struct skl_pipe_module *p_module = NULL;
3668 	struct skl_pipe *pipe;
3669 
3670 	list_for_each_entry(w, &component->card->widgets, list) {
3671 		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
3672 			mcfg = w->priv;
3673 			pipe = mcfg->pipe;
3674 
3675 			p_module = devm_kzalloc(component->dev,
3676 						sizeof(*p_module), GFP_KERNEL);
3677 			if (!p_module)
3678 				return -ENOMEM;
3679 
3680 			p_module->w = w;
3681 			list_add_tail(&p_module->node, &pipe->w_list);
3682 		}
3683 	}
3684 
3685 	return 0;
3686 }
3687 
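/*
 * The per-pipe w_list built above is what the pipeline event handlers
 * walk later on: when a pipe is powered up, its modules are initialized
 * and bound by iterating this list, so every SKL widget must be attached
 * to its pipe before the first stream starts.
 */
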
3688 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
3689 {
3690 	struct skl_pipe_module *w_module;
3691 	struct snd_soc_dapm_widget *w;
3692 	struct skl_module_cfg *mconfig;
3693 	bool host_found = false, link_found = false;
3694 
3695 	list_for_each_entry(w_module, &pipe->w_list, node) {
3696 		w = w_module->w;
3697 		mconfig = w->priv;
3698 
3699 		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
3700 			host_found = true;
3701 		else if (mconfig->dev_type != SKL_DEVICE_NONE)
3702 			link_found = true;
3703 	}
3704 
3705 	if (host_found && link_found)
3706 		pipe->passthru = true;
3707 	else
3708 		pipe->passthru = false;
3709 }
3710 
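/*
 * Example: a pipe containing an HDA-host copier (SKL_DEVICE_HDAHOST)
 * together with a link-side copier such as I2S or HDA link is treated as
 * a passthrough pipe; a pipe with only host-side or only link-side
 * modules is not.
 */
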
3711 /* These will be read from the topology manifest; currently defined here */
3712 #define SKL_MAX_MCPS 30000000
3713 #define SKL_FW_MAX_MEM 1000000
3714 
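/*
 * Rough budgets implied by the values above: ~30 million DSP cycles per
 * second and ~1 MB of module memory. The resource checks in this file
 * consume from and release back to these totals as pipelines and modules
 * are created and destroyed.
 */
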
3715 /*
3716  * SKL topology init routine
3717  */
3718 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
3719 {
3720 	int ret;
3721 	const struct firmware *fw;
3722 	struct skl *skl = bus_to_skl(bus);
3723 	struct skl_pipeline *ppl;
3724 
3725 	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3726 	if (ret < 0) {
3727 		dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin\n",
3728 				skl->tplg_name, ret);
3729 		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
3730 		if (ret < 0) {
3731 			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
3732 					"dfw_sst.bin", ret);
3733 			return ret;
3734 		}
3735 	}
3736 
3737 	/*
3738 	 * The complete tplg for SKL is loaded as index 0; we don't use
3739 	 * any other index
3740 	 */
3741 	ret = snd_soc_tplg_component_load(component,
3742 					&skl_tplg_ops, fw, 0);
3743 	if (ret < 0) {
3744 		dev_err(bus->dev, "tplg component load failed %d\n", ret);
3745 		release_firmware(fw);
3746 		return -EINVAL;
3747 	}
3748 
3749 	skl->resource.max_mcps = SKL_MAX_MCPS;
3750 	skl->resource.max_mem = SKL_FW_MAX_MEM;
3751 
3752 	skl->tplg = fw;
3753 	ret = skl_tplg_create_pipe_widget_list(component);
3754 	if (ret < 0)
3755 		return ret;
3756 
3757 	list_for_each_entry(ppl, &skl->ppl_list, node)
3758 		skl_tplg_set_pipe_type(skl, ppl->pipe);
3759 
3760 	return 0;
3761 }
3762
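/*
 * Usage note (informational sketch, not part of the upstream file): the
 * skl platform component probe is expected to call skl_tplg_init() once,
 * roughly:
 *
 *	ret = skl_tplg_init(component, bus);
 *	if (ret < 0)
 *		return ret;
 *
 * The firmware handle is kept in skl->tplg so it can be released when
 * the driver is torn down.
 */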