// SPDX-License-Identifier: GPL-2.0-only
/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/intel-nhlt.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK		(1 << 0)
#define SKL_RATE_FIXUP_MASK		(1 << 1)
#define SKL_FMT_FIXUP_MASK		(1 << 2)
#define SKL_IN_DIR_BIT_MASK		BIT(0)
#define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
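/*
 * SKL_IN_DIR_BIT_MASK and SKL_PIN_COUNT_MASK unpack the topology's
 * combined direction/pin-count word: bit 0 holds the pin direction and
 * bits 7:4 the pin count.
 */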

static const int mic_mono_list[] = {
0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

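/*
 * Per-capability reference counts of active streams; skl_tplg_d0i3_get()
 * and skl_tplg_d0i3_put() below keep them balanced.
 */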
void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 =  &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 =  &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether a given widget is one handled by the
 * SKL driver.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:

		return false;
	default:
		return true;
	}
}

static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];

	dev_dbg(skl->dev, "Dumping config\n");
	dev_dbg(skl->dev, "Input Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
				iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "Output Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
				iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
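		/* e.g. for 4 channels the resulting map is 0xFFFF3210 */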
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter or format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup to apply by supplying the
 * fixup mask, so based on that we calculate the output format.
 *
 * For an FE, the pcm hw_params is the source/target format; the same
 * applies to a BE when its hw_params is invoked.
 * Here, based on the FE/BE pipeline and the direction, we calculate the
 * input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
	out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which depend on the pcm
 * params; so once we have calculated the params, the buffer sizes need
 * to be calculated as well.
 */
static void skl_tplg_update_buffer_size(struct skl_dev *skl,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/* Since the fixup is applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only
	 */
	res = &mcfg->module->resources[mcfg->res_idx];
	in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

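	/*
	 * ibs/obs are the bytes consumed/produced per 1 ms of audio, e.g.
	 * 48 kHz, 2 channels in a 32 bit container gives 48 * 2 * 4 = 384
	 * bytes (times 5 for the SRC module).
	 */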
	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt, s_cont;
	struct nhlt_specific_cfg *cfg;
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
		return 0;

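	/*
	 * The default blob is selected below by link type, direction and
	 * the pin 0 format of the module.
	 */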
	dev_dbg(skl->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
		s_cont = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = intel_nhlt_get_endpoint_blob(skl->dev, skl->nhlt, m_cfg->vbus_id,
					   link_type, s_fmt, s_cont, ch,
					   s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d/%d\n",
					ch, s_freq, s_fmt, s_cont);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE updation\n",
				w->name);

	skl_dump_mconfig(skl, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(skl, m_cfg);

	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER updation\n",
				w->name);

	skl_dump_mconfig(skl, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls which
 * need to be applied after the module is initialized. Such params are
 * marked with the SKL_PARAM_SET set_params flag and are sent once the
 * module has been initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(skl,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required at
 * module initialization time. Such params are identified by the
 * SKL_PARAM_INIT set_params flag and are sent as part of the module's
 * init data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config[SKL_PARAM_INIT].caps =
							(u32 *)bc->params;
			mconfig->formats_config[SKL_PARAM_INIT].caps_size =
								bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke it for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	u8 cfg_idx;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->dev,
					"module %pUL id not populated\n",
					(guid_t *)mconfig->guid);
			return -EIO;
		}

		cfg_idx = mconfig->pipe->cur_config_idx;
		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

		if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
			ret = skl->dsp->fw_ops.load_mod(skl->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update the BE blob with default values if it is still NULL */
		skl_tplg_update_be_blob(w, skl);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, skl);
		uuid_mod = (guid_t *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);

		ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
						mconfig->core_id, ret);
			return ret;
		}

		ret = skl_init_module(skl, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
			goto err;
		}

		ret = skl_tplg_set_module_params(w, skl);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	skl_dsp_put_core(skl->dsp, mconfig->core_id);
	return ret;
}

static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
	 struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *mconfig;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		mconfig  = w_module->w->priv;
		uuid_mod = (guid_t *)mconfig->guid;

		if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
			ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);

		ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}

static void skl_tplg_set_pipe_config_idx(struct skl_pipe *pipe, int idx)
{
	pipe->cur_config_idx = idx;
	pipe->memory_pages = pipe->configs[idx].mem_pages;
}

/*
 * Here, we select the pipe format based on the pipe type and pipe
 * direction to determine the current config index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		skl_tplg_set_pipe_config_idx(pipe, 0);
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
		dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
			pipe->ppl_id);
		skl_tplg_set_pipe_config_idx(pipe, 0);
		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
	     (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			skl_tplg_set_pipe_config_idx(pipe, i);
			dev_dbg(skl->dev, "Using pipe config: %d\n", i);
			return 0;
		}
	}

	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 *   - Create the pipeline
 *   - Initialize the modules in the pipeline
 *   - Finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(skl, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(skl, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(skl, modules->src,
							modules->dst);
		}
	}

	return 0;
}

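/*
 * KPB bind params carry topology instance ids; translate them to the
 * private ids allocated at init time so the firmware addresses the real
 * module instances.
 */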
static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
/*
 * Some modules require params to be set after the module is bound to
 * all of its connected pins.
 *
 * The module provider initializes the set_params flag for such modules
 * and we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * Check that all output/input pins are in the bound state;
	 * only then set the module params.
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_BIND].set_params ==
								SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(skl, params, bc->max,
								mconfig);

				ret = skl_set_module_params(skl, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &skl->uuid_list, list) {
		if (guid_equal(uuid, &module->uuid))
			return module->id;
	}

	return -EINVAL;
}

static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = struct_size(params, u.map, uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from UUID mentioned in the
 * post bind params
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post-bind params are used only for KPB to set the copier
	 * instances that drain the data in fast mode.
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->dev,
				"%s: invalid kpb post bind params\n",
				__func__);
}

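/*
 * Queue a (src, dst) pair on skl->bind_list so that the bind can be done
 * later, once the destination module has actually been initialized.
 */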
static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for modules with static pin connections */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id  == src->id.module_id) &&
			(pin->id.instance_id  == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl_dev *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(skl->dev,
			"%s: src widget=%s\n", __func__, w->name);
		dev_dbg(skl->dev,
			"%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check widgets in the sink pipelines; these can be
		 * of any widget type and we are only interested in the ones
		 * used by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink, skl->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. the reference path.
			 * When the path is enabled, the dst module that needs
			 * to be bound may not be initialized yet. If so, add
			 * these modules to the deferred bind list and, once
			 * the dst module is initialized, bind this module to
			 * the dst module from the deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}


			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w,
					src_mconfig, skl);
			skl_tplg_set_module_bind_params(sink,
					sink_mconfig, skl);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(skl,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of a
 * PGA we need to do the following:
 *   - Bind to the sink pipeline
 *      Since the sink pipes can be running and we don't get a mixer event
 *      on connect for an already running mixer, we need to find the sink
 *      pipes here and bind to them. This way dynamic connect works.
 *   - Start the sink pipeline, if not running
 *   - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(skl, src_mconfig->pipe);

	return 0;
}

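/*
 * Walk up the source paths of a widget until one handled by the SKL DSP
 * driver is found.
 */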
static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check widgets in the source pipelines; these can
		 * be of any widget type and we are only interested in the
		 * ones used by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source, skl->dev)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 *   - Check if this pipe is running
 *   - If not, then:
 *	- bind this pipeline to its source pipeline
 *	  if the source pipe is already running, this means it is a dynamic
 *	  connection and we need to bind only to that pipe
 *	- start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, the source was driving
	 * another sink before this sink got connected. Since the source is
	 * started, bind this sink to the source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * If the source pipe is not started, there is no need to
		 * bind or start this pipe here.
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(skl, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 *   - Stop the pipe
 *   - Find the source connections and remove them from the dapm_path_list
 *   - Unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(skl,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, deleting the pipeline is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, Unbind the
			 * modules from deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(skl, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(skl, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(skl, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(skl, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 *   - Stop the pipeline
 *   - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(skl, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if a path is found it means
			 * the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(skl, src_mconfig,
							sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
 * a second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs;
 * but we are only interested in the last PGA (leaf PGA) in a pipeline, to
 * disconnect from the sink when it is running (two FE to one BE or one FE
 * to two BE scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol,
					 bool is_set)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe = NULL;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 *pipe_id;

	if (!ec)
		return -EINVAL;

	if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
		return -EINVAL;

	pipe_id = ec->dobj.private;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == *pipe_id) {
			pipe = ppl->pipe;
			break;
		}
	}
	if (!pipe)
		return -EIO;

	if (is_set)
		skl_tplg_set_pipe_config_idx(pipe, ucontrol->value.enumerated.item[0]);
	else
		ucontrol->value.enumerated.item[0] = pipe->cur_config_idx;

	return 0;
}

static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}

static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}

static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}

static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);
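	/*
	 * The blob returned to user space is laid out as { u32 param_id,
	 * u32 size, data[] }, matching the copy_to_user() calls below.
	 */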

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;
		ac->size = size;

		if (copy_from_user(ac->params, data, size))
			return -EFAULT;

		if (w->power)
			return skl_set_module_params(skl,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg =
				&mconfig->formats_config[SKL_PARAM_INIT];

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* the channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the dma id for host and link. In case of a passthrough
 * pipeline, both host and link are in the same pipeline, so we need
 * to copy the link or host params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res;
	struct skl_dev *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	res = &mconfig->module->resources[mconfig->res_idx];
	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	if (skl->nr_modules)
		return 0;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
	else
		format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI
 * This is used to find the hw_params set for that DAI and apply to FE
 * pipeline
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, stream);
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
				!is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
				!is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, stream);
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings so pick based on the pipeline
 * parameters
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params save = *pipe->p_params;
	struct skl_pipe_fmt *pipe_fmt;
	struct skl_dev *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
	int ret;

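	/* p_params is overwritten below; keep a copy to restore on error */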
	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	*pipe->p_params = *params;
	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret)
		goto err;

	dev_dbg(skl->dev, "%s using pipe config: %d\n", __func__, pipe->cur_config_idx);
	if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
		pipe_fmt = &pipe->configs[pipe->cur_config_idx].out_fmt;
	else
		pipe_fmt = &pipe->configs[pipe->cur_config_idx].in_fmt;

	/* update the blob based on virtual bus_id*/
	cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
					mconfig->vbus_id, link_type,
					pipe_fmt->bps, params->s_cont,
					pipe_fmt->channels, pipe_fmt->freq,
					pipe->direction, dev_type);
	if (cfg) {
		mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
			mconfig->vbus_id, link_type, params->stream,
			params->ch, params->s_freq, params->s_fmt);
		ret = -EINVAL;
		goto err;
	}

	return 0;

err:
	*pipe->p_params = save;
	return ret;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on the direction we need to find either the source
 * list or the sink list and set the pipeline parameters.
 */
1901int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1902				struct skl_pipe_params *params)
1903{
1904	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, params->stream);
1905
1906	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1907		return skl_tplg_be_set_src_pipe_params(dai, w, params);
1908	} else {
1909		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1910	}
1911}
1912
1913static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1914	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
1915	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1916	{SKL_PGA_EVENT, skl_tplg_pga_event},
1917};
1918
1919static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1920	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1921					skl_tplg_tlv_control_set},
1922};
1923
1924static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1925	{
1926		.id = SKL_CONTROL_TYPE_MIC_SELECT,
1927		.get = skl_tplg_mic_control_get,
1928		.put = skl_tplg_mic_control_set,
1929	},
1930	{
1931		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
1932		.get = skl_tplg_multi_config_get,
1933		.put = skl_tplg_multi_config_set,
1934	},
1935	{
1936		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
1937		.get = skl_tplg_multi_config_get_dmic,
1938		.put = skl_tplg_multi_config_set_dmic,
1939	}
1940};
1941
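/*
 * Fill one entry of the pipe's per-configuration table: the in/out
 * format (freq, channels, bps) selected by direction, plus the memory
 * pages for that path configuration.
 */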
1942static int skl_tplg_fill_pipe_cfg(struct device *dev,
1943			struct skl_pipe *pipe, u32 tkn,
1944			u32 tkn_val, int conf_idx, int dir)
1945{
1946	struct skl_pipe_fmt *fmt;
1947	struct skl_path_config *config;
1948
1949	switch (dir) {
1950	case SKL_DIR_IN:
1951		fmt = &pipe->configs[conf_idx].in_fmt;
1952		break;
1953
1954	case SKL_DIR_OUT:
1955		fmt = &pipe->configs[conf_idx].out_fmt;
1956		break;
1957
1958	default:
1959		dev_err(dev, "Invalid direction: %d\n", dir);
1960		return -EINVAL;
1961	}
1962
1963	config = &pipe->configs[conf_idx];
1964
1965	switch (tkn) {
1966	case SKL_TKN_U32_CFG_FREQ:
1967		fmt->freq = tkn_val;
1968		break;
1969
1970	case SKL_TKN_U8_CFG_CHAN:
1971		fmt->channels = tkn_val;
1972		break;
1973
1974	case SKL_TKN_U8_CFG_BPS:
1975		fmt->bps = tkn_val;
1976		break;
1977
1978	case SKL_TKN_U32_PATH_MEM_PGS:
1979		config->mem_pages = tkn_val;
1980		break;
1981
1982	default:
1983		dev_err(dev, "Invalid token config: %d\n", tkn);
1984		return -EINVAL;
1985	}
1986
1987	return 0;
1988}
1989
1990static int skl_tplg_fill_pipe_tkn(struct device *dev,
1991			struct skl_pipe *pipe, u32 tkn,
1992			u32 tkn_val)
1993{
1994
1995	switch (tkn) {
1996	case SKL_TKN_U32_PIPE_CONN_TYPE:
1997		pipe->conn_type = tkn_val;
1998		break;
1999
2000	case SKL_TKN_U32_PIPE_PRIORITY:
2001		pipe->pipe_priority = tkn_val;
2002		break;
2003
2004	case SKL_TKN_U32_PIPE_MEM_PGS:
2005		pipe->memory_pages = tkn_val;
2006		break;
2007
2008	case SKL_TKN_U32_PMODE:
2009		pipe->lp_mode = tkn_val;
2010		break;
2011
2012	case SKL_TKN_U32_PIPE_DIRECTION:
2013		pipe->direction = tkn_val;
2014		break;
2015
2016	case SKL_TKN_U32_NUM_CONFIGS:
2017		pipe->nr_cfgs = tkn_val;
2018		break;
2019
2020	default:
2021		dev_err(dev, "Token not handled %d\n", tkn);
2022		return -EINVAL;
2023	}
2024
2025	return 0;
2026}
2027
/*
 * Add a pipeline by parsing the relevant tokens.
 * If a pipe with this ID already exists, reuse it and return -EEXIST.
 */
2032static int skl_tplg_add_pipe(struct device *dev,
2033		struct skl_module_cfg *mconfig, struct skl_dev *skl,
2034		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
2035{
2036	struct skl_pipeline *ppl;
2037	struct skl_pipe *pipe;
2038	struct skl_pipe_params *params;
2039
2040	list_for_each_entry(ppl, &skl->ppl_list, node) {
2041		if (ppl->pipe->ppl_id == tkn_elem->value) {
2042			mconfig->pipe = ppl->pipe;
2043			return -EEXIST;
2044		}
2045	}
2046
2047	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2048	if (!ppl)
2049		return -ENOMEM;
2050
2051	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2052	if (!pipe)
2053		return -ENOMEM;
2054
2055	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2056	if (!params)
2057		return -ENOMEM;
2058
2059	pipe->p_params = params;
2060	pipe->ppl_id = tkn_elem->value;
2061	INIT_LIST_HEAD(&pipe->w_list);
2062
2063	ppl->pipe = pipe;
2064	list_add(&ppl->node, &skl->ppl_list);
2065
2066	mconfig->pipe = pipe;
2067	mconfig->pipe->state = SKL_PIPE_INVALID;
2068
2069	return 0;
2070}
2071
2072static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
2073	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2074{
2075	if (uuid_tkn->token == SKL_TKN_UUID) {
2076		import_guid(guid, uuid_tkn->uuid);
2077		return 0;
2078	}
2079
	dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
2081
2082	return -EINVAL;
2083}
2084
2085static int skl_tplg_fill_pin(struct device *dev,
2086			struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2087			struct skl_module_pin *m_pin,
2088			int pin_index)
2089{
2090	int ret;
2091
2092	switch (tkn_elem->token) {
2093	case SKL_TKN_U32_PIN_MOD_ID:
2094		m_pin[pin_index].id.module_id = tkn_elem->value;
2095		break;
2096
2097	case SKL_TKN_U32_PIN_INST_ID:
2098		m_pin[pin_index].id.instance_id = tkn_elem->value;
2099		break;
2100
2101	case SKL_TKN_UUID:
2102		ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
2103			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
2104		if (ret < 0)
2105			return ret;
2106
2107		break;
2108
2109	default:
2110		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
2111		return -EINVAL;
2112	}
2113
2114	return 0;
2115}
2116
/*
 * Parse pin-config-specific tokens to fill up the
 * module private data.
 */
2121static int skl_tplg_fill_pins_info(struct device *dev,
2122		struct skl_module_cfg *mconfig,
2123		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2124		int dir, int pin_count)
2125{
2126	int ret;
2127	struct skl_module_pin *m_pin;
2128
2129	switch (dir) {
2130	case SKL_DIR_IN:
2131		m_pin = mconfig->m_in_pin;
2132		break;
2133
2134	case SKL_DIR_OUT:
2135		m_pin = mconfig->m_out_pin;
2136		break;
2137
2138	default:
2139		dev_err(dev, "Invalid direction value\n");
2140		return -EINVAL;
2141	}
2142
2143	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
2144	if (ret < 0)
2145		return ret;
2146
2147	m_pin[pin_count].in_use = false;
2148	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2149
2150	return 0;
2151}
2152
2153/*
2154 * Fill up input/output module config format based
2155 * on the direction
2156 */
2157static int skl_tplg_fill_fmt(struct device *dev,
2158		struct skl_module_fmt *dst_fmt,
2159		u32 tkn, u32 value)
2160{
2161	switch (tkn) {
2162	case SKL_TKN_U32_FMT_CH:
2163		dst_fmt->channels  = value;
2164		break;
2165
2166	case SKL_TKN_U32_FMT_FREQ:
2167		dst_fmt->s_freq = value;
2168		break;
2169
2170	case SKL_TKN_U32_FMT_BIT_DEPTH:
2171		dst_fmt->bit_depth = value;
2172		break;
2173
2174	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2175		dst_fmt->valid_bit_depth = value;
2176		break;
2177
2178	case SKL_TKN_U32_FMT_CH_CONFIG:
2179		dst_fmt->ch_cfg = value;
2180		break;
2181
2182	case SKL_TKN_U32_FMT_INTERLEAVE:
2183		dst_fmt->interleaving_style = value;
2184		break;
2185
2186	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2187		dst_fmt->sample_type = value;
2188		break;
2189
2190	case SKL_TKN_U32_FMT_CH_MAP:
2191		dst_fmt->ch_map = value;
2192		break;
2193
2194	default:
2195		dev_err(dev, "Invalid token %d\n", tkn);
2196		return -EINVAL;
2197	}
2198
2199	return 0;
2200}
2201
2202static int skl_tplg_widget_fill_fmt(struct device *dev,
2203		struct skl_module_iface *fmt,
2204		u32 tkn, u32 val, u32 dir, int fmt_idx)
2205{
2206	struct skl_module_fmt *dst_fmt;
2207
2208	if (!fmt)
2209		return -EINVAL;
2210
2211	switch (dir) {
2212	case SKL_DIR_IN:
2213		dst_fmt = &fmt->inputs[fmt_idx].fmt;
2214		break;
2215
2216	case SKL_DIR_OUT:
2217		dst_fmt = &fmt->outputs[fmt_idx].fmt;
2218		break;
2219
2220	default:
2221		dev_err(dev, "Invalid direction: %d\n", dir);
2222		return -EINVAL;
2223	}
2224
2225	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2226}
2227
2228static void skl_tplg_fill_pin_dynamic_val(
2229		struct skl_module_pin *mpin, u32 pin_count, u32 value)
2230{
2231	int i;
2232
2233	for (i = 0; i < pin_count; i++)
2234		mpin[i].is_dynamic = value;
2235}
2236
/*
 * The resource table in the manifest carries pin-specific resources
 * such as the pin index and the pin buffer size.
 */
2241static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
2242		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2243		struct skl_module_res *res, int pin_idx, int dir)
2244{
2245	struct skl_module_pin_resources *m_pin;
2246
2247	switch (dir) {
2248	case SKL_DIR_IN:
2249		m_pin = &res->input[pin_idx];
2250		break;
2251
2252	case SKL_DIR_OUT:
2253		m_pin = &res->output[pin_idx];
2254		break;
2255
2256	default:
2257		dev_err(dev, "Invalid pin direction: %d\n", dir);
2258		return -EINVAL;
2259	}
2260
2261	switch (tkn_elem->token) {
2262	case SKL_TKN_MM_U32_RES_PIN_ID:
2263		m_pin->pin_index = tkn_elem->value;
2264		break;
2265
2266	case SKL_TKN_MM_U32_PIN_BUF:
2267		m_pin->buf_size = tkn_elem->value;
2268		break;
2269
2270	default:
2271		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2272		return -EINVAL;
2273	}
2274
2275	return 0;
2276}
2277
/*
 * Fill module-specific resources from the manifest's resource
 * table, such as CPC, DMA buffer size and memory pages.
 */
2282static int skl_tplg_fill_res_tkn(struct device *dev,
2283		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2284		struct skl_module_res *res,
2285		int pin_idx, int dir)
2286{
2287	int ret, tkn_count = 0;
2288
2289	if (!res)
2290		return -EINVAL;
2291
2292	switch (tkn_elem->token) {
2293	case SKL_TKN_MM_U32_DMA_SIZE:
2294		res->dma_buffer_size = tkn_elem->value;
2295		break;
2296
2297	case SKL_TKN_MM_U32_CPC:
2298		res->cpc = tkn_elem->value;
2299		break;
2300
2301	case SKL_TKN_U32_MEM_PAGES:
2302		res->is_pages = tkn_elem->value;
2303		break;
2304
2305	case SKL_TKN_U32_OBS:
2306		res->obs = tkn_elem->value;
2307		break;
2308
2309	case SKL_TKN_U32_IBS:
2310		res->ibs = tkn_elem->value;
2311		break;
2312
2313	case SKL_TKN_MM_U32_RES_PIN_ID:
2314	case SKL_TKN_MM_U32_PIN_BUF:
2315		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
2316						    pin_idx, dir);
2317		if (ret < 0)
2318			return ret;
2319		break;
2320
2321	case SKL_TKN_MM_U32_CPS:
2322	case SKL_TKN_U32_MAX_MCPS:
2323		/* ignore unused tokens */
2324		break;
2325
2326	default:
		dev_err(dev, "Not a res type token: %d\n", tkn_elem->token);
2328		return -EINVAL;
2329
2330	}
2331	tkn_count++;
2332
2333	return tkn_count;
2334}
2335
2336/*
2337 * Parse tokens to fill up the module private data
2338 */
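/*
 * On success the number of tokens consumed is returned so the caller
 * can advance through the vendor array; a negative value is an error.
 */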
2339static int skl_tplg_get_token(struct device *dev,
2340		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2341		struct skl_dev *skl, struct skl_module_cfg *mconfig)
2342{
2343	int tkn_count = 0;
2344	int ret;
2345	static int is_pipe_exists;
2346	static int pin_index, dir, conf_idx;
2347	struct skl_module_iface *iface = NULL;
2348	struct skl_module_res *res = NULL;
2349	int res_idx = mconfig->res_idx;
2350	int fmt_idx = mconfig->fmt_idx;
2351
	/*
	 * If the manifest structure contains no modules, store all
	 * the module data at index 0;
	 * res_idx and fmt_idx default to 0.
	 */
2357	if (skl->nr_modules == 0) {
2358		res = &mconfig->module->resources[res_idx];
2359		iface = &mconfig->module->formats[fmt_idx];
2360	}
2361
2362	if (tkn_elem->token > SKL_TKN_MAX)
2363		return -EINVAL;
2364
2365	switch (tkn_elem->token) {
2366	case SKL_TKN_U8_IN_QUEUE_COUNT:
2367		mconfig->module->max_input_pins = tkn_elem->value;
2368		break;
2369
2370	case SKL_TKN_U8_OUT_QUEUE_COUNT:
2371		mconfig->module->max_output_pins = tkn_elem->value;
2372		break;
2373
2374	case SKL_TKN_U8_DYN_IN_PIN:
2375		if (!mconfig->m_in_pin)
2376			mconfig->m_in_pin =
2377				devm_kcalloc(dev, MAX_IN_QUEUE,
2378					     sizeof(*mconfig->m_in_pin),
2379					     GFP_KERNEL);
2380		if (!mconfig->m_in_pin)
2381			return -ENOMEM;
2382
2383		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
2384					      tkn_elem->value);
2385		break;
2386
2387	case SKL_TKN_U8_DYN_OUT_PIN:
2388		if (!mconfig->m_out_pin)
2389			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_OUT_QUEUE,
					     sizeof(*mconfig->m_out_pin),
2392					     GFP_KERNEL);
2393		if (!mconfig->m_out_pin)
2394			return -ENOMEM;
2395
2396		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
2397					      tkn_elem->value);
2398		break;
2399
2400	case SKL_TKN_U8_TIME_SLOT:
2401		mconfig->time_slot = tkn_elem->value;
2402		break;
2403
2404	case SKL_TKN_U8_CORE_ID:
2405		mconfig->core_id = tkn_elem->value;
2406		break;
2407
2408	case SKL_TKN_U8_MOD_TYPE:
2409		mconfig->m_type = tkn_elem->value;
2410		break;
2411
2412	case SKL_TKN_U8_DEV_TYPE:
2413		mconfig->dev_type = tkn_elem->value;
2414		break;
2415
2416	case SKL_TKN_U8_HW_CONN_TYPE:
2417		mconfig->hw_conn_type = tkn_elem->value;
2418		break;
2419
	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;
2424
2425	case SKL_TKN_U32_MEM_PAGES:
2426	case SKL_TKN_U32_MAX_MCPS:
2427	case SKL_TKN_U32_OBS:
2428	case SKL_TKN_U32_IBS:
2429		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
2430		if (ret < 0)
2431			return ret;
2432
2433		break;
2434
2435	case SKL_TKN_U32_VBUS_ID:
2436		mconfig->vbus_id = tkn_elem->value;
2437		break;
2438
2439	case SKL_TKN_U32_PARAMS_FIXUP:
2440		mconfig->params_fixup = tkn_elem->value;
2441		break;
2442
2443	case SKL_TKN_U32_CONVERTER:
2444		mconfig->converter = tkn_elem->value;
2445		break;
2446
2447	case SKL_TKN_U32_D0I3_CAPS:
2448		mconfig->d0i3_caps = tkn_elem->value;
2449		break;
2450
2451	case SKL_TKN_U32_PIPE_ID:
2452		ret = skl_tplg_add_pipe(dev,
2453				mconfig, skl, tkn_elem);
2454
2455		if (ret < 0) {
2456			if (ret == -EEXIST) {
2457				is_pipe_exists = 1;
2458				break;
2459			}
			return ret;
2461		}
2462
2463		break;
2464
2465	case SKL_TKN_U32_PIPE_CONFIG_ID:
2466		conf_idx = tkn_elem->value;
2467		break;
2468
2469	case SKL_TKN_U32_PIPE_CONN_TYPE:
2470	case SKL_TKN_U32_PIPE_PRIORITY:
2471	case SKL_TKN_U32_PIPE_MEM_PGS:
2472	case SKL_TKN_U32_PMODE:
2473	case SKL_TKN_U32_PIPE_DIRECTION:
2474	case SKL_TKN_U32_NUM_CONFIGS:
2475		if (is_pipe_exists) {
2476			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2477					tkn_elem->token, tkn_elem->value);
2478			if (ret < 0)
2479				return ret;
2480		}
2481
2482		break;
2483
2484	case SKL_TKN_U32_PATH_MEM_PGS:
2485	case SKL_TKN_U32_CFG_FREQ:
2486	case SKL_TKN_U8_CFG_CHAN:
2487	case SKL_TKN_U8_CFG_BPS:
2488		if (mconfig->pipe->nr_cfgs) {
2489			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
2490					tkn_elem->token, tkn_elem->value,
2491					conf_idx, dir);
2492			if (ret < 0)
2493				return ret;
2494		}
2495		break;
2496
2497	case SKL_TKN_CFG_MOD_RES_ID:
2498		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
2499		break;
2500
2501	case SKL_TKN_CFG_MOD_FMT_ID:
2502		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
2503		break;
2504
	/*
	 * The SKL_TKN_U32_DIR_PIN_COUNT token value carries both the
	 * direction and the pin count: bit 0 holds the direction and
	 * bits 7:4 the pin count/index.
	 */
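	/*
	 * e.g. a value of 0x21 gives direction (0x21 & SKL_IN_DIR_BIT_MASK)
	 * == 1 and pin index ((0x21 & SKL_PIN_COUNT_MASK) >> 4) == 2.
	 */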
2510	case SKL_TKN_U32_DIR_PIN_COUNT:
2511		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2512		pin_index = (tkn_elem->value &
2513			SKL_PIN_COUNT_MASK) >> 4;
2514
2515		break;
2516
2517	case SKL_TKN_U32_FMT_CH:
2518	case SKL_TKN_U32_FMT_FREQ:
2519	case SKL_TKN_U32_FMT_BIT_DEPTH:
2520	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2521	case SKL_TKN_U32_FMT_CH_CONFIG:
2522	case SKL_TKN_U32_FMT_INTERLEAVE:
2523	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2524	case SKL_TKN_U32_FMT_CH_MAP:
2525		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
2526				tkn_elem->value, dir, pin_index);
2527
2528		if (ret < 0)
2529			return ret;
2530
2531		break;
2532
2533	case SKL_TKN_U32_PIN_MOD_ID:
2534	case SKL_TKN_U32_PIN_INST_ID:
2535	case SKL_TKN_UUID:
2536		ret = skl_tplg_fill_pins_info(dev,
2537				mconfig, tkn_elem, dir,
2538				pin_index);
2539		if (ret < 0)
2540			return ret;
2541
2542		break;
2543
2544	case SKL_TKN_U32_FMT_CFG_IDX:
		if (tkn_elem->value >= SKL_MAX_PARAMS_TYPES)
2546			return -EINVAL;
2547
2548		mconfig->fmt_cfg_idx = tkn_elem->value;
2549		break;
2550
2551	case SKL_TKN_U32_CAPS_SIZE:
2552		mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size =
2553			tkn_elem->value;
2554
2555		break;
2556
2557	case SKL_TKN_U32_CAPS_SET_PARAMS:
2558		mconfig->formats_config[mconfig->fmt_cfg_idx].set_params =
2559				tkn_elem->value;
2560		break;
2561
2562	case SKL_TKN_U32_CAPS_PARAMS_ID:
2563		mconfig->formats_config[mconfig->fmt_cfg_idx].param_id =
2564				tkn_elem->value;
2565		break;
2566
	case SKL_TKN_U32_PROC_DOMAIN:
		mconfig->domain = tkn_elem->value;
		break;
2572
2573	case SKL_TKN_U32_DMA_BUF_SIZE:
2574		mconfig->dma_buffer_size = tkn_elem->value;
2575		break;
2576
2577	case SKL_TKN_U8_IN_PIN_TYPE:
2578	case SKL_TKN_U8_OUT_PIN_TYPE:
2579	case SKL_TKN_U8_CONN_TYPE:
2580		break;
2581
2582	default:
2583		dev_err(dev, "Token %d not handled\n",
2584				tkn_elem->token);
2585		return -EINVAL;
2586	}
2587
2588	tkn_count++;
2589
2590	return tkn_count;
2591}
2592
2593/*
2594 * Parse the vendor array for specific tokens to construct
2595 * module private data
2596 */
2597static int skl_tplg_get_tokens(struct device *dev,
2598		char *pvt_data,	struct skl_dev *skl,
2599		struct skl_module_cfg *mconfig, int block_size)
2600{
2601	struct snd_soc_tplg_vendor_array *array;
2602	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2603	int tkn_count = 0, ret;
2604	int off = 0, tuple_size = 0;
2605	bool is_module_guid = true;
2606
2607	if (block_size <= 0)
2608		return -EINVAL;
2609
2610	while (tuple_size < block_size) {
2611		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2612
2613		off += array->size;
2614
2615		switch (array->type) {
2616		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2617			dev_warn(dev, "no string tokens expected for skl tplg\n");
2618			continue;
2619
2620		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2621			if (is_module_guid) {
2622				ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
2623							array->uuid);
2624				is_module_guid = false;
2625			} else {
2626				ret = skl_tplg_get_token(dev, array->value, skl,
2627							 mconfig);
2628			}
2629
2630			if (ret < 0)
2631				return ret;
2632
2633			tuple_size += sizeof(*array->uuid);
2634
2635			continue;
2636
2637		default:
2638			tkn_elem = array->value;
2639			tkn_count = 0;
2640			break;
2641		}
2642
2643		while (tkn_count <= (array->num_elems - 1)) {
2644			ret = skl_tplg_get_token(dev, tkn_elem,
2645					skl, mconfig);
2646
2647			if (ret < 0)
2648				return ret;
2649
2650			tkn_count = tkn_count + ret;
2651			tkn_elem++;
2652		}
2653
2654		tuple_size += tkn_count * sizeof(*tkn_elem);
2655	}
2656
2657	return off;
2658}
2659
/*
 * Every data block is preceded by descriptors giving the number
 * of data blocks, the type of the block and its size.
 */
2664static int skl_tplg_get_desc_blocks(struct device *dev,
2665		struct snd_soc_tplg_vendor_array *array)
2666{
2667	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2668
2669	tkn_elem = array->value;
2670
2671	switch (tkn_elem->token) {
2672	case SKL_TKN_U8_NUM_BLOCKS:
2673	case SKL_TKN_U8_BLOCK_TYPE:
2674	case SKL_TKN_U16_BLOCK_SIZE:
2675		return tkn_elem->value;
2676
2677	default:
2678		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2679		break;
2680	}
2681
2682	return -EINVAL;
2683}
2684
2685/* Functions to parse private data from configuration file format v4 */
2686
/*
 * Add a pipeline from the topology binary into the driver pipeline list.
 *
 * If it was already added, return that instance;
 * otherwise create a new instance and add it to the driver list.
 */
2693static int skl_tplg_add_pipe_v4(struct device *dev,
2694			struct skl_module_cfg *mconfig, struct skl_dev *skl,
2695			struct skl_dfw_v4_pipe *dfw_pipe)
2696{
2697	struct skl_pipeline *ppl;
2698	struct skl_pipe *pipe;
2699	struct skl_pipe_params *params;
2700
2701	list_for_each_entry(ppl, &skl->ppl_list, node) {
2702		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
2703			mconfig->pipe = ppl->pipe;
2704			return 0;
2705		}
2706	}
2707
2708	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2709	if (!ppl)
2710		return -ENOMEM;
2711
2712	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2713	if (!pipe)
2714		return -ENOMEM;
2715
2716	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2717	if (!params)
2718		return -ENOMEM;
2719
2720	pipe->ppl_id = dfw_pipe->pipe_id;
2721	pipe->memory_pages = dfw_pipe->memory_pages;
2722	pipe->pipe_priority = dfw_pipe->pipe_priority;
2723	pipe->conn_type = dfw_pipe->conn_type;
2724	pipe->state = SKL_PIPE_INVALID;
2725	pipe->p_params = params;
2726	INIT_LIST_HEAD(&pipe->w_list);
2727
2728	ppl->pipe = pipe;
2729	list_add(&ppl->node, &skl->ppl_list);
2730
2731	mconfig->pipe = pipe;
2732
2733	return 0;
2734}
2735
2736static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
2737					struct skl_module_pin *m_pin,
2738					bool is_dynamic, int max_pin)
2739{
2740	int i;
2741
2742	for (i = 0; i < max_pin; i++) {
2743		m_pin[i].id.module_id = dfw_pin[i].module_id;
2744		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
2745		m_pin[i].in_use = false;
2746		m_pin[i].is_dynamic = is_dynamic;
2747		m_pin[i].pin_state = SKL_PIN_UNBIND;
2748	}
2749}
2750
2751static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
2752				 struct skl_dfw_v4_module_fmt *src_fmt,
2753				 int pins)
2754{
2755	int i;
2756
2757	for (i = 0; i < pins; i++) {
2758		dst_fmt[i].fmt.channels  = src_fmt[i].channels;
2759		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
2760		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
2761		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
2762		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
2763		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
2764		dst_fmt[i].fmt.interleaving_style =
2765						src_fmt[i].interleaving_style;
2766		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
2767	}
2768}
2769
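/*
 * Populate the module config from a v4 (pre-tuple) widget descriptor;
 * formats, pins and caps are copied directly from the fixed-layout
 * struct skl_dfw_v4_module.
 */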
2770static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
2771				    struct skl_dev *skl, struct device *dev,
2772				    struct skl_module_cfg *mconfig)
2773{
2774	struct skl_dfw_v4_module *dfw =
2775				(struct skl_dfw_v4_module *)tplg_w->priv.data;
2776	int ret;
2777	int idx = mconfig->fmt_cfg_idx;
2778
2779	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
2780
2781	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
2782	if (ret)
2783		return ret;
2784	mconfig->id.module_id = -1;
2785	mconfig->id.instance_id = dfw->instance_id;
2786	mconfig->module->resources[0].cpc = dfw->max_mcps / 1000;
2787	mconfig->module->resources[0].ibs = dfw->ibs;
2788	mconfig->module->resources[0].obs = dfw->obs;
2789	mconfig->core_id = dfw->core_id;
2790	mconfig->module->max_input_pins = dfw->max_in_queue;
2791	mconfig->module->max_output_pins = dfw->max_out_queue;
2792	mconfig->module->loadable = dfw->is_loadable;
2793	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
2794			     MAX_IN_QUEUE);
2795	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
2796			     MAX_OUT_QUEUE);
2797
2798	mconfig->params_fixup = dfw->params_fixup;
2799	mconfig->converter = dfw->converter;
2800	mconfig->m_type = dfw->module_type;
2801	mconfig->vbus_id = dfw->vbus_id;
2802	mconfig->module->resources[0].is_pages = dfw->mem_pages;
2803
2804	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
2805	if (ret)
2806		return ret;
2807
2808	mconfig->dev_type = dfw->dev_type;
2809	mconfig->hw_conn_type = dfw->hw_conn_type;
2810	mconfig->time_slot = dfw->time_slot;
2811	mconfig->formats_config[idx].caps_size = dfw->caps.caps_size;
2812
2813	mconfig->m_in_pin = devm_kcalloc(dev,
2814				MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
2815				GFP_KERNEL);
2816	if (!mconfig->m_in_pin)
2817		return -ENOMEM;
2818
2819	mconfig->m_out_pin = devm_kcalloc(dev,
2820				MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
2821				GFP_KERNEL);
2822	if (!mconfig->m_out_pin)
2823		return -ENOMEM;
2824
2825	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
2826				    dfw->is_dynamic_in_pin,
2827				    mconfig->module->max_input_pins);
2828	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
2829				    dfw->is_dynamic_out_pin,
2830				    mconfig->module->max_output_pins);
2831
2832	if (mconfig->formats_config[idx].caps_size) {
2833		mconfig->formats_config[idx].set_params = dfw->caps.set_params;
2834		mconfig->formats_config[idx].param_id = dfw->caps.param_id;
2835		mconfig->formats_config[idx].caps =
2836		devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2837			     GFP_KERNEL);
2838		if (!mconfig->formats_config[idx].caps)
2839			return -ENOMEM;
2840		memcpy(mconfig->formats_config[idx].caps, dfw->caps.caps,
2841		       dfw->caps.caps_size);
2842	}
2843
2844	return 0;
2845}
2846
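/*
 * Copy a raw caps data block into the currently selected format config.
 * Returns the number of bytes consumed so the caller can advance past
 * the block.
 */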
2847static int skl_tplg_get_caps_data(struct device *dev, char *data,
2848				  struct skl_module_cfg *mconfig)
2849{
2850	int idx = mconfig->fmt_cfg_idx;
2851
2852	if (mconfig->formats_config[idx].caps_size > 0) {
2853		mconfig->formats_config[idx].caps =
2854			devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2855				     GFP_KERNEL);
2856		if (!mconfig->formats_config[idx].caps)
2857			return -ENOMEM;
2858		memcpy(mconfig->formats_config[idx].caps, data,
2859		       mconfig->formats_config[idx].caps_size);
2860	}
2861
2862	return mconfig->formats_config[idx].caps_size;
2863}
2864
/*
 * Parse the private data for tokens and their corresponding values.
 * The private data can have multiple data blocks: the whole area is
 * preceded by a descriptor for the number of blocks, and each block by
 * a descriptor for the type and size of the succeeding data.
 */
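/*
 * Laid out, the private data parsed here looks roughly like:
 *
 *   [SKL_TKN_U8_NUM_BLOCKS = N]
 *   N x ([SKL_TKN_U8_BLOCK_TYPE][SKL_TKN_U16_BLOCK_SIZE][block payload])
 *
 * where the payload holds token tuples for SKL_TYPE_TUPLE blocks and raw
 * caps data otherwise.
 */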
2871static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2872				struct skl_dev *skl, struct device *dev,
2873				struct skl_module_cfg *mconfig)
2874{
2875	struct snd_soc_tplg_vendor_array *array;
2876	int num_blocks, block_size, block_type, off = 0;
2877	char *data;
2878	int ret;
2879
2880	/*
2881	 * v4 configuration files have a valid UUID at the start of
2882	 * the widget's private data.
2883	 */
2884	if (uuid_is_valid((char *)tplg_w->priv.data))
2885		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);
2886
2887	/* Read the NUM_DATA_BLOCKS descriptor */
2888	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2889	ret = skl_tplg_get_desc_blocks(dev, array);
2890	if (ret < 0)
2891		return ret;
2892	num_blocks = ret;
2893
2894	off += array->size;
2895	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2896	while (num_blocks > 0) {
2897		array = (struct snd_soc_tplg_vendor_array *)
2898				(tplg_w->priv.data + off);
2899
2900		ret = skl_tplg_get_desc_blocks(dev, array);
2901
2902		if (ret < 0)
2903			return ret;
2904		block_type = ret;
2905		off += array->size;
2906
2907		array = (struct snd_soc_tplg_vendor_array *)
2908			(tplg_w->priv.data + off);
2909
2910		ret = skl_tplg_get_desc_blocks(dev, array);
2911
2912		if (ret < 0)
2913			return ret;
2914		block_size = ret;
2915		off += array->size;
2916
2917		data = (tplg_w->priv.data + off);
2918
2919		if (block_type == SKL_TYPE_TUPLE) {
2920			ret = skl_tplg_get_tokens(dev, data,
2921					skl, mconfig, block_size);
2922		} else {
2923			ret = skl_tplg_get_caps_data(dev, data, mconfig);
2924		}
2925
2926		if (ret < 0)
2927			return ret;
2928
2929		--num_blocks;
2930		off += ret;
2931	}
2932
2933	return 0;
2934}
2935
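/*
 * Reset the pin usage and the pipe/module state for a widget owned by
 * this component; used when cleaning up the DSP resources.
 */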
2936static void skl_clear_pin_config(struct snd_soc_component *component,
2937				struct snd_soc_dapm_widget *w)
2938{
2939	int i;
2940	struct skl_module_cfg *mconfig;
2941	struct skl_pipe *pipe;
2942
2943	if (!strncmp(w->dapm->component->name, component->name,
2944					strlen(component->name))) {
2945		mconfig = w->priv;
2946		pipe = mconfig->pipe;
2947		for (i = 0; i < mconfig->module->max_input_pins; i++) {
2948			mconfig->m_in_pin[i].in_use = false;
2949			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2950		}
2951		for (i = 0; i < mconfig->module->max_output_pins; i++) {
2952			mconfig->m_out_pin[i].in_use = false;
2953			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2954		}
2955		pipe->state = SKL_PIPE_INVALID;
2956		mconfig->m_state = SKL_MODULE_UNINIT;
2957	}
2958}
2959
2960void skl_cleanup_resources(struct skl_dev *skl)
2961{
2962	struct snd_soc_component *soc_component = skl->component;
2963	struct snd_soc_dapm_widget *w;
2964	struct snd_soc_card *card;
2965
2966	if (soc_component == NULL)
2967		return;
2968
2969	card = soc_component->card;
2970	if (!snd_soc_card_is_instantiated(card))
2971		return;
2972
2973	list_for_each_entry(w, &card->widgets, list) {
2974		if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL)
2975			skl_clear_pin_config(soc_component, w);
2976	}
2977
2978	skl_clear_module_cnt(skl->dsp);
2979}
2980
/*
 * Topology core widget load callback
 *
 * This is used to save the private data of each widget, which gives the
 * driver the module and pipeline parameters the DSP FW expects: ids,
 * resource values, formats, etc.
 */
2988static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
2989				struct snd_soc_dapm_widget *w,
2990				struct snd_soc_tplg_dapm_widget *tplg_w)
2991{
2992	int ret;
2993	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
2994	struct skl_dev *skl = bus_to_skl(bus);
2995	struct skl_module_cfg *mconfig;
2996
2997	if (!tplg_w->priv.size)
2998		goto bind_event;
2999
3000	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
3001
3002	if (!mconfig)
3003		return -ENOMEM;
3004
3005	if (skl->nr_modules == 0) {
3006		mconfig->module = devm_kzalloc(bus->dev,
3007				sizeof(*mconfig->module), GFP_KERNEL);
3008		if (!mconfig->module)
3009			return -ENOMEM;
3010	}
3011
3012	w->priv = mconfig;
3013
	/*
	 * The module binary can be loaded later, so set the id to -1 and
	 * query it when the module is loaded for a use case.
	 */
3018	mconfig->id.module_id = -1;
3019
3020	/* To provide backward compatibility, set default as SKL_PARAM_INIT */
3021	mconfig->fmt_cfg_idx = SKL_PARAM_INIT;
3022
3023	/* Parse private data for tuples */
3024	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
3025	if (ret < 0)
3026		return ret;
3027
3028	skl_debug_init_module(skl->debugfs, w, mconfig);
3029
3030bind_event:
3031	if (tplg_w->event_type == 0) {
3032		dev_dbg(bus->dev, "ASoC: No event handler required\n");
3033		return 0;
3034	}
3035
3036	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
3037					ARRAY_SIZE(skl_tplg_widget_ops),
3038					tplg_w->event_type);
3039
3040	if (ret) {
3041		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
3042					__func__, tplg_w->event_type);
3043		return -EINVAL;
3044	}
3045
3046	return 0;
3047}
3048
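/*
 * Copy the bytes-control defaults (params blob, param id, set_params
 * flag) from the topology private data into the driver's algo data.
 */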
3049static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
3050					struct snd_soc_tplg_bytes_control *bc)
3051{
3052	struct skl_algo_data *ac;
3053	struct skl_dfw_algo_data *dfw_ac =
3054				(struct skl_dfw_algo_data *)bc->priv.data;
3055
3056	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
3057	if (!ac)
3058		return -ENOMEM;
3059
3060	/* Fill private data */
3061	ac->max = dfw_ac->max;
3062	ac->param_id = dfw_ac->param_id;
3063	ac->set_params = dfw_ac->set_params;
3064	ac->size = dfw_ac->max;
3065
3066	if (ac->max) {
3067		ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
3068		if (!ac->params)
3069			return -ENOMEM;
3070
3071		memcpy(ac->params, dfw_ac->params, ac->max);
3072	}
3073
3074	be->dobj.private  = ac;
3075	return 0;
3076}
3077
3078static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
3079				struct snd_soc_tplg_enum_control *ec)
3080{
3081
3082	void *data;
3083
3084	if (ec->priv.size) {
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
3086		if (!data)
3087			return -ENOMEM;
3088		memcpy(data, ec->priv.data, ec->priv.size);
3089		se->dobj.private = data;
3090	}
3091
3092	return 0;
3093
3094}
3095
3096static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
3097				int index,
3098				struct snd_kcontrol_new *kctl,
3099				struct snd_soc_tplg_ctl_hdr *hdr)
3100{
3101	struct soc_bytes_ext *sb;
3102	struct snd_soc_tplg_bytes_control *tplg_bc;
3103	struct snd_soc_tplg_enum_control *tplg_ec;
3104	struct hdac_bus *bus  = snd_soc_component_get_drvdata(cmpnt);
3105	struct soc_enum *se;
3106
3107	switch (hdr->ops.info) {
3108	case SND_SOC_TPLG_CTL_BYTES:
3109		tplg_bc = container_of(hdr,
3110				struct snd_soc_tplg_bytes_control, hdr);
3111		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
3112			sb = (struct soc_bytes_ext *)kctl->private_value;
3113			if (tplg_bc->priv.size)
3114				return skl_init_algo_data(
3115						bus->dev, sb, tplg_bc);
3116		}
3117		break;
3118
3119	case SND_SOC_TPLG_CTL_ENUM:
3120		tplg_ec = container_of(hdr,
3121				struct snd_soc_tplg_enum_control, hdr);
3122		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
3123			se = (struct soc_enum *)kctl->private_value;
3124			if (tplg_ec->priv.size)
3125				skl_init_enum_data(bus->dev, se, tplg_ec);
3126		}
3127
3128		/*
3129		 * now that the control initializations are done, remove
3130		 * write permission for the DMIC configuration enums to
3131		 * avoid conflicts between NHLT settings and user interaction
3132		 */
3133
3134		if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
3135			kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
3136
3137		break;
3138
3139	default:
3140		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
3141			hdr->ops.get, hdr->ops.put, hdr->ops.info);
3142		break;
3143	}
3144
3145	return 0;
3146}
3147
3148static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3149		struct snd_soc_tplg_vendor_string_elem *str_elem,
3150		struct skl_dev *skl)
3151{
3152	int tkn_count = 0;
3153	static int ref_count;
3154
3155	switch (str_elem->token) {
3156	case SKL_TKN_STR_LIB_NAME:
3157		if (ref_count > skl->lib_count - 1) {
3158			ref_count = 0;
3159			return -EINVAL;
3160		}
3161
		strscpy(skl->lib_info[ref_count].name,
3163			str_elem->string,
3164			ARRAY_SIZE(skl->lib_info[ref_count].name));
3165		ref_count++;
3166		break;
3167
3168	default:
3169		dev_err(dev, "Not a string token %d\n", str_elem->token);
3170		break;
3171	}
3172	tkn_count++;
3173
3174	return tkn_count;
3175}
3176
3177static int skl_tplg_get_str_tkn(struct device *dev,
3178		struct snd_soc_tplg_vendor_array *array,
3179		struct skl_dev *skl)
3180{
3181	int tkn_count = 0, ret;
3182	struct snd_soc_tplg_vendor_string_elem *str_elem;
3183
3184	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
3185	while (tkn_count < array->num_elems) {
3186		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
3187		str_elem++;
3188
3189		if (ret < 0)
3190			return ret;
3191
3192		tkn_count = tkn_count + ret;
3193	}
3194
3195	return tkn_count;
3196}
3197
3198static int skl_tplg_manifest_fill_fmt(struct device *dev,
3199		struct skl_module_iface *fmt,
3200		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3201		u32 dir, int fmt_idx)
3202{
3203	struct skl_module_pin_fmt *dst_fmt;
3204	struct skl_module_fmt *mod_fmt;
3205	int ret;
3206
3207	if (!fmt)
3208		return -EINVAL;
3209
3210	switch (dir) {
3211	case SKL_DIR_IN:
3212		dst_fmt = &fmt->inputs[fmt_idx];
3213		break;
3214
3215	case SKL_DIR_OUT:
3216		dst_fmt = &fmt->outputs[fmt_idx];
3217		break;
3218
3219	default:
3220		dev_err(dev, "Invalid direction: %d\n", dir);
3221		return -EINVAL;
3222	}
3223
3224	mod_fmt = &dst_fmt->fmt;
3225
3226	switch (tkn_elem->token) {
3227	case SKL_TKN_MM_U32_INTF_PIN_ID:
3228		dst_fmt->id = tkn_elem->value;
3229		break;
3230
3231	default:
3232		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
3233					tkn_elem->value);
3234		if (ret < 0)
3235			return ret;
3236		break;
3237	}
3238
3239	return 0;
3240}
3241
3242static int skl_tplg_fill_mod_info(struct device *dev,
3243		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3244		struct skl_module *mod)
3245{
3246
3247	if (!mod)
3248		return -EINVAL;
3249
3250	switch (tkn_elem->token) {
3251	case SKL_TKN_U8_IN_PIN_TYPE:
3252		mod->input_pin_type = tkn_elem->value;
3253		break;
3254
3255	case SKL_TKN_U8_OUT_PIN_TYPE:
3256		mod->output_pin_type = tkn_elem->value;
3257		break;
3258
3259	case SKL_TKN_U8_IN_QUEUE_COUNT:
3260		mod->max_input_pins = tkn_elem->value;
3261		break;
3262
3263	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3264		mod->max_output_pins = tkn_elem->value;
3265		break;
3266
3267	case SKL_TKN_MM_U8_NUM_RES:
3268		mod->nr_resources = tkn_elem->value;
3269		break;
3270
3271	case SKL_TKN_MM_U8_NUM_INTF:
3272		mod->nr_interfaces = tkn_elem->value;
3273		break;
3274
3275	default:
		dev_err(dev, "Invalid mod info token %d\n", tkn_elem->token);
3277		return -EINVAL;
3278	}
3279
3280	return 0;
3281}
3282
3283
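/*
 * Parse a single integer-valued manifest token. The module, resource and
 * format indices selected by earlier tokens are kept in static variables
 * so that subsequent tokens land in the right slot.
 */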
3284static int skl_tplg_get_int_tkn(struct device *dev,
3285		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3286		struct skl_dev *skl)
3287{
3288	int tkn_count = 0, ret;
3289	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
3290	struct skl_module_res *res = NULL;
3291	struct skl_module_iface *fmt = NULL;
3292	struct skl_module *mod = NULL;
3293	static struct skl_astate_param *astate_table;
3294	static int astate_cfg_idx, count;
3295	int i;
3296	size_t size;
3297
3298	if (skl->modules) {
3299		mod = skl->modules[mod_idx];
3300		res = &mod->resources[res_val_idx];
3301		fmt = &mod->formats[intf_val_idx];
3302	}
3303
3304	switch (tkn_elem->token) {
3305	case SKL_TKN_U32_LIB_COUNT:
3306		skl->lib_count = tkn_elem->value;
3307		break;
3308
3309	case SKL_TKN_U8_NUM_MOD:
3310		skl->nr_modules = tkn_elem->value;
3311		skl->modules = devm_kcalloc(dev, skl->nr_modules,
3312				sizeof(*skl->modules), GFP_KERNEL);
3313		if (!skl->modules)
3314			return -ENOMEM;
3315
3316		for (i = 0; i < skl->nr_modules; i++) {
3317			skl->modules[i] = devm_kzalloc(dev,
3318					sizeof(struct skl_module), GFP_KERNEL);
3319			if (!skl->modules[i])
3320				return -ENOMEM;
3321		}
3322		break;
3323
3324	case SKL_TKN_MM_U8_MOD_IDX:
3325		mod_idx = tkn_elem->value;
3326		break;
3327
3328	case SKL_TKN_U32_ASTATE_COUNT:
3329		if (astate_table != NULL) {
			dev_err(dev, "More than one entry for A-State count\n");
3331			return -EINVAL;
3332		}
3333
3334		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
3335			dev_err(dev, "Invalid A-State count %d\n",
3336				tkn_elem->value);
3337			return -EINVAL;
3338		}
3339
3340		size = struct_size(skl->cfg.astate_cfg, astate_table,
3341				   tkn_elem->value);
3342		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
3343		if (!skl->cfg.astate_cfg)
3344			return -ENOMEM;
3345
3346		astate_table = skl->cfg.astate_cfg->astate_table;
3347		count = skl->cfg.astate_cfg->count = tkn_elem->value;
3348		break;
3349
3350	case SKL_TKN_U32_ASTATE_IDX:
3351		if (tkn_elem->value >= count) {
3352			dev_err(dev, "Invalid A-State index %d\n",
3353				tkn_elem->value);
3354			return -EINVAL;
3355		}
3356
3357		astate_cfg_idx = tkn_elem->value;
3358		break;
3359
3360	case SKL_TKN_U32_ASTATE_KCPS:
3361		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
3362		break;
3363
3364	case SKL_TKN_U32_ASTATE_CLK_SRC:
3365		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
3366		break;
3367
3368	case SKL_TKN_U8_IN_PIN_TYPE:
3369	case SKL_TKN_U8_OUT_PIN_TYPE:
3370	case SKL_TKN_U8_IN_QUEUE_COUNT:
3371	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3372	case SKL_TKN_MM_U8_NUM_RES:
3373	case SKL_TKN_MM_U8_NUM_INTF:
3374		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
3375		if (ret < 0)
3376			return ret;
3377		break;
3378
3379	case SKL_TKN_U32_DIR_PIN_COUNT:
3380		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
3381		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
3382		break;
3383
3384	case SKL_TKN_MM_U32_RES_ID:
3385		if (!res)
3386			return -EINVAL;
3387
3388		res->id = tkn_elem->value;
3389		res_val_idx = tkn_elem->value;
3390		break;
3391
3392	case SKL_TKN_MM_U32_FMT_ID:
3393		if (!fmt)
3394			return -EINVAL;
3395
3396		fmt->fmt_idx = tkn_elem->value;
3397		intf_val_idx = tkn_elem->value;
3398		break;
3399
3400	case SKL_TKN_MM_U32_CPS:
3401	case SKL_TKN_MM_U32_DMA_SIZE:
3402	case SKL_TKN_MM_U32_CPC:
3403	case SKL_TKN_U32_MEM_PAGES:
3404	case SKL_TKN_U32_OBS:
3405	case SKL_TKN_U32_IBS:
3406	case SKL_TKN_MM_U32_RES_PIN_ID:
3407	case SKL_TKN_MM_U32_PIN_BUF:
3408		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
3409		if (ret < 0)
3410			return ret;
3411
3412		break;
3413
3414	case SKL_TKN_MM_U32_NUM_IN_FMT:
		if (!res)
3416			return -EINVAL;
3417
3418		res->nr_input_pins = tkn_elem->value;
3419		break;
3420
3421	case SKL_TKN_MM_U32_NUM_OUT_FMT:
		if (!res)
3423			return -EINVAL;
3424
3425		res->nr_output_pins = tkn_elem->value;
3426		break;
3427
3428	case SKL_TKN_U32_FMT_CH:
3429	case SKL_TKN_U32_FMT_FREQ:
3430	case SKL_TKN_U32_FMT_BIT_DEPTH:
3431	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
3432	case SKL_TKN_U32_FMT_CH_CONFIG:
3433	case SKL_TKN_U32_FMT_INTERLEAVE:
3434	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
3435	case SKL_TKN_U32_FMT_CH_MAP:
3436	case SKL_TKN_MM_U32_INTF_PIN_ID:
3437		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
3438						 dir, pin_idx);
3439		if (ret < 0)
3440			return ret;
3441		break;
3442
3443	default:
3444		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
3445		return -EINVAL;
3446	}
3447	tkn_count++;
3448
3449	return tkn_count;
3450}
3451
3452/*
3453 * Fill the manifest structure by parsing the tokens based on the
3454 * type.
3455 */
3456static int skl_tplg_get_manifest_tkn(struct device *dev,
3457		char *pvt_data, struct skl_dev *skl,
3458		int block_size)
3459{
3460	int tkn_count = 0, ret;
3461	int off = 0, tuple_size = 0;
3462	u8 uuid_index = 0;
3463	struct snd_soc_tplg_vendor_array *array;
3464	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
3465
3466	if (block_size <= 0)
3467		return -EINVAL;
3468
3469	while (tuple_size < block_size) {
3470		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
3471		off += array->size;
3472		switch (array->type) {
3473		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
3474			ret = skl_tplg_get_str_tkn(dev, array, skl);
3475
3476			if (ret < 0)
3477				return ret;
3478			tkn_count = ret;
3479
3480			tuple_size += tkn_count *
3481				sizeof(struct snd_soc_tplg_vendor_string_elem);
3482			continue;
3483
3484		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
3485			if (array->uuid->token != SKL_TKN_UUID) {
				dev_err(dev, "Not a UUID token: %d\n",
3487					array->uuid->token);
3488				return -EINVAL;
3489			}
3490			if (uuid_index >= skl->nr_modules) {
3491				dev_err(dev, "Too many UUID tokens\n");
3492				return -EINVAL;
3493			}
3494			import_guid(&skl->modules[uuid_index++]->uuid,
3495				    array->uuid->uuid);
3496
3497			tuple_size += sizeof(*array->uuid);
3498			continue;
3499
3500		default:
3501			tkn_elem = array->value;
3502			tkn_count = 0;
3503			break;
3504		}
3505
3506		while (tkn_count <= array->num_elems - 1) {
3507			ret = skl_tplg_get_int_tkn(dev,
3508					tkn_elem, skl);
3509			if (ret < 0)
3510				return ret;
3511
3512			tkn_count = tkn_count + ret;
3513			tkn_elem++;
3514		}
3515		tuple_size += (tkn_count * sizeof(*tkn_elem));
3516		tkn_count = 0;
3517	}
3518
3519	return off;
3520}
3521
3522/*
3523 * Parse manifest private data for tokens. The private data block is
3524 * preceded by descriptors for type and size of data block.
3525 */
3526static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
3527			struct device *dev, struct skl_dev *skl)
3528{
3529	struct snd_soc_tplg_vendor_array *array;
3530	int num_blocks, block_size = 0, block_type, off = 0;
3531	char *data;
3532	int ret;
3533
3534	/* Read the NUM_DATA_BLOCKS descriptor */
3535	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
3536	ret = skl_tplg_get_desc_blocks(dev, array);
3537	if (ret < 0)
3538		return ret;
3539	num_blocks = ret;
3540
3541	off += array->size;
3542	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
3543	while (num_blocks > 0) {
3544		array = (struct snd_soc_tplg_vendor_array *)
3545				(manifest->priv.data + off);
3546		ret = skl_tplg_get_desc_blocks(dev, array);
3547
3548		if (ret < 0)
3549			return ret;
3550		block_type = ret;
3551		off += array->size;
3552
3553		array = (struct snd_soc_tplg_vendor_array *)
3554			(manifest->priv.data + off);
3555
3556		ret = skl_tplg_get_desc_blocks(dev, array);
3557
3558		if (ret < 0)
3559			return ret;
3560		block_size = ret;
3561		off += array->size;
3562
3563		data = (manifest->priv.data + off);
3564
3565		if (block_type == SKL_TYPE_TUPLE) {
3566			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
3567					block_size);
3568
3569			if (ret < 0)
3570				return ret;
3571
3572			--num_blocks;
3573		} else {
3574			return -EINVAL;
3575		}
3576		off += ret;
3577	}
3578
3579	return 0;
3580}
3581
3582static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
3583				struct snd_soc_tplg_manifest *manifest)
3584{
3585	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3586	struct skl_dev *skl = bus_to_skl(bus);
3587
3588	/* proceed only if we have private data defined */
3589	if (manifest->priv.size == 0)
3590		return 0;
3591
3592	skl_tplg_get_manifest_data(manifest, bus->dev, skl);
3593
3594	if (skl->lib_count > SKL_MAX_LIB) {
3595		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
3596					skl->lib_count);
		return -EINVAL;
3598	}
3599
3600	return 0;
3601}
3602
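/*
 * Once the topology is loaded, walk the enum controls and pre-select the
 * DMIC configuration entry ("c<n>") matching the DMIC count reported by
 * the machine driver.
 */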
3603static int skl_tplg_complete(struct snd_soc_component *component)
3604{
3605	struct snd_soc_dobj *dobj;
3606	struct snd_soc_acpi_mach *mach;
3607	struct snd_ctl_elem_value *val;
3608	int i;
3609
3610	val = kmalloc(sizeof(*val), GFP_KERNEL);
3611	if (!val)
3612		return -ENOMEM;
3613
3614	mach = dev_get_platdata(component->card->dev);
3615	list_for_each_entry(dobj, &component->dobj_list, list) {
3616		struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
3617		struct soc_enum *se;
3618		char **texts;
3619		char chan_text[4];
3620
3621		if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
3622		    kcontrol->put != skl_tplg_multi_config_set_dmic)
3623			continue;
3624
3625		se = (struct soc_enum *)kcontrol->private_value;
3626		texts = dobj->control.dtexts;
3627		sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
3628
3629		for (i = 0; i < se->items; i++) {
3630			if (strstr(texts[i], chan_text)) {
3631				memset(val, 0, sizeof(*val));
3632				val->value.enumerated.item[0] = i;
3633				kcontrol->put(kcontrol, val);
3634			}
3635		}
3636	}
3637
3638	kfree(val);
3639	return 0;
3640}
3641
3642static struct snd_soc_tplg_ops skl_tplg_ops  = {
3643	.widget_load = skl_tplg_widget_load,
3644	.control_load = skl_tplg_control_load,
3645	.bytes_ext_ops = skl_tlv_ops,
3646	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
3647	.io_ops = skl_tplg_kcontrol_ops,
3648	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
3649	.manifest = skl_manifest_load,
3650	.dai_load = skl_dai_load,
3651	.complete = skl_tplg_complete,
3652};
3653
/*
 * A pipe can have multiple modules, each of them will be a DAPM widget as
 * well. While managing a pipeline we need the list of all the widgets in
 * that pipeline, so this helper, skl_tplg_create_pipe_widget_list(),
 * collects the SKL-type widgets for each pipeline.
 */
3660static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
3661{
3662	struct snd_soc_dapm_widget *w;
3663	struct skl_module_cfg *mcfg = NULL;
3664	struct skl_pipe_module *p_module = NULL;
3665	struct skl_pipe *pipe;
3666
3667	list_for_each_entry(w, &component->card->widgets, list) {
3668		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
3669			mcfg = w->priv;
3670			pipe = mcfg->pipe;
3671
3672			p_module = devm_kzalloc(component->dev,
3673						sizeof(*p_module), GFP_KERNEL);
3674			if (!p_module)
3675				return -ENOMEM;
3676
3677			p_module->w = w;
3678			list_add_tail(&p_module->node, &pipe->w_list);
3679		}
3680	}
3681
3682	return 0;
3683}
3684
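/*
 * A pipe is treated as pass-through when it contains both an HDA-host
 * module and a link-side (non-host, non-none) module.
 */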
3685static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe)
3686{
3687	struct skl_pipe_module *w_module;
3688	struct snd_soc_dapm_widget *w;
3689	struct skl_module_cfg *mconfig;
3690	bool host_found = false, link_found = false;
3691
3692	list_for_each_entry(w_module, &pipe->w_list, node) {
3693		w = w_module->w;
3694		mconfig = w->priv;
3695
3696		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
3697			host_found = true;
3698		else if (mconfig->dev_type != SKL_DEVICE_NONE)
3699			link_found = true;
3700	}
3701
	pipe->passthru = host_found && link_found;
3706}
3707
3708/*
3709 * SKL topology init routine
3710 */
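/*
 * The topology binary is requested in order: the platform-specific
 * skl->tplg_name, then "<mach drv_name>-tplg.bin", and finally the
 * generic "dfw_sst.bin" fallback.
 */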
3711int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
3712{
3713	int ret;
3714	const struct firmware *fw;
3715	struct skl_dev *skl = bus_to_skl(bus);
3716	struct skl_pipeline *ppl;
3717
3718	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3719	if (ret < 0) {
3720		char alt_tplg_name[64];
3721
3722		snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
3723			 skl->mach->drv_name);
		dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s\n",
			 skl->tplg_name, ret, alt_tplg_name);
3726
3727		ret = request_firmware(&fw, alt_tplg_name, bus->dev);
3728		if (!ret)
3729			goto component_load;
3730
		dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin\n",
			 alt_tplg_name, ret);
3733
3734		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
3735		if (ret < 0) {
3736			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
3737					"dfw_sst.bin", ret);
3738			return ret;
3739		}
3740	}
3741
3742component_load:
3743	ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw);
3744	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
3746		goto err;
3747	}
3748
3749	ret = skl_tplg_create_pipe_widget_list(component);
3750	if (ret < 0) {
		dev_err(bus->dev, "tplg create pipe widget list failed %d\n",
3752				ret);
3753		goto err;
3754	}
3755
3756	list_for_each_entry(ppl, &skl->ppl_list, node)
3757		skl_tplg_set_pipe_type(skl, ppl->pipe);
3758
3759err:
3760	release_firmware(fw);
3761	return ret;
3762}
3763
3764void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
3765{
3766	struct skl_dev *skl = bus_to_skl(bus);
3767	struct skl_pipeline *ppl, *tmp;
3768
3769	list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
3770		list_del(&ppl->node);
3771
3772	/* clean up topology */
3773	snd_soc_tplg_component_remove(component);
3774}
3775