1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
4 *
5 * lpass-cpu.c -- ALSA SoC CPU DAI driver for QTi LPASS
6 */
7
8#include <dt-bindings/sound/qcom,lpass.h>
9#include <linux/clk.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/platform_device.h>
14#include <sound/pcm.h>
15#include <sound/pcm_params.h>
16#include <linux/regmap.h>
17#include <sound/soc.h>
18#include <sound/soc-dai.h>
19#include "lpass-lpaif-reg.h"
20#include "lpass.h"
21
22#define LPASS_CPU_MAX_MI2S_LINES	4
23#define LPASS_CPU_I2S_SD0_MASK		BIT(0)
24#define LPASS_CPU_I2S_SD1_MASK		BIT(1)
25#define LPASS_CPU_I2S_SD2_MASK		BIT(2)
26#define LPASS_CPU_I2S_SD3_MASK		BIT(3)
27#define LPASS_CPU_I2S_SD0_1_MASK	GENMASK(1, 0)
28#define LPASS_CPU_I2S_SD2_3_MASK	GENMASK(3, 2)
29#define LPASS_CPU_I2S_SD0_1_2_MASK	GENMASK(2, 0)
30#define LPASS_CPU_I2S_SD0_1_2_3_MASK	GENMASK(3, 0)
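/* 'rw' values for the shared regmap accessibility helpers below */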
31#define LPASS_REG_READ 1
32#define LPASS_REG_WRITE 0
33
/*
 * Channel map for quad-channel playback on MI2S Secondary
 */
37static struct snd_pcm_chmap_elem lpass_quad_chmaps[] = {
38		{ .channels = 4,
39		  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_RL,
40				SNDRV_CHMAP_FR, SNDRV_CHMAP_RR } },
41		{ }
};

static int lpass_cpu_init_i2sctl_bitfields(struct device *dev,
44			struct lpaif_i2sctl *i2sctl, struct regmap *map)
45{
46	struct lpass_data *drvdata = dev_get_drvdata(dev);
47	const struct lpass_variant *v = drvdata->variant;
48
49	i2sctl->loopback = devm_regmap_field_alloc(dev, map, v->loopback);
50	i2sctl->spken = devm_regmap_field_alloc(dev, map, v->spken);
51	i2sctl->spkmode = devm_regmap_field_alloc(dev, map, v->spkmode);
52	i2sctl->spkmono = devm_regmap_field_alloc(dev, map, v->spkmono);
53	i2sctl->micen = devm_regmap_field_alloc(dev, map, v->micen);
54	i2sctl->micmode = devm_regmap_field_alloc(dev, map, v->micmode);
55	i2sctl->micmono = devm_regmap_field_alloc(dev, map, v->micmono);
56	i2sctl->wssrc = devm_regmap_field_alloc(dev, map, v->wssrc);
57	i2sctl->bitwidth = devm_regmap_field_alloc(dev, map, v->bitwidth);
58
59	if (IS_ERR(i2sctl->loopback) || IS_ERR(i2sctl->spken) ||
60	    IS_ERR(i2sctl->spkmode) || IS_ERR(i2sctl->spkmono) ||
61	    IS_ERR(i2sctl->micen) || IS_ERR(i2sctl->micmode) ||
62	    IS_ERR(i2sctl->micmono) || IS_ERR(i2sctl->wssrc) ||
63	    IS_ERR(i2sctl->bitwidth))
64		return -EINVAL;
65
66	return 0;
67}
68
69static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
70		unsigned int freq, int dir)
71{
72	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
73	int ret;
74
75	ret = clk_set_rate(drvdata->mi2s_osr_clk[dai->driver->id], freq);
76	if (ret)
77		dev_err(dai->dev, "error setting mi2s osrclk to %u: %d\n",
78			freq, ret);
79
80	return ret;
81}
82
83static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream,
84		struct snd_soc_dai *dai)
85{
86	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
87	int ret;
88
89	ret = clk_prepare_enable(drvdata->mi2s_osr_clk[dai->driver->id]);
90	if (ret) {
91		dev_err(dai->dev, "error in enabling mi2s osr clk: %d\n", ret);
92		return ret;
93	}
94	ret = clk_prepare(drvdata->mi2s_bit_clk[dai->driver->id]);
95	if (ret) {
96		dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
97		clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
98		return ret;
99	}
100	return 0;
101}
102
103static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
104		struct snd_soc_dai *dai)
105{
106	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
107	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
108	unsigned int id = dai->driver->id;
109
110	clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
	/*
	 * Ensure the LRCLK is disabled even if shutdown is reached without a
	 * trigger stop (e.g. during device node validation). This is harmless
	 * if it was already disabled in lpass_cpu_daiops_trigger() on
	 * suspend.
	 */
116	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
117		regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
118	else
119		regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
120
	/*
	 * Disable the BCLK only if it was enabled in lpass_cpu_daiops_prepare();
	 * prepare may not have been called before shutdown. This clk_disable()
	 * is paired with the clk_enable() in lpass_cpu_daiops_prepare().
	 */
126	if (drvdata->mi2s_was_prepared[dai->driver->id]) {
127		drvdata->mi2s_was_prepared[dai->driver->id] = false;
128		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
129	}
130
131	clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
132}
133
134static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
135		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
136{
137	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
138	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
139	unsigned int id = dai->driver->id;
140	snd_pcm_format_t format = params_format(params);
141	unsigned int channels = params_channels(params);
142	unsigned int rate = params_rate(params);
143	unsigned int mode;
144	unsigned int regval;
145	int bitwidth, ret;
146
147	bitwidth = snd_pcm_format_width(format);
148	if (bitwidth < 0) {
149		dev_err(dai->dev, "invalid bit width given: %d\n", bitwidth);
150		return bitwidth;
151	}
152
153	ret = regmap_fields_write(i2sctl->loopback, id,
154				 LPAIF_I2SCTL_LOOPBACK_DISABLE);
155	if (ret) {
156		dev_err(dai->dev, "error updating loopback field: %d\n", ret);
157		return ret;
158	}
159
160	ret = regmap_fields_write(i2sctl->wssrc, id,
161				 LPAIF_I2SCTL_WSSRC_INTERNAL);
162	if (ret) {
163		dev_err(dai->dev, "error updating wssrc field: %d\n", ret);
164		return ret;
165	}
166
167	switch (bitwidth) {
168	case 16:
169		regval = LPAIF_I2SCTL_BITWIDTH_16;
170		break;
171	case 24:
172		regval = LPAIF_I2SCTL_BITWIDTH_24;
173		break;
174	case 32:
175		regval = LPAIF_I2SCTL_BITWIDTH_32;
176		break;
177	default:
178		dev_err(dai->dev, "invalid bitwidth given: %d\n", bitwidth);
179		return -EINVAL;
180	}
181
182	ret = regmap_fields_write(i2sctl->bitwidth, id, regval);
183	if (ret) {
184		dev_err(dai->dev, "error updating bitwidth field: %d\n", ret);
185		return ret;
186	}
187
188	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
189		mode = drvdata->mi2s_playback_sd_mode[id];
190	else
191		mode = drvdata->mi2s_capture_sd_mode[id];
192
193	if (!mode) {
194		dev_err(dai->dev, "no line is assigned\n");
195		return -EINVAL;
196	}
197
198	switch (channels) {
199	case 1:
200	case 2:
201		switch (mode) {
202		case LPAIF_I2SCTL_MODE_QUAD01:
203		case LPAIF_I2SCTL_MODE_6CH:
204		case LPAIF_I2SCTL_MODE_8CH:
205			mode = LPAIF_I2SCTL_MODE_SD0;
206			break;
207		case LPAIF_I2SCTL_MODE_QUAD23:
208			mode = LPAIF_I2SCTL_MODE_SD2;
209			break;
210		}
211
212		break;
213	case 4:
214		if (mode < LPAIF_I2SCTL_MODE_QUAD01) {
215			dev_err(dai->dev, "cannot configure 4 channels with mode %d\n",
216				mode);
217			return -EINVAL;
218		}
219
220		switch (mode) {
221		case LPAIF_I2SCTL_MODE_6CH:
222		case LPAIF_I2SCTL_MODE_8CH:
223			mode = LPAIF_I2SCTL_MODE_QUAD01;
224			break;
225		}
226		break;
227	case 6:
228		if (mode < LPAIF_I2SCTL_MODE_6CH) {
229			dev_err(dai->dev, "cannot configure 6 channels with mode %d\n",
230				mode);
231			return -EINVAL;
232		}
233
234		switch (mode) {
235		case LPAIF_I2SCTL_MODE_8CH:
236			mode = LPAIF_I2SCTL_MODE_6CH;
237			break;
238		}
239		break;
240	case 8:
241		if (mode < LPAIF_I2SCTL_MODE_8CH) {
242			dev_err(dai->dev, "cannot configure 8 channels with mode %d\n",
243				mode);
244			return -EINVAL;
245		}
246		break;
247	default:
248		dev_err(dai->dev, "invalid channels given: %u\n", channels);
249		return -EINVAL;
250	}
251
252	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
253		ret = regmap_fields_write(i2sctl->spkmode, id,
254					 LPAIF_I2SCTL_SPKMODE(mode));
255		if (ret) {
256			dev_err(dai->dev, "error writing to i2sctl spkr mode: %d\n",
257				ret);
258			return ret;
259		}
260		if (channels >= 2)
261			ret = regmap_fields_write(i2sctl->spkmono, id,
262						 LPAIF_I2SCTL_SPKMONO_STEREO);
263		else
264			ret = regmap_fields_write(i2sctl->spkmono, id,
265						 LPAIF_I2SCTL_SPKMONO_MONO);
266	} else {
267		ret = regmap_fields_write(i2sctl->micmode, id,
268					 LPAIF_I2SCTL_MICMODE(mode));
269		if (ret) {
270			dev_err(dai->dev, "error writing to i2sctl mic mode: %d\n",
271				ret);
272			return ret;
273		}
274		if (channels >= 2)
275			ret = regmap_fields_write(i2sctl->micmono, id,
276						 LPAIF_I2SCTL_MICMONO_STEREO);
277		else
278			ret = regmap_fields_write(i2sctl->micmono, id,
279						 LPAIF_I2SCTL_MICMONO_MONO);
280	}
281
282	if (ret) {
283		dev_err(dai->dev, "error writing to i2sctl channels mode: %d\n",
284			ret);
285		return ret;
286	}
287
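	/* I2S bit clock = rate * bit width * 2 (LRCLK high + low phases) */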
288	ret = clk_set_rate(drvdata->mi2s_bit_clk[id],
289			   rate * bitwidth * 2);
290	if (ret) {
291		dev_err(dai->dev, "error setting mi2s bitclk to %u: %d\n",
292			rate * bitwidth * 2, ret);
293		return ret;
294	}
295
296	return 0;
297}
298
299static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
300		int cmd, struct snd_soc_dai *dai)
301{
302	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
303	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
304	unsigned int id = dai->driver->id;
305	int ret = -EINVAL;
306
307	switch (cmd) {
308	case SNDRV_PCM_TRIGGER_START:
309	case SNDRV_PCM_TRIGGER_RESUME:
310	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/*
		 * Ensure the LPASS BCLK/LRCLK are enabled during device
		 * resume, as lpass_cpu_daiops_prepare() is not called after
		 * the device resumes. We don't check mi2s_was_prepared before
		 * enabling/disabling the BCLK in trigger events because:
		 *  1. These trigger events are paired, so the BCLK
		 *     enable_count stays balanced.
		 *  2. The BCLK can be shared (e.g. headset and headset mic);
		 *     we need to increase the enable_count so that we don't
		 *     turn off the shared BCLK while other devices are using
		 *     it.
		 */
323		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
324			ret = regmap_fields_write(i2sctl->spken, id,
325						 LPAIF_I2SCTL_SPKEN_ENABLE);
		} else {
327			ret = regmap_fields_write(i2sctl->micen, id,
328						 LPAIF_I2SCTL_MICEN_ENABLE);
329		}
330		if (ret)
331			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
332				ret);
333
334		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
335		if (ret) {
336			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
337			clk_disable(drvdata->mi2s_osr_clk[id]);
338			return ret;
339		}
340		break;
341	case SNDRV_PCM_TRIGGER_STOP:
342	case SNDRV_PCM_TRIGGER_SUSPEND:
343	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/*
		 * Ensure the LPASS BCLK/LRCLK are disabled during device
		 * suspend.
		 */
348		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
349			ret = regmap_fields_write(i2sctl->spken, id,
350						 LPAIF_I2SCTL_SPKEN_DISABLE);
		} else {
352			ret = regmap_fields_write(i2sctl->micen, id,
353						 LPAIF_I2SCTL_MICEN_DISABLE);
354		}
355		if (ret)
356			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
357				ret);
358
359		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
360
361		break;
362	}
363
364	return ret;
365}
366
367static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
368		struct snd_soc_dai *dai)
369{
370	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
371	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
372	unsigned int id = dai->driver->id;
373	int ret;
374
	/*
	 * Ensure the LPASS BCLK/LRCLK are enabled a bit before the
	 * playback/capture data flow starts, so the codec has some lead time
	 * before the data flow (e.g. to drop start-up pop noise before
	 * capture starts).
	 */
381	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
382		ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
383	else
384		ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
385
386	if (ret) {
387		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
388		return ret;
389	}
390
391	/*
392	 * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
393	 * be called multiple times. It's paired with the clk_disable in
394	 * lpass_cpu_daiops_shutdown.
395	 */
396	if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
397		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
398		if (ret) {
399			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
400			return ret;
401		}
402		drvdata->mi2s_was_prepared[dai->driver->id] = true;
403	}
404	return 0;
405}
406
407static int lpass_cpu_daiops_pcm_new(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
408{
409	int ret;
410	struct snd_soc_dai_driver *drv = dai->driver;
411	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
412
413	if (drvdata->mi2s_playback_sd_mode[dai->id] == LPAIF_I2SCTL_MODE_QUAD01) {
414		ret =  snd_pcm_add_chmap_ctls(rtd->pcm, SNDRV_PCM_STREAM_PLAYBACK,
415				lpass_quad_chmaps, drv->playback.channels_max, 0,
416				NULL);
417		if (ret < 0)
418			return ret;
419	}
420
421	return 0;
422}
423
424static int lpass_cpu_daiops_probe(struct snd_soc_dai *dai)
425{
426	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
427	int ret;
428
429	/* ensure audio hardware is disabled */
430	ret = regmap_write(drvdata->lpaif_map,
431			LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), 0);
432	if (ret)
433		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
434
435	return ret;
436}
437
438const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
439	.probe		= lpass_cpu_daiops_probe,
440	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
441	.startup	= lpass_cpu_daiops_startup,
442	.shutdown	= lpass_cpu_daiops_shutdown,
443	.hw_params	= lpass_cpu_daiops_hw_params,
444	.trigger	= lpass_cpu_daiops_trigger,
445	.prepare	= lpass_cpu_daiops_prepare,
446};
447EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
448
449const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops2 = {
450	.pcm_new	= lpass_cpu_daiops_pcm_new,
451	.probe		= lpass_cpu_daiops_probe,
452	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
453	.startup	= lpass_cpu_daiops_startup,
454	.shutdown	= lpass_cpu_daiops_shutdown,
455	.hw_params	= lpass_cpu_daiops_hw_params,
456	.trigger	= lpass_cpu_daiops_trigger,
457	.prepare	= lpass_cpu_daiops_prepare,
458};
459EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops2);
460
461static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
462				   const struct of_phandle_args *args,
463				   const char **dai_name)
464{
465	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
466	const struct lpass_variant *variant = drvdata->variant;
467	int id = args->args[0];
468	int ret = -EINVAL;
469	int i;
470
	for (i = 0; i < variant->num_dai; i++) {
472		if (variant->dai_driver[i].id == id) {
473			*dai_name = variant->dai_driver[i].name;
474			ret = 0;
475			break;
476		}
477	}
478
479	return ret;
480}
481
482static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
483	.name = "lpass-cpu",
484	.of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
485	.legacy_dai_naming = 1,
486};
487
488static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
489{
490	struct lpass_data *drvdata = dev_get_drvdata(dev);
491	const struct lpass_variant *v = drvdata->variant;
492	int i;
493
494	for (i = 0; i < v->i2s_ports; ++i)
495		if (reg == LPAIF_I2SCTL_REG(v, i))
496			return true;
497
498	for (i = 0; i < v->irq_ports; ++i) {
499		if (reg == LPAIF_IRQEN_REG(v, i))
500			return true;
501		if (reg == LPAIF_IRQCLEAR_REG(v, i))
502			return true;
503	}
504
505	for (i = 0; i < v->rdma_channels; ++i) {
506		if (reg == LPAIF_RDMACTL_REG(v, i))
507			return true;
508		if (reg == LPAIF_RDMABASE_REG(v, i))
509			return true;
510		if (reg == LPAIF_RDMABUFF_REG(v, i))
511			return true;
512		if (reg == LPAIF_RDMAPER_REG(v, i))
513			return true;
514	}
515
516	for (i = 0; i < v->wrdma_channels; ++i) {
517		if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
518			return true;
519		if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
520			return true;
521		if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
522			return true;
523		if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
524			return true;
525	}
526
527	return false;
528}
529
530static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
531{
532	struct lpass_data *drvdata = dev_get_drvdata(dev);
533	const struct lpass_variant *v = drvdata->variant;
534	int i;
535
536	for (i = 0; i < v->i2s_ports; ++i)
537		if (reg == LPAIF_I2SCTL_REG(v, i))
538			return true;
539
540	for (i = 0; i < v->irq_ports; ++i) {
541		if (reg == LPAIF_IRQCLEAR_REG(v, i))
542			return true;
543		if (reg == LPAIF_IRQEN_REG(v, i))
544			return true;
545		if (reg == LPAIF_IRQSTAT_REG(v, i))
546			return true;
547	}
548
549	for (i = 0; i < v->rdma_channels; ++i) {
550		if (reg == LPAIF_RDMACTL_REG(v, i))
551			return true;
552		if (reg == LPAIF_RDMABASE_REG(v, i))
553			return true;
554		if (reg == LPAIF_RDMABUFF_REG(v, i))
555			return true;
556		if (reg == LPAIF_RDMACURR_REG(v, i))
557			return true;
558		if (reg == LPAIF_RDMAPER_REG(v, i))
559			return true;
560	}
561
562	for (i = 0; i < v->wrdma_channels; ++i) {
563		if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
564			return true;
565		if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
566			return true;
567		if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
568			return true;
569		if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
570			return true;
571		if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
572			return true;
573	}
574
575	return false;
576}
577
578static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
579{
580	struct lpass_data *drvdata = dev_get_drvdata(dev);
581	const struct lpass_variant *v = drvdata->variant;
582	int i;
583
584	for (i = 0; i < v->irq_ports; ++i) {
585		if (reg == LPAIF_IRQCLEAR_REG(v, i))
586			return true;
587		if (reg == LPAIF_IRQSTAT_REG(v, i))
588			return true;
589	}
590
591	for (i = 0; i < v->rdma_channels; ++i)
592		if (reg == LPAIF_RDMACURR_REG(v, i))
593			return true;
594
595	for (i = 0; i < v->wrdma_channels; ++i)
596		if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
597			return true;
598
599	return false;
600}
601
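/* max_register is filled in at probe time, once the variant is known */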
602static struct regmap_config lpass_cpu_regmap_config = {
603	.name = "lpass_cpu",
604	.reg_bits = 32,
605	.reg_stride = 4,
606	.val_bits = 32,
607	.writeable_reg = lpass_cpu_regmap_writeable,
608	.readable_reg = lpass_cpu_regmap_readable,
609	.volatile_reg = lpass_cpu_regmap_volatile,
610	.cache_type = REGCACHE_FLAT,
611};
612
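/*
 * Allocate regmap fields for the HDMI TX control, VBIT, parity, DP metadata,
 * sstream and per-channel DMA control registers.
 */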
613static int lpass_hdmi_init_bitfields(struct device *dev, struct regmap *map)
614{
615	struct lpass_data *drvdata = dev_get_drvdata(dev);
616	const struct lpass_variant *v = drvdata->variant;
617	unsigned int i;
618	struct lpass_hdmi_tx_ctl *tx_ctl;
619	struct regmap_field *legacy_en;
620	struct lpass_vbit_ctrl *vbit_ctl;
621	struct regmap_field *tx_parity;
622	struct lpass_dp_metadata_ctl *meta_ctl;
623	struct lpass_sstream_ctl *sstream_ctl;
624	struct regmap_field *ch_msb;
625	struct regmap_field *ch_lsb;
626	struct lpass_hdmitx_dmactl *tx_dmactl;
627	int rval;
628
629	tx_ctl = devm_kzalloc(dev, sizeof(*tx_ctl), GFP_KERNEL);
630	if (!tx_ctl)
631		return -ENOMEM;
632
633	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->soft_reset, tx_ctl->soft_reset);
634	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->force_reset, tx_ctl->force_reset);
635	drvdata->tx_ctl = tx_ctl;
636
637	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->legacy_en, legacy_en);
638	drvdata->hdmitx_legacy_en = legacy_en;
639
640	vbit_ctl = devm_kzalloc(dev, sizeof(*vbit_ctl), GFP_KERNEL);
641	if (!vbit_ctl)
642		return -ENOMEM;
643
644	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->replace_vbit, vbit_ctl->replace_vbit);
645	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->vbit_stream, vbit_ctl->vbit_stream);
646	drvdata->vbit_ctl = vbit_ctl;
647
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->calc_en, tx_parity);
650	drvdata->hdmitx_parity_calc_en = tx_parity;
651
652	meta_ctl = devm_kzalloc(dev, sizeof(*meta_ctl), GFP_KERNEL);
653	if (!meta_ctl)
654		return -ENOMEM;
655
656	rval = devm_regmap_field_bulk_alloc(dev, map, &meta_ctl->mute, &v->mute, 7);
657	if (rval)
658		return rval;
659	drvdata->meta_ctl = meta_ctl;
660
661	sstream_ctl = devm_kzalloc(dev, sizeof(*sstream_ctl), GFP_KERNEL);
662	if (!sstream_ctl)
663		return -ENOMEM;
664
665	rval = devm_regmap_field_bulk_alloc(dev, map, &sstream_ctl->sstream_en, &v->sstream_en, 9);
666	if (rval)
667		return rval;
668
669	drvdata->sstream_ctl = sstream_ctl;
670
671	for (i = 0; i < LPASS_MAX_HDMI_DMA_CHANNELS; i++) {
672		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->msb_bits, ch_msb);
673		drvdata->hdmitx_ch_msb[i] = ch_msb;
674
675		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->lsb_bits, ch_lsb);
676		drvdata->hdmitx_ch_lsb[i] = ch_lsb;
677
678		tx_dmactl = devm_kzalloc(dev, sizeof(*tx_dmactl), GFP_KERNEL);
679		if (!tx_dmactl)
680			return -ENOMEM;
681
682		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_chs, tx_dmactl->use_hw_chs);
683		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_usr, tx_dmactl->use_hw_usr);
684		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_chs_sel, tx_dmactl->hw_chs_sel);
685		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_usr_sel, tx_dmactl->hw_usr_sel);
686		drvdata->hdmi_tx_dmactl[i] = tx_dmactl;
687	}
688	return 0;
689}
690
691static bool lpass_hdmi_regmap_writeable(struct device *dev, unsigned int reg)
692{
693	struct lpass_data *drvdata = dev_get_drvdata(dev);
694	const struct lpass_variant *v = drvdata->variant;
695	int i;
696
697	if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
698		return true;
699	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
700		return true;
701	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
702		return true;
703	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
704		return true;
705	if (reg == LPASS_HDMI_TX_DP_ADDR(v))
706		return true;
707	if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
708		return true;
709	if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
710		return true;
711	if (reg == LPASS_HDMITX_APP_IRQCLEAR_REG(v))
712		return true;
713
714	for (i = 0; i < v->hdmi_rdma_channels; i++) {
715		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
716			return true;
717		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
718			return true;
719		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
720			return true;
721	}
722
723	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
724		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
725			return true;
726		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
727			return true;
728		if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
729			return true;
730		if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
731			return true;
732	}
733	return false;
734}
735
736static bool lpass_hdmi_regmap_readable(struct device *dev, unsigned int reg)
737{
738	struct lpass_data *drvdata = dev_get_drvdata(dev);
739	const struct lpass_variant *v = drvdata->variant;
740	int i;
741
742	if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
743		return true;
744	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
745		return true;
746	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
747		return true;
748
749	for (i = 0; i < v->hdmi_rdma_channels; i++) {
750		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
751			return true;
752		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
753			return true;
754		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
755			return true;
756	}
757
758	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
759		return true;
760	if (reg == LPASS_HDMI_TX_DP_ADDR(v))
761		return true;
762	if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
763		return true;
764	if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
765		return true;
766	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
767		return true;
768
769	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
770		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
771			return true;
772		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
773			return true;
774		if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
775			return true;
776		if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
777			return true;
778		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
779			return true;
780	}
781
782	return false;
783}
784
785static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
786{
787	struct lpass_data *drvdata = dev_get_drvdata(dev);
788	const struct lpass_variant *v = drvdata->variant;
789	int i;
790
791	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
792		return true;
793	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
794		return true;
795	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
796		return true;
797	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
798		return true;
799
800	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
801		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
802			return true;
803		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
804			return true;
805		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
806			return true;
807		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
808			return true;
809	}
810	return false;
811}
812
813static struct regmap_config lpass_hdmi_regmap_config = {
814	.name = "lpass_hdmi",
815	.reg_bits = 32,
816	.reg_stride = 4,
817	.val_bits = 32,
818	.writeable_reg = lpass_hdmi_regmap_writeable,
819	.readable_reg = lpass_hdmi_regmap_readable,
820	.volatile_reg = lpass_hdmi_regmap_volatile,
821	.cache_type = REGCACHE_FLAT,
822};
823
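/*
 * Shared readable/writeable check for the codec DMA (RXTX) regmap. The DMA
 * "current address" registers are read-only, so they are reported as
 * accessible only when 'rw' is LPASS_REG_READ.
 */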
824static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
825{
826	struct lpass_data *drvdata = dev_get_drvdata(dev);
827	const struct lpass_variant *v = drvdata->variant;
828	int i;
829
830	for (i = 0; i < v->rxtx_irq_ports; ++i) {
831		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
832			return true;
833		if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
834			return true;
835		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
836			return true;
837	}
838
839	for (i = 0; i < v->rxtx_rdma_channels; ++i) {
840		if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
841			return true;
842		if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
843			return true;
844		if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
845			return true;
846		if (rw == LPASS_REG_READ) {
847			if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
848				return true;
849		}
850		if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
851			return true;
852		if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
853			return true;
854	}
855
856	for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
857		if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
858							LPASS_CDC_DMA_TX3))
859			return true;
860		if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
861							LPASS_CDC_DMA_TX3))
862			return true;
863		if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
864							LPASS_CDC_DMA_TX3))
865			return true;
866		if (rw == LPASS_REG_READ) {
			if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
								LPASS_CDC_DMA_TX3))
868				return true;
869		}
870		if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
871							LPASS_CDC_DMA_TX3))
872			return true;
873		if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
874							LPASS_CDC_DMA_TX3))
875			return true;
876	}
877	return false;
878}
879
880static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
881{
882	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
883}
884
885static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
886{
887	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
888}
889
890static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
891{
892	struct lpass_data *drvdata = dev_get_drvdata(dev);
893	const struct lpass_variant *v = drvdata->variant;
894	int i;
895
896	for (i = 0; i < v->rxtx_irq_ports; ++i) {
897		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
898			return true;
899		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
900			return true;
901	}
902
903	for (i = 0; i < v->rxtx_rdma_channels; ++i)
904		if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
905			return true;
906
907	for (i = 0; i < v->rxtx_wrdma_channels; ++i)
908		if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
909							LPASS_CDC_DMA_TX3))
910			return true;
911
912	return false;
913}
914
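/*
 * Accessibility check for the VA codec DMA regmap; mirrors the RXTX helper
 * above.
 */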
915static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
916{
917	struct lpass_data *drvdata = dev_get_drvdata(dev);
918	const struct lpass_variant *v = drvdata->variant;
919	int i;
920
921	for (i = 0; i < v->va_irq_ports; ++i) {
922		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
923			return true;
924		if (reg == LPAIF_VA_IRQEN_REG(v, i))
925			return true;
926		if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
927			return true;
928	}
929
930	for (i = 0; i < v->va_wrdma_channels; ++i) {
931		if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, i + v->va_wrdma_channel_start,
932							LPASS_CDC_DMA_VA_TX0))
933			return true;
934		if (reg == LPAIF_CDC_VA_WRDMABASE_REG(v, i + v->va_wrdma_channel_start,
935							LPASS_CDC_DMA_VA_TX0))
936			return true;
937		if (reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, i + v->va_wrdma_channel_start,
938							LPASS_CDC_DMA_VA_TX0))
939			return true;
940		if (rw == LPASS_REG_READ) {
941			if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
942							LPASS_CDC_DMA_VA_TX0))
943				return true;
944		}
945		if (reg == LPAIF_CDC_VA_WRDMAPER_REG(v, i + v->va_wrdma_channel_start,
946							LPASS_CDC_DMA_VA_TX0))
947			return true;
948		if (reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, i + v->va_wrdma_channel_start,
949							LPASS_CDC_DMA_VA_TX0))
950			return true;
951	}
952	return false;
953}
954
955static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
956{
957	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
958}
959
960static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
961{
962	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
963}
964
965static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
966{
967	struct lpass_data *drvdata = dev_get_drvdata(dev);
968	const struct lpass_variant *v = drvdata->variant;
969	int i;
970
971	for (i = 0; i < v->va_irq_ports; ++i) {
972		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
973			return true;
974		if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
975			return true;
976	}
977
978	for (i = 0; i < v->va_wrdma_channels; ++i) {
979		if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
980							LPASS_CDC_DMA_VA_TX0))
981			return true;
982	}
983
984	return false;
985}
986
987static struct regmap_config lpass_rxtx_regmap_config = {
988	.reg_bits = 32,
989	.reg_stride = 4,
990	.val_bits = 32,
991	.writeable_reg = lpass_rxtx_regmap_writeable,
992	.readable_reg = lpass_rxtx_regmap_readable,
993	.volatile_reg = lpass_rxtx_regmap_volatile,
994	.cache_type = REGCACHE_FLAT,
995};
996
997static struct regmap_config lpass_va_regmap_config = {
998	.reg_bits = 32,
999	.reg_stride = 4,
1000	.val_bits = 32,
1001	.writeable_reg = lpass_va_regmap_writeable,
1002	.readable_reg = lpass_va_regmap_readable,
1003	.volatile_reg = lpass_va_regmap_volatile,
1004	.cache_type = REGCACHE_FLAT,
1005};
1006
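/*
 * Map a DT "qcom,{playback,capture}-sd-lines" property to an I2SCTL SD mode.
 * Returns LPAIF_I2SCTL_MODE_NONE if the property is absent or the requested
 * line combination is not supported.
 */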
1007static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
1008						struct device_node *node,
1009						const char *name)
1010{
1011	unsigned int lines[LPASS_CPU_MAX_MI2S_LINES];
1012	unsigned int sd_line_mask = 0;
1013	int num_lines, i;
1014
1015	num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
1016							LPASS_CPU_MAX_MI2S_LINES);
1017	if (num_lines < 0)
1018		return LPAIF_I2SCTL_MODE_NONE;
1019
1020	for (i = 0; i < num_lines; i++)
1021		sd_line_mask |= BIT(lines[i]);
1022
1023	switch (sd_line_mask) {
1024	case LPASS_CPU_I2S_SD0_MASK:
1025		return LPAIF_I2SCTL_MODE_SD0;
1026	case LPASS_CPU_I2S_SD1_MASK:
1027		return LPAIF_I2SCTL_MODE_SD1;
1028	case LPASS_CPU_I2S_SD2_MASK:
1029		return LPAIF_I2SCTL_MODE_SD2;
1030	case LPASS_CPU_I2S_SD3_MASK:
1031		return LPAIF_I2SCTL_MODE_SD3;
1032	case LPASS_CPU_I2S_SD0_1_MASK:
1033		return LPAIF_I2SCTL_MODE_QUAD01;
1034	case LPASS_CPU_I2S_SD2_3_MASK:
1035		return LPAIF_I2SCTL_MODE_QUAD23;
1036	case LPASS_CPU_I2S_SD0_1_2_MASK:
1037		return LPAIF_I2SCTL_MODE_6CH;
1038	case LPASS_CPU_I2S_SD0_1_2_3_MASK:
1039		return LPAIF_I2SCTL_MODE_8CH;
1040	default:
1041		dev_err(dev, "Unsupported SD line mask: %#x\n", sd_line_mask);
1042		return LPAIF_I2SCTL_MODE_NONE;
1043	}
1044}
1045
1046static void of_lpass_cpu_parse_dai_data(struct device *dev,
1047					struct lpass_data *data)
1048{
1049	struct device_node *node;
1050	int ret, i, id;
1051
1052	/* Allow all channels by default for backwards compatibility */
1053	for (i = 0; i < data->variant->num_dai; i++) {
1054		id = data->variant->dai_driver[i].id;
1055		data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
1056		data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
1057	}
1058
1059	for_each_child_of_node(dev->of_node, node) {
1060		ret = of_property_read_u32(node, "reg", &id);
1061		if (ret || id < 0) {
1062			dev_err(dev, "valid dai id not found: %d\n", ret);
1063			continue;
1064		}
1065		if (id == LPASS_DP_RX) {
1066			data->hdmi_port_enable = 1;
1067		} else if (is_cdc_dma_port(id)) {
1068			data->codec_dma_enable = 1;
1069		} else {
1070			data->mi2s_playback_sd_mode[id] =
1071				of_lpass_cpu_parse_sd_lines(dev, node,
1072							    "qcom,playback-sd-lines");
1073			data->mi2s_capture_sd_mode[id] =
1074				of_lpass_cpu_parse_sd_lines(dev, node,
1075						    "qcom,capture-sd-lines");
1076		}
1077	}
1078}
1079
1080static int of_lpass_cdc_dma_clks_parse(struct device *dev,
1081					struct lpass_data *data)
1082{
1083	data->codec_mem0 = devm_clk_get(dev, "audio_cc_codec_mem0");
1084	if (IS_ERR(data->codec_mem0))
1085		return PTR_ERR(data->codec_mem0);
1086
1087	data->codec_mem1 = devm_clk_get(dev, "audio_cc_codec_mem1");
1088	if (IS_ERR(data->codec_mem1))
1089		return PTR_ERR(data->codec_mem1);
1090
1091	data->codec_mem2 = devm_clk_get(dev, "audio_cc_codec_mem2");
1092	if (IS_ERR(data->codec_mem2))
1093		return PTR_ERR(data->codec_mem2);
1094
1095	data->va_mem0 = devm_clk_get(dev, "aon_cc_va_mem0");
1096	if (IS_ERR(data->va_mem0))
1097		return PTR_ERR(data->va_mem0);
1098
1099	return 0;
1100}
1101
1102int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
1103{
1104	struct lpass_data *drvdata;
1105	struct device_node *dsp_of_node;
1106	struct resource *res;
1107	const struct lpass_variant *variant;
1108	struct device *dev = &pdev->dev;
1109	int ret, i, dai_id;
1110
1111	dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
1112	if (dsp_of_node) {
1113		dev_err(dev, "DSP exists and holds audio resources\n");
1114		of_node_put(dsp_of_node);
1115		return -EBUSY;
1116	}
1117
1118	drvdata = devm_kzalloc(dev, sizeof(struct lpass_data), GFP_KERNEL);
1119	if (!drvdata)
1120		return -ENOMEM;
1121	platform_set_drvdata(pdev, drvdata);
1122
1123	variant = device_get_match_data(dev);
1124	if (!variant)
1125		return -EINVAL;
1126
1127	if (of_device_is_compatible(dev->of_node, "qcom,lpass-cpu-apq8016"))
1128		dev_warn(dev, "qcom,lpass-cpu-apq8016 compatible is deprecated\n");
1129
1130	drvdata->variant = variant;
1131
1132	of_lpass_cpu_parse_dai_data(dev, drvdata);
1133
1134	if (drvdata->codec_dma_enable) {
1135		drvdata->rxtx_lpaif =
1136				devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
1137		if (IS_ERR(drvdata->rxtx_lpaif))
1138			return PTR_ERR(drvdata->rxtx_lpaif);
1139
1140		drvdata->va_lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-va-lpaif");
1141		if (IS_ERR(drvdata->va_lpaif))
1142			return PTR_ERR(drvdata->va_lpaif);
1143
1144		lpass_rxtx_regmap_config.max_register = LPAIF_CDC_RXTX_WRDMAPER_REG(variant,
1145					variant->rxtx_wrdma_channels +
1146					variant->rxtx_wrdma_channel_start, LPASS_CDC_DMA_TX3);
1147
1148		drvdata->rxtx_lpaif_map = devm_regmap_init_mmio(dev, drvdata->rxtx_lpaif,
1149					&lpass_rxtx_regmap_config);
1150		if (IS_ERR(drvdata->rxtx_lpaif_map))
1151			return PTR_ERR(drvdata->rxtx_lpaif_map);
1152
1153		lpass_va_regmap_config.max_register = LPAIF_CDC_VA_WRDMAPER_REG(variant,
1154					variant->va_wrdma_channels +
1155					variant->va_wrdma_channel_start, LPASS_CDC_DMA_VA_TX0);
1156
1157		drvdata->va_lpaif_map = devm_regmap_init_mmio(dev, drvdata->va_lpaif,
1158					&lpass_va_regmap_config);
1159		if (IS_ERR(drvdata->va_lpaif_map))
1160			return PTR_ERR(drvdata->va_lpaif_map);
1161
1162		ret = of_lpass_cdc_dma_clks_parse(dev, drvdata);
1163		if (ret) {
1164			dev_err(dev, "failed to get cdc dma clocks %d\n", ret);
1165			return ret;
1166		}
1167
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm");
		if (!res)
			return -EINVAL;
		drvdata->rxtx_cdc_dma_lpm_buf = res->start;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm");
		if (!res)
			return -EINVAL;
		drvdata->va_cdc_dma_lpm_buf = res->start;
1173	}
1174
1175	drvdata->lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-lpaif");
1176	if (IS_ERR(drvdata->lpaif))
1177		return PTR_ERR(drvdata->lpaif);
1178
1179	lpass_cpu_regmap_config.max_register = LPAIF_WRDMAPER_REG(variant,
1180						variant->wrdma_channels +
1181						variant->wrdma_channel_start);
1182
1183	drvdata->lpaif_map = devm_regmap_init_mmio(dev, drvdata->lpaif,
1184			&lpass_cpu_regmap_config);
1185	if (IS_ERR(drvdata->lpaif_map)) {
1186		dev_err(dev, "error initializing regmap: %ld\n",
1187			PTR_ERR(drvdata->lpaif_map));
1188		return PTR_ERR(drvdata->lpaif_map);
1189	}
1190
1191	if (drvdata->hdmi_port_enable) {
1192		drvdata->hdmiif = devm_platform_ioremap_resource_byname(pdev, "lpass-hdmiif");
1193		if (IS_ERR(drvdata->hdmiif))
1194			return PTR_ERR(drvdata->hdmiif);
1195
1196		lpass_hdmi_regmap_config.max_register = LPAIF_HDMI_RDMAPER_REG(variant,
1197					variant->hdmi_rdma_channels - 1);
1198		drvdata->hdmiif_map = devm_regmap_init_mmio(dev, drvdata->hdmiif,
1199					&lpass_hdmi_regmap_config);
1200		if (IS_ERR(drvdata->hdmiif_map)) {
1201			dev_err(dev, "error initializing regmap: %ld\n",
1202			PTR_ERR(drvdata->hdmiif_map));
1203			return PTR_ERR(drvdata->hdmiif_map);
1204		}
1205	}
1206
1207	if (variant->init) {
1208		ret = variant->init(pdev);
1209		if (ret) {
1210			dev_err(dev, "error initializing variant: %d\n", ret);
1211			return ret;
1212		}
1213	}
1214
1215	for (i = 0; i < variant->num_dai; i++) {
1216		dai_id = variant->dai_driver[i].id;
1217		if (dai_id == LPASS_DP_RX || is_cdc_dma_port(dai_id))
1218			continue;
1219
1220		drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
1221					     variant->dai_osr_clk_names[i]);
1222		drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
1223						variant->dai_bit_clk_names[i]);
1224		if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
1225			dev_err(dev,
1226				"error getting %s: %ld\n",
1227				variant->dai_bit_clk_names[i],
1228				PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
1229			return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
1230		}
1231		if (drvdata->mi2s_playback_sd_mode[dai_id] ==
1232			LPAIF_I2SCTL_MODE_QUAD01) {
1233			variant->dai_driver[dai_id].playback.channels_min = 4;
1234			variant->dai_driver[dai_id].playback.channels_max = 4;
1235		}
1236	}
1237
1238	/* Allocation for i2sctl regmap fields */
	drvdata->i2sctl = devm_kzalloc(&pdev->dev, sizeof(struct lpaif_i2sctl),
					GFP_KERNEL);
	if (!drvdata->i2sctl)
		return -ENOMEM;
1241
1242	/* Initialize bitfields for dai I2SCTL register */
1243	ret = lpass_cpu_init_i2sctl_bitfields(dev, drvdata->i2sctl,
1244						drvdata->lpaif_map);
1245	if (ret) {
		dev_err(dev, "error initializing i2sctl fields: %d\n", ret);
1247		return ret;
1248	}
1249
1250	if (drvdata->hdmi_port_enable) {
1251		ret = lpass_hdmi_init_bitfields(dev, drvdata->hdmiif_map);
1252		if (ret) {
			dev_err(dev, "error initializing hdmi bitfields: %d\n", ret);
1254			return ret;
1255		}
1256	}
1257	ret = devm_snd_soc_register_component(dev,
1258					      &lpass_cpu_comp_driver,
1259					      variant->dai_driver,
1260					      variant->num_dai);
1261	if (ret) {
1262		dev_err(dev, "error registering cpu driver: %d\n", ret);
1263		goto err;
1264	}
1265
1266	ret = asoc_qcom_lpass_platform_register(pdev);
1267	if (ret) {
1268		dev_err(dev, "error registering platform driver: %d\n", ret);
1269		goto err;
1270	}
1271
1272err:
1273	return ret;
1274}
1275EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_probe);
1276
1277void asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
1278{
1279	struct lpass_data *drvdata = platform_get_drvdata(pdev);
1280
1281	if (drvdata->variant->exit)
1282		drvdata->variant->exit(pdev);
1283}
1284EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
1285
1286void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev)
1287{
1288	struct lpass_data *drvdata = platform_get_drvdata(pdev);
1289
1290	if (drvdata->variant->exit)
1291		drvdata->variant->exit(pdev);
1293}
1294EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_shutdown);
1295
1296MODULE_DESCRIPTION("QTi LPASS CPU Driver");
1297MODULE_LICENSE("GPL");
1298