// SPDX-License-Identifier: GPL-2.0
/*
 * mtk-afe-fe-dais.c  --  MediaTek AFE FE DAI operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "mtk-afe-fe-dai.h"
#include "mtk-afe-platform-driver.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8

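/*
 * Thin regmap wrappers: register offsets in the per-SoC memif and IRQ
 * descriptors are optional and set to a negative value when the control
 * does not exist on that SoC, in which case these helpers become no-ops
 * that return 0.
 */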
static int mtk_regmap_update_bits(struct regmap *map, int reg,
			   unsigned int mask,
			   unsigned int val, int shift)
{
	if (reg < 0 || WARN_ON_ONCE(shift < 0))
		return 0;
	return regmap_update_bits(map, reg, mask << shift, val << shift);
}

static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}

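/*
 * FE DAI startup: enables the memif agent, applies the platform's hardware
 * constraints (plus an extra-period constraint for capture, see below) and
 * acquires a dynamic IRQ for the memif unless one is already assigned.
 */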
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = snd_soc_rtd_to_cpu(rtd, 0)->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	/* enable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1, 0, memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use a ping-pong buffer since hw_ptr at IRQ time may
	 * be smaller than period_size due to the AFE's internal buffer.
	 * This easily leads to overrun when avail_min is period_size.
	 * One extra period can hold the possibly unread data.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamically allocate an IRQ to this memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);

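/*
 * FE DAI shutdown: disables the memif agent and, unless the memif uses a
 * fixed IRQ, returns the dynamically acquired IRQ to the pool.
 */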
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[snd_soc_rtd_to_cpu(rtd, 0)->id];
	int irq_id;

	irq_id = memif->irq_usage;

	/* disable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1, 1, memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);

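/*
 * FE DAI hw_params: invokes the platform's optional request_dram_resource()
 * hook, clears the DMA buffer, then programs the memif base/end address,
 * channel count, sample rate and sample format.
 */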
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = snd_soc_rtd_to_cpu(rtd, 0)->id;
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int ret;
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);
	snd_pcm_format_t format = params_format(params);

	if (afe->request_dram_resource)
		afe->request_dram_resource(afe->dev);

	dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
		__func__, memif->data->name,
		channels, rate, format,
		&substream->runtime->dma_addr,
		substream->runtime->dma_area,
		substream->runtime->dma_bytes);

	memset_io((void __force __iomem *)substream->runtime->dma_area, 0,
		  substream->runtime->dma_bytes);

	/* set addr */
	ret = mtk_memif_set_addr(afe, id,
				 substream->runtime->dma_area,
				 substream->runtime->dma_addr,
				 substream->runtime->dma_bytes);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
			__func__, id, ret);
		return ret;
	}

	/* set channel */
	ret = mtk_memif_set_channel(afe, id, channels);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
			__func__, id, channels, ret);
		return ret;
	}

	/* set rate */
	ret = mtk_memif_set_rate_substream(substream, id, rate);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
			__func__, id, rate, ret);
		return ret;
	}

	/* set format */
	ret = mtk_memif_set_format(afe, id, format);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
			__func__, id, format, ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);

int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);

	if (afe->release_dram_resource)
		afe->release_dram_resource(afe->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);

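/*
 * FE DAI trigger: on START/RESUME the memif is enabled and its IRQ is
 * programmed (period counter and sampling-rate selector) and unmasked;
 * on STOP/SUSPEND the memif is disabled, the IRQ is masked and any
 * pending IRQ status is cleared.
 */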
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = snd_soc_rtd_to_cpu(rtd, 0)->id;
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;
	int ret;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		ret = mtk_memif_set_enable(afe, id);
		if (ret) {
			dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
				__func__, id, ret);
			return ret;
		}

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit, counter,
				       irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);

		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit, fs,
				       irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1, 1, irq_data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		ret = mtk_memif_set_disable(afe, id);
		if (ret) {
			dev_err(afe->dev, "%s(), error, id %d, memif disable, ret %d\n",
				__func__, id, ret);
		}

		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1, 0, irq_data->irq_en_shift);
		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return ret;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);

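/*
 * FE DAI prepare: for playback streams, lets the platform choose a memif
 * prefetch buffer size via its optional get_memif_pbuf_size() hook.
 */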
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = snd_soc_rtd_to_cpu(rtd, 0)->id;
	int pbuf_size;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (afe->get_memif_pbuf_size) {
			pbuf_size = afe->get_memif_pbuf_size(substream);
			mtk_memif_set_pbuf_size(afe, id, pbuf_size);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);

const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);
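
/*
 * Illustrative use (DAI name and fields are hypothetical): an SoC AFE
 * driver typically points its front-end DAIs at these shared callbacks,
 * e.g.
 *
 *	static struct snd_soc_dai_driver mtk_example_fe_dais[] = {
 *		{
 *			.name = "DL1",
 *			.ops = &mtk_afe_fe_ops,
 *			.playback = { ... },
 *		},
 *	};
 */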
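
/*
 * Dynamic IRQ slot allocation: scans the platform's IRQ table under
 * irq_alloc_lock for a free slot and marks it occupied.  Returns the slot
 * index, or afe->irqs_size when every IRQ is in use (callers treat that
 * value as an allocation failure).  mtk_dynamic_irq_release() returns a
 * slot to the pool.
 */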
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);

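/*
 * Component suspend: skipped when the device is already runtime-suspended
 * or already backed up; otherwise saves the registers listed in
 * reg_back_up_list and powers the AFE down via the platform's
 * runtime_suspend() hook.
 */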
int mtk_afe_suspend(struct snd_soc_component *component)
{
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	if (afe->reg_back_up) {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);
	}

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_suspend);

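/*
 * Component resume: counterpart of mtk_afe_suspend(); powers the AFE back
 * up via runtime_resume() and restores the saved registers.
 */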
int mtk_afe_resume(struct snd_soc_component *component)
{
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_resume);

int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->enable_shift < 0) {
		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
			 __func__, id);
		return 0;
	}
	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				      1, 1, memif->data->enable_shift);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_enable);

int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->enable_shift < 0) {
		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
			 __func__, id);
		return 0;
	}
	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				      1, 0, memif->data->enable_shift);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_disable);

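/*
 * Programs the DMA buffer window of a memif: the lower 32 bits of the
 * start and (inclusive) end addresses, the dedicated MSB registers for
 * buffers above 4 GiB when they exist, and the legacy "bit 33" MSB flags.
 * When no dedicated end register exists, the end address is written at
 * reg_ofs_base + AFE_BASE_END_OFFSET.
 */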
int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
		       unsigned char *dma_area,
		       dma_addr_t dma_addr,
		       size_t dma_bytes)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
	unsigned int phys_buf_addr = lower_32_bits(dma_addr);
	unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);

	memif->dma_area = dma_area;
	memif->dma_addr = dma_addr;
	memif->dma_bytes = dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 phys_buf_addr);
	/* end */
	if (memif->data->reg_ofs_end)
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end,
				 phys_buf_addr + dma_bytes - 1);
	else
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_base +
				 AFE_BASE_END_OFFSET,
				 phys_buf_addr + dma_bytes - 1);

	/* set start, end, upper 32 bits */
	if (memif->data->reg_ofs_base_msb) {
		mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
				 phys_buf_addr_upper_32);
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end_msb,
				 phys_buf_addr_upper_32);
	}

	/*
	 * Set the 33rd address bit (MSB) of the memif buffer.  If a
	 * separate msb_end_reg exists, this flag covers only the base
	 * address and the end-address MSB is set below.
	 */
	if (memif->data->msb_reg)
		mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
				       1, msb_at_bit33, memif->data->msb_shift);

	/* set MSB to 33-bit, for memif end address */
	if (memif->data->msb_end_reg)
		mtk_regmap_update_bits(afe->regmap, memif->data->msb_end_reg,
				       1, msb_at_bit33,
				       memif->data->msb_end_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_addr);

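/*
 * Configures the memif channel mode: sets the quad-channel flag when the
 * hardware has one, mirrors the mono/stereo selection into the optional
 * int_odd flag, and programs the mono bit (whose polarity is inverted on
 * some SoCs, hence mono_invert).  A no-op when mono_shift is negative.
 */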
int mtk_memif_set_channel(struct mtk_base_afe *afe,
			  int id, unsigned int channel)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	unsigned int mono;

	if (memif->data->mono_shift < 0)
		return 0;

	if (memif->data->quad_ch_mask) {
		unsigned int quad_ch = (channel == 4) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
				       memif->data->quad_ch_mask,
				       quad_ch, memif->data->quad_ch_shift);
	}

	if (memif->data->mono_invert)
		mono = (channel == 1) ? 0 : 1;
	else
		mono = (channel == 1) ? 1 : 0;

	/* for specific configuration of memif mono mode */
	if (memif->data->int_odd_flag_reg)
		mtk_regmap_update_bits(afe->regmap,
				       memif->data->int_odd_flag_reg,
				       1, mono,
				       memif->data->int_odd_flag_shift);

	return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				      1, mono, memif->data->mono_shift);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_channel);

static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
				 int id, int fs)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->fs_shift >= 0)
		mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
				       memif->data->fs_maskbit,
				       fs, memif->data->fs_shift);

	return 0;
}

int mtk_memif_set_rate(struct mtk_base_afe *afe,
		       int id, unsigned int rate)
{
	int fs = 0;

	if (!afe->get_dai_fs) {
		dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
			__func__);
		return -EINVAL;
	}

	fs = afe->get_dai_fs(afe, id, rate);

	if (fs < 0)
		return -EINVAL;

	return mtk_memif_set_rate_fs(afe, id, fs);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_rate);

int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
				 int id, unsigned int rate)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_soc_component *component =
		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);

	int fs = 0;

	if (!afe->memif_fs) {
		dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
			__func__);
		return -EINVAL;
	}

	fs = afe->memif_fs(substream, rate);

	if (fs < 0)
		return -EINVAL;

	return mtk_memif_set_rate_fs(afe, id, fs);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_rate_substream);

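/*
 * Selects the memif sample width: 16-bit formats use normal mode, 24-bit
 * and 32-bit formats use the HD modes (with the alignment bit when the
 * memif cannot store 32-bit samples natively).  Unsupported formats are
 * logged and fall back to the 16-bit setting.
 */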
int mtk_memif_set_format(struct mtk_base_afe *afe,
			 int id, snd_pcm_format_t format)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int hd_audio = 0;
	int hd_align = 0;

	/* set hd mode */
	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
	case SNDRV_PCM_FORMAT_U16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
	case SNDRV_PCM_FORMAT_U32_LE:
		if (afe->memif_32bit_supported) {
			hd_audio = 2;
			hd_align = 0;
		} else {
			hd_audio = 1;
			hd_align = 1;
		}
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
	case SNDRV_PCM_FORMAT_U24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       0x3, hd_audio, memif->data->hd_shift);

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
			       0x1, hd_align, memif->data->hd_align_mshift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_format);

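/*
 * Sets the memif prefetch buffer size and the matching minimum length
 * threshold; a no-op on memifs whose descriptors leave the masks at 0.
 */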
int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
			    int id, int pbuf_size)
{
	const struct mtk_base_memif_data *memif_data = afe->memif[id].data;

	if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
		return 0;

	mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
			       memif_data->pbuf_mask,
			       pbuf_size, memif_data->pbuf_shift);

	mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
			       memif_data->minlen_mask,
			       pbuf_size, memif_data->minlen_shift);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_pbuf_size);

MODULE_DESCRIPTION("MediaTek simple FE DAI operator");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");