/*
 * ath79-mbox.c -- ALSA MBOX DMA management functions
 *
 * Copyright (c) 2013 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <sound/core.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>

#include "ath79-pcm.h"
#include "ath79-i2s.h"

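/*
 * ath79_pcm_lock serializes access to the shared MBOX/reset registers and
 * to the descriptor ring; ath79_pcm_cache is the dma_pool from which the
 * hardware DMA descriptors are allocated.
 *
 * A typical lifetime for these helpers, as driven from the PCM layer in
 * ath79-pcm.c (a sketch; the exact callback mapping is an assumption):
 *
 *	ath79_mbox_dma_init(dev);                  probe
 *	ath79_mbox_dma_reset();                    prepare
 *	ath79_mbox_dma_map(rtpriv, buf, period_bytes, buffer_bytes);
 *	ath79_mbox_dma_prepare(rtpriv);
 *	ath79_mbox_dma_start(rtpriv);              SNDRV_PCM_TRIGGER_START
 *	...
 *	ath79_mbox_dma_stop(rtpriv);               SNDRV_PCM_TRIGGER_STOP
 *	ath79_mbox_dma_unmap(rtpriv);              hw_free
 *	ath79_mbox_dma_exit();                     remove
 */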
DEFINE_SPINLOCK(ath79_pcm_lock);
static struct dma_pool *ath79_pcm_cache;

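/*
 * Pulse the MBOX block reset: assert the reset bit in the AR934x reset
 * module, wait for the block to settle, then de-assert it.
 */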
void ath79_mbox_reset(void)
{
	u32 t;

	spin_lock(&ath79_pcm_lock);

	t = ath79_reset_rr(AR934X_RESET_REG_RESET_MODULE);
	t |= AR934X_RESET_MBOX;
	ath79_reset_wr(AR934X_RESET_REG_RESET_MODULE, t);
	udelay(50);
	t &= ~(AR934X_RESET_MBOX);
	ath79_reset_wr(AR934X_RESET_REG_RESET_MODULE, t);

	spin_unlock(&ath79_pcm_lock);
}

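/*
 * Reset the MBOX FIFOs selected by @mask (e.g.
 * AR934X_DMA_MBOX0_FIFO_RESET_RX and/or AR934X_DMA_MBOX0_FIFO_RESET_TX).
 */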
void ath79_mbox_fifo_reset(u32 mask)
{
	ath79_dma_wr(AR934X_DMA_REG_MBOX_FIFO_RESET, mask);
	udelay(50);
	/*
	 * The datasheet says we should reset the stereo controller whenever
	 * we reset the MBOX DMA controller.
	 */
	ath79_stereo_reset();
}

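/*
 * Enable the MBOX interrupt sources in @mask, preserving the enable bits
 * that are already set.
 */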
void ath79_mbox_interrupt_enable(u32 mask)
{
	u32 t;

	spin_lock(&ath79_pcm_lock);

	t = ath79_dma_rr(AR934X_DMA_REG_MBOX_INT_ENABLE);
	t |= mask;
	ath79_dma_wr(AR934X_DMA_REG_MBOX_INT_ENABLE, t);

	spin_unlock(&ath79_pcm_lock);
}

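/*
 * Acknowledge the MBOX interrupt sources in @mask and clear the DMA bit
 * in the MISC interrupt status register.
 */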
void ath79_mbox_interrupt_ack(u32 mask)
{
	ath79_dma_wr(AR934X_DMA_REG_MBOX_INT_STATUS, mask);
	ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_STATUS, ~(MISC_INT_DMA));
	/* Flush these two registers */
	ath79_dma_rr(AR934X_DMA_REG_MBOX_INT_STATUS);
	ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_STATUS);
}

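/*
 * Start the MBOX0 DMA channel for the stream. The register naming is from
 * the DMA engine's perspective: the RX channel serves playback and the TX
 * channel serves capture. The read-back flushes the posted write.
 */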
void ath79_mbox_dma_start(struct ath79_pcm_rt_priv *rtpriv)
{
	if (rtpriv->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL,
			     AR934X_DMA_MBOX_DMA_CONTROL_START);
		ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL);
	} else {
		ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL,
			     AR934X_DMA_MBOX_DMA_CONTROL_START);
		ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL);
	}
}

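/*
 * Stop the MBOX0 DMA channel for the stream, then wait long enough for any
 * in-flight transfer to drain so the engine is guaranteed to be idle.
 */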
void ath79_mbox_dma_stop(struct ath79_pcm_rt_priv *rtpriv)
{
	if (rtpriv->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL,
			     AR934X_DMA_MBOX_DMA_CONTROL_STOP);
		ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL);
	} else {
		ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL,
			     AR934X_DMA_MBOX_DMA_CONTROL_STOP);
		ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL);
	}

	/*
	 * Delay for the dynamically calculated maximum time, based on the
	 * sample size, channel count and sample rate plus a margin, to
	 * ensure that the DMA engine is truly idle.
	 */
	mdelay(rtpriv->delay_time);
}

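/*
 * Full reset of the MBOX DMA engine: reset the block itself, then both of
 * the MBOX0 FIFOs.
 */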
void ath79_mbox_dma_reset(void)
{
	ath79_mbox_reset();
	ath79_mbox_fifo_reset(AR934X_DMA_MBOX0_FIFO_RESET_RX |
			AR934X_DMA_MBOX0_FIFO_RESET_TX);
}

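/*
 * Program the DMA policy and the base address of the descriptor ring for
 * the stream, and enable the matching completion interrupt.
 */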
void ath79_mbox_dma_prepare(struct ath79_pcm_rt_priv *rtpriv)
{
	struct ath79_pcm_desc *desc;
	u32 t;

	if (rtpriv->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Request the DMA channel from the controller */
		t = ath79_dma_rr(AR934X_DMA_REG_MBOX_DMA_POLICY);
		ath79_dma_wr(AR934X_DMA_REG_MBOX_DMA_POLICY,
			     t | AR934X_DMA_MBOX_DMA_POLICY_RX_QUANTUM |
			     (6 << AR934X_DMA_MBOX_DMA_POLICY_TX_FIFO_THRESH_SHIFT));

		/*
		 * The direction is indicated from the DMA engine's
		 * perspective, i.e. we use the RX registers for playback
		 * and the TX registers for capture.
		 */
		desc = list_first_entry(&rtpriv->dma_head, struct ath79_pcm_desc, list);
		ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_RX_DESCRIPTOR_BASE,
				(u32) desc->phys);
		ath79_mbox_interrupt_enable(AR934X_DMA_MBOX0_INT_RX_COMPLETE);
	} else {
		/* Request the DMA channel from the controller */
		t = ath79_dma_rr(AR934X_DMA_REG_MBOX_DMA_POLICY);
		ath79_dma_wr(AR934X_DMA_REG_MBOX_DMA_POLICY,
			     t | AR934X_DMA_MBOX_DMA_POLICY_TX_QUANTUM |
			     (6 << AR934X_DMA_MBOX_DMA_POLICY_TX_FIFO_THRESH_SHIFT));

		desc = list_first_entry(&rtpriv->dma_head, struct ath79_pcm_desc, list);
		ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_TX_DESCRIPTOR_BASE,
				(u32) desc->phys);
		ath79_mbox_interrupt_enable(AR934X_DMA_MBOX0_INT_TX_COMPLETE);
	}
}

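/*
 * Build the descriptor ring for a stream: allocate one hardware descriptor
 * per period (the last one may be shorter when the buffer size is not a
 * multiple of the period size), chain the descriptors through NextPtr and
 * close the chain into a ring so the engine loops over the buffer
 * indefinitely. Returns 0 on success or -ENOMEM if an allocation fails.
 */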
int ath79_mbox_dma_map(struct ath79_pcm_rt_priv *rtpriv, dma_addr_t baseaddr,
		       int period_bytes, int bufsize)
{
	struct list_head *head = &rtpriv->dma_head;
	struct ath79_pcm_desc *desc, *prev;
	dma_addr_t desc_p;
	unsigned int offset = 0;

	spin_lock(&ath79_pcm_lock);

	rtpriv->elapsed_size = 0;
	/* We loop until we have enough descriptors to map the requested DMA area */
	do {
		/* Allocate a descriptor and insert it into the DMA ring.
		 * GFP_ATOMIC is required here since we hold a spinlock. */
		desc = dma_pool_alloc(ath79_pcm_cache, GFP_ATOMIC, &desc_p);
		if (!desc) {
			spin_unlock(&ath79_pcm_lock);
			return -ENOMEM;
		}
		memset(desc, 0, sizeof(struct ath79_pcm_desc));
		desc->phys = desc_p;
		list_add_tail(&desc->list, head);

		desc->OWN = 1;
		desc->rsvd1 = desc->rsvd2 = desc->rsvd3 = desc->EOM = 0;

		/* The buffer size may not be a multiple of period_bytes */
		if (bufsize >= offset + period_bytes)
			desc->size = period_bytes;
		else
			desc->size = bufsize - offset;
		desc->BufPtr = baseaddr + offset;

		/* For now, we assume the buffer is always full
		 * --> length == size */
		desc->length = desc->size;

		/* We need to make sure we are not the first descriptor.
		 * If we are, prev doesn't point to a struct ath79_pcm_desc */
		if (desc->list.prev != head) {
			prev = list_entry(desc->list.prev,
					  struct ath79_pcm_desc, list);
			prev->NextPtr = desc->phys;
		}

		offset += desc->size;
	} while (offset < bufsize);

	/* Once all the descriptors have been created, we can close the loop
	 * by pointing from the last one to the first one */
	desc = list_first_entry(head, struct ath79_pcm_desc, list);
	prev = list_entry(head->prev, struct ath79_pcm_desc, list);
	prev->NextPtr = desc->phys;

	spin_unlock(&ath79_pcm_lock);

	return 0;
}

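/*
 * Tear down the descriptor ring built by ath79_mbox_dma_map(), returning
 * every descriptor to the DMA pool.
 */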
void ath79_mbox_dma_unmap(struct ath79_pcm_rt_priv *rtpriv)
{
	struct list_head *head = &rtpriv->dma_head;
	struct ath79_pcm_desc *desc, *n;

	spin_lock(&ath79_pcm_lock);
	list_for_each_entry_safe(desc, n, head, list) {
		list_del(&desc->list);
		dma_pool_free(ath79_pcm_cache, desc, desc->phys);
	}
	spin_unlock(&ath79_pcm_lock);
}

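/*
 * Create the DMA pool used for the hardware descriptors. Must be called
 * once (e.g. at probe time) before any of the map/unmap helpers.
 */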
int ath79_mbox_dma_init(struct device *dev)
{
	int ret = 0;

	/* Allocate a DMA pool to store the MBOX descriptors */
	ath79_pcm_cache = dma_pool_create("ath79_pcm_pool", dev,
					 sizeof(struct ath79_pcm_desc), 4, 0);
	if (!ath79_pcm_cache)
		ret = -ENOMEM;

	return ret;
}

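/*
 * Destroy the descriptor pool. All descriptors must have been freed via
 * ath79_mbox_dma_unmap() beforehand.
 */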
void ath79_mbox_dma_exit(void)
{
	dma_pool_destroy(ath79_pcm_cache);
	ath79_pcm_cache = NULL;
}