/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority channels.
 */

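/* Each DXE channel is driven by a circular list of control blocks
 * (struct wcn36xx_dxe_ctl), built in wcn36xx_dxe_allocate_ctl_block().
 * Every control block points at one hardware descriptor
 * (struct wcn36xx_dxe_desc) inside a DMA-coherent ring set up by
 * wcn36xx_dxe_init_descs(). On the TX channels the descriptors are used in
 * pairs: an even descriptor carries the buffer descriptor (BD) header and
 * the following odd descriptor carries the skb payload.
 */
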
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

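/* Allocate the channel's desc_num control blocks and link them into a
 * circular singly linked list: the last block's ->next points back at
 * ch->head_blk_ctl. On allocation failure the partially built list is torn
 * down again with wcn36xx_dxe_free_ctl_block().
 */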
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L(wcn);
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H(wcn);

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

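/* Allocate one DMA-coherent buffer holding desc_num hardware descriptors for
 * the channel, attach each descriptor to its control block and chain the
 * descriptors' phy_next_l pointers into a ring: the last descriptor points
 * back at the first one. TX descriptors get their destination (work queue)
 * address programmed here, RX descriptors their source address.
 */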
static int wcn36xx_dxe_init_descs(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct device *dev = wcn->dev;
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	cur_dxe = wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L(wcn);
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H(wcn);
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((i > 0) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}

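/* Distribute the preallocated BD memory pool across a TX channel: every
 * second (even-numbered) control block gets one chunk for its buffer
 * descriptor, while the odd-numbered blocks, which will carry skb payloads,
 * get no BD memory at all.
 */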
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the others will point to the skb data
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

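/* The two helpers below do a read-modify-write of the DXE interrupt mask
 * register: wcn36xx_dxe_enable_ch_int() sets the given channel bits,
 * wcn36xx_dxe_disable_ch_int() clears them.
 */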
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data &= ~wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
}

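/* Allocate a receive skb of WCN36XX_PKT_SIZE bytes and DMA-map its data area
 * so the hardware can write into it; the mapped address is stored in the
 * descriptor's dst_addr_l and the skb is remembered in the control block.
 */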
static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl,
				gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

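/* Complete the pending status-report frame (wcn->tx_ack_skb): hand it to
 * mac80211 with IEEE80211_TX_STAT_ACK set or cleared according to @status,
 * cancel the ack timeout timer and wake the stopped queues.
 */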
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	del_timer(&wcn->tx_ack_timer);
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;
	else
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

static void wcn36xx_dxe_tx_timer(struct timer_list *t)
{
	struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff *skb;

	/* TX Timeout */
	wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->flags &= ~IEEE80211_TX_STAT_ACK;
	info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

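/* Reclaim completed TX descriptors on a channel: walk the ring from
 * tail_blk_ctl towards head_blk_ctl, stop at the first descriptor that is
 * still owned by the hardware (VLD bit set), unmap each finished skb and
 * either report its status to mac80211 right away, park it in
 * wcn->tx_ack_skb to wait for a firmware ack indication, or free it.
 */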
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Loop at least once (do-while): when the ring is completely full,
	 * head and tail point to the same element, so a while-do loop would
	 * not iterate at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
			break;

		if (ctl->skb &&
		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
				if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
					info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
					ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
				} else {
					/* Wait for the TX ack indication or timeout... */
					spin_lock(&wcn->dxe_lock);
					if (WARN_ON(wcn->tx_ack_skb))
						ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
					wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
					mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
					spin_unlock(&wcn->dxe_lock);
				}
				/* do not free, ownership transferred to mac80211 status cb */
			} else {
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}

			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
					int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK)) {
			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
		}
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
					int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK)) {
			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = dev;

	wcn36xx_dxe_rx_frame(wcn);

	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

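/* Service one RX channel after an interrupt: acknowledge and clear the
 * channel's error/done/ed interrupt causes, then walk the descriptor ring
 * starting at head_blk_ctl. For every descriptor the hardware has released
 * (VLD bit clear), try to attach a freshly allocated skb; on success the
 * filled skb is unmapped and passed to wcn36xx_rx_skb(), otherwise the old
 * skb is kept for reuse and its packet dropped. Each processed descriptor is
 * handed back to the hardware by rewriting its ctrl word.
 */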
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl,
				     u32 en_mask,
				     u32 int_mask,
				     u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 int_reason;
	int ret;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);

		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		/* do not read until we own DMA descriptor */
		dma_rmb();

		/* read/modify DMA descriptor */
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (ret == 0) {
			/* new skb allocation ok. Use the new one and queue
			 * the old one to network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		}
		/* else keep old skb not submitted and reuse it for rx DMA
		 * (dropping the packet that it contained)
		 */

		/* flush descriptor changes before re-marking as valid */
		dma_wmb();
		dxe->ctrl = ctrl;

		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;

	spin_unlock(&ch->lock);

	return 0;
}

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
					  WCN36XX_DXE_CTRL_RX_L,
					  WCN36XX_DXE_INT_CH1_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_L,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
					  WCN36XX_DXE_CTRL_RX_H,
					  WCN36XX_DXE_INT_CH3_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_H,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

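/* Allocate the two DMA-coherent pools backing the TX buffer descriptors:
 * one chunk per descriptor of the high-priority (management) channel and
 * one per descriptor of the low-priority (data) channel.
 */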
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

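/* Queue one frame on a TX channel. Two consecutive ring slots are consumed:
 * the first descriptor carries the buffer descriptor (BD) header copied into
 * the control block's preallocated BD chunk, the second carries the
 * DMA-mapped skb payload. The skb descriptor is marked valid before the BD
 * descriptor, with write barriers making sure the descriptor contents are
 * visible first. Finally the hardware is kicked either through the channel
 * control register or, when the chip may be asleep in BMPS, through the
 * SMSM state bits.
 */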
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl_bd = ch->head_blk_ctl;
	ctl_skb = ctl_bd->next;

	/*
	 * If the skb is not NULL, we have caught up with the tail of the
	 * ring, i.e. the ring is full. Stop the queues to let mac80211 back
	 * off until the ring has an empty slot again.
	 */
	if (ctl_skb->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}

	if (unlikely(ctl_skb->bd_cpu_addr)) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc_bd = ctl_bd->desc;
	desc_skb = ctl_skb->desc;

	ctl_bd->skb = NULL;

	/* write buffer descriptor */
	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
	desc_bd->dst_addr_l = ch->dxe_wq;
	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc_bd, sizeof(*desc_bd));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	desc_skb->src_addr_l = dma_map_single(wcn->dev,
					      skb->data,
					      skb->len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	ctl_skb->skb = skb;
	desc_skb->dst_addr_l = ch->dxe_wq;
	desc_skb->fr_len = ctl_skb->skb->len;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc_skb, sizeof(*desc_skb));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl_skb->next;

	/* Commit all previous writes and set descriptors to VALID */
	wmb();
	desc_skb->ctrl = ch->ctrl_skb;
	wmb();
	desc_bd->ctrl = ch->ctrl_bd;

	/*
	 * When a data frame is sent while connected, the chip can be in sleep
	 * mode and writing to the register will not wake it up. Instead,
	 * notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

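/* Return true when no descriptor on the TX channel still holds an skb, i.e.
 * the hardware has drained the whole ring.
 */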
static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
{
	unsigned long flags;
	struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	bool ret = true;

	spin_lock_irqsave(&ch->lock, flags);

	/* Loop through ring buffer looking for nonempty entries. */
	ctl_bd_start = ch->head_blk_ctl;
	ctl_bd = ctl_bd_start;
	ctl_skb_start = ctl_bd_start->next;
	ctl_skb = ctl_skb_start;
	do {
		if (ctl_skb->skb) {
			ret = false;
			goto unlock;
		}
		ctl_bd = ctl_skb->next;
		ctl_skb = ctl_bd->next;
	} while (ctl_skb != ctl_skb_start);

unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
{
	int i = 0;

	/* Called with mac80211 queues stopped. Wait for empty HW queues. */
	do {
		if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
		    _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
			return 0;
		}
		/* This ieee80211_ops callback is specifically allowed to
		 * sleep.
		 */
		usleep_range(1000, 1100);
	} while (++i < 100);

	return -EBUSY;
}

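/* Bring the DXE engine up: reset the block, route the RX-available and
 * transfer-done interrupts to the right channels, set up the descriptor
 * rings and BD pools for all four channels, preallocate RX buffers, request
 * the TX/RX IRQs and finally enable the per-channel interrupts.
 */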
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L(wcn));

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/***************************************/
	/* Init descriptors for TX HIGH channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H(wcn));

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/***************************************/
	/* Init descriptors for RX HIGH channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	int reg_data = 0;

	/* Disable channel interrupts */
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);
	del_timer(&wcn->tx_ack_timer);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	/* Put the DXE block into reset before freeing memory */
	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);

	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
}