1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
6 */
7
8#include "core.h"
9#include "htc.h"
10#include "htt.h"
11#include "txrx.h"
12#include "debug.h"
13#include "trace.h"
14#include "mac.h"
15
16#include <linux/log2.h>
17#include <linux/bitfield.h>
18
/* When under memory pressure the rx ring refill may fail and needs a retry. */
20#define HTT_RX_RING_REFILL_RETRY_MS 50
21
22#define HTT_RX_RING_REFILL_RESCHED_MS 5
23
/* shortcut to interpret a raw memory buffer as an rx descriptor */
25#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
26
27static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
28
29static struct sk_buff *
30ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
31{
32	struct ath10k_skb_rxcb *rxcb;
33
34	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
35		if (rxcb->paddr == paddr)
36			return ATH10K_RXCB_SKB(rxcb);
37
38	WARN_ON_ONCE(1);
39	return NULL;
40}
41
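/* Release every buffer currently posted to the rx ring: unmap the DMA
 * mapping, free the skb and reset the fill/bookkeeping state. Callers in
 * this file invoke it with rx_ring.lock held.
 */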
42static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
43{
44	struct sk_buff *skb;
45	struct ath10k_skb_rxcb *rxcb;
46	struct hlist_node *n;
47	int i;
48
49	if (htt->rx_ring.in_ord_rx) {
50		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
51			skb = ATH10K_RXCB_SKB(rxcb);
52			dma_unmap_single(htt->ar->dev, rxcb->paddr,
53					 skb->len + skb_tailroom(skb),
54					 DMA_FROM_DEVICE);
55			hash_del(&rxcb->hlist);
56			dev_kfree_skb_any(skb);
57		}
58	} else {
59		for (i = 0; i < htt->rx_ring.size; i++) {
60			skb = htt->rx_ring.netbufs_ring[i];
61			if (!skb)
62				continue;
63
64			rxcb = ATH10K_SKB_RXCB(skb);
65			dma_unmap_single(htt->ar->dev, rxcb->paddr,
66					 skb->len + skb_tailroom(skb),
67					 DMA_FROM_DEVICE);
68			dev_kfree_skb_any(skb);
69		}
70	}
71
72	htt->rx_ring.fill_cnt = 0;
73	hash_init(htt->rx_ring.skb_table);
74	memset(htt->rx_ring.netbufs_ring, 0,
75	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
76}
77
78static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
79{
80	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
81}
82
83static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
84{
85	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
86}
87
88static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
89					     void *vaddr)
90{
91	htt->rx_ring.paddrs_ring_32 = vaddr;
92}
93
94static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
95					     void *vaddr)
96{
97	htt->rx_ring.paddrs_ring_64 = vaddr;
98}
99
100static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
101					  dma_addr_t paddr, int idx)
102{
103	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
104}
105
106static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
107					  dma_addr_t paddr, int idx)
108{
109	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
110}
111
112static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
113{
114	htt->rx_ring.paddrs_ring_32[idx] = 0;
115}
116
117static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
118{
119	htt->rx_ring.paddrs_ring_64[idx] = 0;
120}
121
122static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
123{
124	return (void *)htt->rx_ring.paddrs_ring_32;
125}
126
127static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
128{
129	return (void *)htt->rx_ring.paddrs_ring_64;
130}
131
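/* Post @num new rx buffers to the ring: allocate skbs, align their data to
 * HTT_RX_DESC_ALIGN, clear the descriptor attention word, DMA-map each
 * buffer and publish its physical address at the firmware alloc index.
 */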
132static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
133{
134	struct ath10k_hw_params *hw = &htt->ar->hw_params;
135	struct htt_rx_desc *rx_desc;
136	struct ath10k_skb_rxcb *rxcb;
137	struct sk_buff *skb;
138	dma_addr_t paddr;
139	int ret = 0, idx;
140
	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx
	 * Ring. To keep things simple, make sure the ring is always at
	 * least half empty. This guarantees that replenishment can never
	 * overrun the ring.
	 */
146	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
147
148	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
149
150	if (idx < 0 || idx >= htt->rx_ring.size) {
151		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
152		idx &= htt->rx_ring.size_mask;
153		ret = -ENOMEM;
154		goto fail;
155	}
156
157	while (num > 0) {
158		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
159		if (!skb) {
160			ret = -ENOMEM;
161			goto fail;
162		}
163
164		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
165			skb_pull(skb,
166				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
167				 skb->data);
168
169		/* Clear rx_desc attention word before posting to Rx ring */
170		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
171		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
172
173		paddr = dma_map_single(htt->ar->dev, skb->data,
174				       skb->len + skb_tailroom(skb),
175				       DMA_FROM_DEVICE);
176
177		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
178			dev_kfree_skb_any(skb);
179			ret = -ENOMEM;
180			goto fail;
181		}
182
183		rxcb = ATH10K_SKB_RXCB(skb);
184		rxcb->paddr = paddr;
185		htt->rx_ring.netbufs_ring[idx] = skb;
186		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
187		htt->rx_ring.fill_cnt++;
188
189		if (htt->rx_ring.in_ord_rx) {
190			hash_add(htt->rx_ring.skb_table,
191				 &ATH10K_SKB_RXCB(skb)->hlist,
192				 paddr);
193		}
194
195		num--;
196		idx++;
197		idx &= htt->rx_ring.size_mask;
198	}
199
200fail:
201	/*
202	 * Make sure the rx buffer is updated before available buffer
203	 * index to avoid any potential rx ring corruption.
204	 */
205	mb();
206	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
207	return ret;
208}
209
210static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
211{
212	lockdep_assert_held(&htt->rx_ring.lock);
213	return __ath10k_htt_rx_ring_fill_n(htt, num);
214}
215
216static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
217{
218	int ret, num_deficit, num_to_fill;
219
	/* Refilling the whole RX ring buffer in one go proves to be a bad
	 * idea. The reason is that RX may take up a significant amount of CPU
	 * cycles and starve other tasks, e.g. TX on an ethernet device acting
	 * as a bridge with an ath10k wlan interface. This ended up in very
	 * poor performance once the host CPU was overwhelmed with RX on
	 * ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at the cost of lower maximum throughput but
	 * improves the average and stability.
	 */
236	spin_lock_bh(&htt->rx_ring.lock);
237	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
238	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
239	num_deficit -= num_to_fill;
240	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
241	if (ret == -ENOMEM) {
242		/*
243		 * Failed to fill it to the desired level -
244		 * we'll start a timer and try again next time.
245		 * As long as enough buffers are left in the ring for
246		 * another A-MPDU rx, no special recovery is needed.
247		 */
248		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
249			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
250	} else if (num_deficit > 0) {
251		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
252			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
253	}
254	spin_unlock_bh(&htt->rx_ring.lock);
255}
256
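/* Timer callback: retry the deferred ring replenish after an earlier -ENOMEM. */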
257static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
258{
259	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
260
261	ath10k_htt_rx_msdu_buff_replenish(htt);
262}
263
264int ath10k_htt_rx_ring_refill(struct ath10k *ar)
265{
266	struct ath10k_htt *htt = &ar->htt;
267	int ret;
268
269	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
270		return 0;
271
272	spin_lock_bh(&htt->rx_ring.lock);
273	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
274					      htt->rx_ring.fill_cnt));
275
276	if (ret)
277		ath10k_htt_rx_ring_free(htt);
278
279	spin_unlock_bh(&htt->rx_ring.lock);
280
281	return ret;
282}
283
284void ath10k_htt_rx_free(struct ath10k_htt *htt)
285{
286	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
287		return;
288
289	del_timer_sync(&htt->rx_ring.refill_retry_timer);
290
291	skb_queue_purge(&htt->rx_msdus_q);
292	skb_queue_purge(&htt->rx_in_ord_compl_q);
293	skb_queue_purge(&htt->tx_fetch_ind_q);
294
295	spin_lock_bh(&htt->rx_ring.lock);
296	ath10k_htt_rx_ring_free(htt);
297	spin_unlock_bh(&htt->rx_ring.lock);
298
299	dma_free_coherent(htt->ar->dev,
300			  ath10k_htt_get_rx_ring_size(htt),
301			  ath10k_htt_get_vaddr_ring(htt),
302			  htt->rx_ring.base_paddr);
303
304	ath10k_htt_config_paddrs_ring(htt, NULL);
305
306	dma_free_coherent(htt->ar->dev,
307			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
308			  htt->rx_ring.alloc_idx.vaddr,
309			  htt->rx_ring.alloc_idx.paddr);
310	htt->rx_ring.alloc_idx.vaddr = NULL;
311
312	kfree(htt->rx_ring.netbufs_ring);
313	htt->rx_ring.netbufs_ring = NULL;
314}
315
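/* Pop the next filled buffer at the software read index, clear its ring
 * slot and DMA unmap it. Must be called with rx_ring.lock held.
 */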
316static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
317{
318	struct ath10k *ar = htt->ar;
319	int idx;
320	struct sk_buff *msdu;
321
322	lockdep_assert_held(&htt->rx_ring.lock);
323
324	if (htt->rx_ring.fill_cnt == 0) {
325		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
326		return NULL;
327	}
328
329	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
330	msdu = htt->rx_ring.netbufs_ring[idx];
331	htt->rx_ring.netbufs_ring[idx] = NULL;
332	ath10k_htt_reset_paddrs_ring(htt, idx);
333
334	idx++;
335	idx &= htt->rx_ring.size_mask;
336	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
337	htt->rx_ring.fill_cnt--;
338
339	dma_unmap_single(htt->ar->dev,
340			 ATH10K_SKB_RXCB(msdu)->paddr,
341			 msdu->len + skb_tailroom(msdu),
342			 DMA_FROM_DEVICE);
343	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
344			msdu->data, msdu->len + skb_tailroom(msdu));
345
346	return msdu;
347}
348
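/* Pop one MPDU's worth of MSDUs off the rx ring into @amsdu. Chained
 * buffers carry only payload and no rx descriptor.
 */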
/* return: < 0 on fatal error, 0 for a non-chained msdu, 1 for a chained msdu */
350static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
351				   struct sk_buff_head *amsdu)
352{
353	struct ath10k *ar = htt->ar;
354	struct ath10k_hw_params *hw = &ar->hw_params;
355	int msdu_len, msdu_chaining = 0;
356	struct sk_buff *msdu;
357	struct htt_rx_desc *rx_desc;
358	struct rx_attention *rx_desc_attention;
359	struct rx_frag_info_common *rx_desc_frag_info_common;
360	struct rx_msdu_start_common *rx_desc_msdu_start_common;
361	struct rx_msdu_end_common *rx_desc_msdu_end_common;
362
363	lockdep_assert_held(&htt->rx_ring.lock);
364
365	for (;;) {
366		int last_msdu, msdu_len_invalid, msdu_chained;
367
368		msdu = ath10k_htt_rx_netbuf_pop(htt);
369		if (!msdu) {
370			__skb_queue_purge(amsdu);
371			return -ENOENT;
372		}
373
374		__skb_queue_tail(amsdu, msdu);
375
376		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
377		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
378		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
379									      rx_desc);
380		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
381		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
382
		/* FIXME: we must report the msdu payload since this is what
		 * the caller expects now
		 */
386		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
387		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
388
		/*
		 * Sanity check - confirm the HW has finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent handling a stale Rx descriptor, just bail out
		 * for now until we have a way to recover.
		 */
397		if (!(__le32_to_cpu(rx_desc_attention->flags)
398				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
399			__skb_queue_purge(amsdu);
400			return -EIO;
401		}
402
403		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
404					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
405					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
406		msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
407			      RX_MSDU_START_INFO0_MSDU_LENGTH);
408		msdu_chained = rx_desc_frag_info_common->ring2_more_count;
409
410		if (msdu_len_invalid)
411			msdu_len = 0;
412
413		skb_trim(msdu, 0);
414		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
415		msdu_len -= msdu->len;
416
		/* Note: Chained buffers do not contain an rx descriptor */
418		while (msdu_chained--) {
419			msdu = ath10k_htt_rx_netbuf_pop(htt);
420			if (!msdu) {
421				__skb_queue_purge(amsdu);
422				return -ENOENT;
423			}
424
425			__skb_queue_tail(amsdu, msdu);
426			skb_trim(msdu, 0);
427			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
428			msdu_len -= msdu->len;
429			msdu_chaining = 1;
430		}
431
432		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
433				RX_MSDU_END_INFO0_LAST_MSDU;
434
435		/* FIXME: why are we skipping the first part of the rx_desc? */
436#if defined(__linux__)
437		trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
438#elif defined(__FreeBSD__)
439		trace_ath10k_htt_rx_desc(ar, (u8 *)rx_desc + sizeof(u32),
440#endif
441					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));
442
443		if (last_msdu)
444			break;
445	}
446
447	if (skb_queue_empty(amsdu))
448		msdu_chaining = -1;
449
450	/*
451	 * Don't refill the ring yet.
452	 *
453	 * First, the elements popped here are still in use - it is not
454	 * safe to overwrite them until the matching call to
455	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
456	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
457	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
458	 * (something like 3 buffers). Consequently, we'll rely on the txrx
459	 * SW to tell us when it is done pulling all the PPDU's rx buffers
460	 * out of the rx ring, and then refill it just once.
461	 */
462
463	return msdu_chaining;
464}
465
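/* Look up and unmap the rx buffer previously posted at @paddr. Used in the
 * full rx reorder (in-order indication) mode where the firmware reports
 * buffers by physical address rather than by ring index.
 */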
466static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
467					       u64 paddr)
468{
469	struct ath10k *ar = htt->ar;
470	struct ath10k_skb_rxcb *rxcb;
471	struct sk_buff *msdu;
472
473	lockdep_assert_held(&htt->rx_ring.lock);
474
475	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
476	if (!msdu)
477		return NULL;
478
479	rxcb = ATH10K_SKB_RXCB(msdu);
480	hash_del(&rxcb->hlist);
481	htt->rx_ring.fill_cnt--;
482
483	dma_unmap_single(htt->ar->dev, rxcb->paddr,
484			 msdu->len + skb_tailroom(msdu),
485			 DMA_FROM_DEVICE);
486	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
487			msdu->data, msdu->len + skb_tailroom(msdu));
488
489	return msdu;
490}
491
492static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
493					       struct sk_buff *frag_list,
494					       unsigned int frag_len)
495{
496	skb_shinfo(skb_head)->frag_list = frag_list;
497	skb_head->data_len = frag_len;
498	skb_head->len += skb_head->data_len;
499}
500
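/* Monitor-mode helper: rebuild a chained A-MSDU from consecutive in-order
 * msdu descriptors by linking the follow-up buffers onto the head skb's
 * frag_list. Advances *msdu_desc past the descriptors it consumes.
 */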
501static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
502					     struct sk_buff *msdu,
503					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
504{
505	struct ath10k *ar = htt->ar;
506	struct ath10k_hw_params *hw = &ar->hw_params;
507	u32 paddr;
508	struct sk_buff *frag_buf;
509	struct sk_buff *prev_frag_buf;
510	u8 last_frag;
511	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
512	struct htt_rx_desc *rxd;
513	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
514
515	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
516	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
517
518	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
519	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
520	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
521	amsdu_len -= msdu->len;
522
523	last_frag = ind_desc->reserved;
524	if (last_frag) {
525		if (amsdu_len) {
526			ath10k_warn(ar, "invalid amsdu len %u, left %d",
527				    __le16_to_cpu(ind_desc->msdu_len),
528				    amsdu_len);
529		}
530		return 0;
531	}
532
533	ind_desc++;
534	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
535	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
536	if (!frag_buf) {
537		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
538		return -ENOENT;
539	}
540
541	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
542	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
543
544	amsdu_len -= frag_buf->len;
545	prev_frag_buf = frag_buf;
546	last_frag = ind_desc->reserved;
547	while (!last_frag) {
548		ind_desc++;
549		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
550		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
551		if (!frag_buf) {
552			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
553				    paddr);
554			prev_frag_buf->next = NULL;
555			return -ENOENT;
556		}
557
558		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
559		last_frag = ind_desc->reserved;
560		amsdu_len -= frag_buf->len;
561
562		prev_frag_buf->next = frag_buf;
563		prev_frag_buf = frag_buf;
564	}
565
566	if (amsdu_len) {
567		ath10k_warn(ar, "invalid amsdu len %u, left %d",
568			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
569	}
570
571	*msdu_desc = ind_desc;
572
573	prev_frag_buf->next = NULL;
574	return 0;
575}
576
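/* 64-bit paddr variant of the monitor-mode A-MSDU reassembly above. */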
577static int
578ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
579				  struct sk_buff *msdu,
580				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
581{
582	struct ath10k *ar = htt->ar;
583	struct ath10k_hw_params *hw = &ar->hw_params;
584	u64 paddr;
585	struct sk_buff *frag_buf;
586	struct sk_buff *prev_frag_buf;
587	u8 last_frag;
588	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
589	struct htt_rx_desc *rxd;
590	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
591
592	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
593	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
594
595	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
596	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
597	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
598	amsdu_len -= msdu->len;
599
600	last_frag = ind_desc->reserved;
601	if (last_frag) {
602		if (amsdu_len) {
603			ath10k_warn(ar, "invalid amsdu len %u, left %d",
604				    __le16_to_cpu(ind_desc->msdu_len),
605				    amsdu_len);
606		}
607		return 0;
608	}
609
610	ind_desc++;
611	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
612	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
613	if (!frag_buf) {
614#if defined(__linux__)
615		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
616#elif defined(__FreeBSD__)
617		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%jx", (uintmax_t)paddr);
618#endif
619		return -ENOENT;
620	}
621
622	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
623	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
624
625	amsdu_len -= frag_buf->len;
626	prev_frag_buf = frag_buf;
627	last_frag = ind_desc->reserved;
628	while (!last_frag) {
629		ind_desc++;
630		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
631		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
632		if (!frag_buf) {
633#if defined(__linux__)
634			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
635				    paddr);
636#elif defined(__FreeBSD__)
637			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%jx",
638				    (uintmax_t)paddr);
639#endif
640			prev_frag_buf->next = NULL;
641			return -ENOENT;
642		}
643
644		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
645		last_frag = ind_desc->reserved;
646		amsdu_len -= frag_buf->len;
647
648		prev_frag_buf->next = frag_buf;
649		prev_frag_buf = frag_buf;
650	}
651
652	if (amsdu_len) {
653		ath10k_warn(ar, "invalid amsdu len %u, left %d",
654			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
655	}
656
657	*msdu_desc = ind_desc;
658
659	prev_frag_buf->next = NULL;
660	return 0;
661}
662
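/* Walk the in-order indication's 32-bit msdu descriptor list, pop each
 * referenced buffer off the ring and queue it on @list.
 */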
663static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
664					  struct htt_rx_in_ord_ind *ev,
665					  struct sk_buff_head *list)
666{
667	struct ath10k *ar = htt->ar;
668	struct ath10k_hw_params *hw = &ar->hw_params;
669	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
670	struct htt_rx_desc *rxd;
671	struct rx_attention *rxd_attention;
672	struct sk_buff *msdu;
673	int msdu_count, ret;
674	bool is_offload;
675	u32 paddr;
676
677	lockdep_assert_held(&htt->rx_ring.lock);
678
679	msdu_count = __le16_to_cpu(ev->msdu_count);
680	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
681
682	while (msdu_count--) {
683		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
684
685		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
686		if (!msdu) {
687			__skb_queue_purge(list);
688			return -ENOENT;
689		}
690
691		if (!is_offload && ar->monitor_arvif) {
692			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
693								&msdu_desc);
694			if (ret) {
695				__skb_queue_purge(list);
696				return ret;
697			}
698			__skb_queue_tail(list, msdu);
699			msdu_desc++;
700			continue;
701		}
702
703		__skb_queue_tail(list, msdu);
704
705		if (!is_offload) {
706			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
707			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
708
709			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
710
711			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
712			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
713			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
714
715			if (!(__le32_to_cpu(rxd_attention->flags) &
716			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
717				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
718				return -EIO;
719			}
720		}
721
722		msdu_desc++;
723	}
724
725	return 0;
726}
727
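/* Same as above, but for targets that report 64-bit rx buffer addresses. */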
728static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
729					  struct htt_rx_in_ord_ind *ev,
730					  struct sk_buff_head *list)
731{
732	struct ath10k *ar = htt->ar;
733	struct ath10k_hw_params *hw = &ar->hw_params;
734	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
735	struct htt_rx_desc *rxd;
736	struct rx_attention *rxd_attention;
737	struct sk_buff *msdu;
738	int msdu_count, ret;
739	bool is_offload;
740	u64 paddr;
741
742	lockdep_assert_held(&htt->rx_ring.lock);
743
744	msdu_count = __le16_to_cpu(ev->msdu_count);
745	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
746
747	while (msdu_count--) {
748		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
749		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
750		if (!msdu) {
751			__skb_queue_purge(list);
752			return -ENOENT;
753		}
754
755		if (!is_offload && ar->monitor_arvif) {
756			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
757								&msdu_desc);
758			if (ret) {
759				__skb_queue_purge(list);
760				return ret;
761			}
762			__skb_queue_tail(list, msdu);
763			msdu_desc++;
764			continue;
765		}
766
767		__skb_queue_tail(list, msdu);
768
769		if (!is_offload) {
770			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
771			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
772
773			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
774
775			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
776			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
777			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
778
779			if (!(__le32_to_cpu(rxd_attention->flags) &
780			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
781				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
782				return -EIO;
783			}
784		}
785
786		msdu_desc++;
787	}
788
789	return 0;
790}
791
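/* Allocate the host rx ring state: the netbufs shadow array, the DMA
 * coherent ring of buffer addresses and the firmware-visible alloc index.
 */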
792int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
793{
794	struct ath10k *ar = htt->ar;
795	dma_addr_t paddr;
796	void *vaddr, *vaddr_ring;
797	size_t size;
798	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
799
800	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
801		return 0;
802
803	htt->rx_confused = false;
804
805	/* XXX: The fill level could be changed during runtime in response to
806	 * the host processing latency. Is this really worth it?
807	 */
808	htt->rx_ring.size = HTT_RX_RING_SIZE;
809	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
810	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
811
812	if (!is_power_of_2(htt->rx_ring.size)) {
813		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
814		return -EINVAL;
815	}
816
817	htt->rx_ring.netbufs_ring =
818		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
819			GFP_KERNEL);
820	if (!htt->rx_ring.netbufs_ring)
821		goto err_netbuf;
822
823	size = ath10k_htt_get_rx_ring_size(htt);
824
825	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
826	if (!vaddr_ring)
827		goto err_dma_ring;
828
829	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
830	htt->rx_ring.base_paddr = paddr;
831
832	vaddr = dma_alloc_coherent(htt->ar->dev,
833				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
834				   &paddr, GFP_KERNEL);
835	if (!vaddr)
836		goto err_dma_idx;
837
838	htt->rx_ring.alloc_idx.vaddr = vaddr;
839	htt->rx_ring.alloc_idx.paddr = paddr;
840	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
841	*htt->rx_ring.alloc_idx.vaddr = 0;
842
843	/* Initialize the Rx refill retry timer */
844	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
845
846	spin_lock_init(&htt->rx_ring.lock);
847#if defined(__FreeBSD__)
848	spin_lock_init(&htt->tx_fetch_ind_q.lock);
849#endif
850
851	htt->rx_ring.fill_cnt = 0;
852	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
853	hash_init(htt->rx_ring.skb_table);
854
855	skb_queue_head_init(&htt->rx_msdus_q);
856	skb_queue_head_init(&htt->rx_in_ord_compl_q);
857	skb_queue_head_init(&htt->tx_fetch_ind_q);
858	atomic_set(&htt->num_mpdus_ready, 0);
859
860	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
861		   htt->rx_ring.size, htt->rx_ring.fill_level);
862	return 0;
863
864err_dma_idx:
865	dma_free_coherent(htt->ar->dev,
866			  ath10k_htt_get_rx_ring_size(htt),
867			  vaddr_ring,
868			  htt->rx_ring.base_paddr);
869	ath10k_htt_config_paddrs_ring(htt, NULL);
870err_dma_ring:
871	kfree(htt->rx_ring.netbufs_ring);
872	htt->rx_ring.netbufs_ring = NULL;
873err_netbuf:
874	return -ENOMEM;
875}
876
877static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
878					  enum htt_rx_mpdu_encrypt_type type)
879{
880	switch (type) {
881	case HTT_RX_MPDU_ENCRYPT_NONE:
882		return 0;
883	case HTT_RX_MPDU_ENCRYPT_WEP40:
884	case HTT_RX_MPDU_ENCRYPT_WEP104:
885		return IEEE80211_WEP_IV_LEN;
886	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
887	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
888		return IEEE80211_TKIP_IV_LEN;
889	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
890		return IEEE80211_CCMP_HDR_LEN;
891	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
892		return IEEE80211_CCMP_256_HDR_LEN;
893	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
894	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
895		return IEEE80211_GCMP_HDR_LEN;
896	case HTT_RX_MPDU_ENCRYPT_WEP128:
897	case HTT_RX_MPDU_ENCRYPT_WAPI:
898		break;
899	}
900
901	ath10k_warn(ar, "unsupported encryption type %d\n", type);
902	return 0;
903}
904
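/* Length in bytes of the TKIP Michael MIC trailer. */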
905#define MICHAEL_MIC_LEN 8
906
907static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
908					enum htt_rx_mpdu_encrypt_type type)
909{
910	switch (type) {
911	case HTT_RX_MPDU_ENCRYPT_NONE:
912	case HTT_RX_MPDU_ENCRYPT_WEP40:
913	case HTT_RX_MPDU_ENCRYPT_WEP104:
914	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
915	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
916		return 0;
917	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
918		return IEEE80211_CCMP_MIC_LEN;
919	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
920		return IEEE80211_CCMP_256_MIC_LEN;
921	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
922	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
923		return IEEE80211_GCMP_MIC_LEN;
924	case HTT_RX_MPDU_ENCRYPT_WEP128:
925	case HTT_RX_MPDU_ENCRYPT_WAPI:
926		break;
927	}
928
929	ath10k_warn(ar, "unsupported encryption type %d\n", type);
930	return 0;
931}
932
933static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
934					enum htt_rx_mpdu_encrypt_type type)
935{
936	switch (type) {
937	case HTT_RX_MPDU_ENCRYPT_NONE:
938	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
939	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
940	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
941	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
942		return 0;
943	case HTT_RX_MPDU_ENCRYPT_WEP40:
944	case HTT_RX_MPDU_ENCRYPT_WEP104:
945		return IEEE80211_WEP_ICV_LEN;
946	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
947	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
948		return IEEE80211_TKIP_ICV_LEN;
949	case HTT_RX_MPDU_ENCRYPT_WEP128:
950	case HTT_RX_MPDU_ENCRYPT_WAPI:
951		break;
952	}
953
954	ath10k_warn(ar, "unsupported encryption type %d\n", type);
955	return 0;
956}
957
958struct amsdu_subframe_hdr {
959	u8 dst[ETH_ALEN];
960	u8 src[ETH_ALEN];
961	__be16 len;
962} __packed;
963
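/* VHT-SIG-A1 Group ID values 0 and 63 denote a single-user (SU) PPDU. */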
964#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
965
966static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
967{
968	u8 ret = 0;
969
970	switch (bw) {
971	case 0:
972		ret = RATE_INFO_BW_20;
973		break;
974	case 1:
975		ret = RATE_INFO_BW_40;
976		break;
977	case 2:
978		ret = RATE_INFO_BW_80;
979		break;
980	case 3:
981		ret = RATE_INFO_BW_160;
982		break;
983	}
984
985	return ret;
986}
987
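/* Derive mac80211 rate info (legacy/HT/VHT rate index, NSS, bandwidth,
 * short GI) from the PPDU start words of the rx descriptor.
 */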
988static void ath10k_htt_rx_h_rates(struct ath10k *ar,
989				  struct ieee80211_rx_status *status,
990				  struct htt_rx_desc *rxd)
991{
992	struct ath10k_hw_params *hw = &ar->hw_params;
993	struct rx_attention *rxd_attention;
994	struct rx_mpdu_start *rxd_mpdu_start;
995	struct rx_mpdu_end *rxd_mpdu_end;
996	struct rx_msdu_start_common *rxd_msdu_start_common;
997	struct rx_msdu_end_common *rxd_msdu_end_common;
998	struct rx_ppdu_start *rxd_ppdu_start;
999	struct ieee80211_supported_band *sband;
1000	u8 cck, rate, bw, sgi, mcs, nss;
1001	u8 *rxd_msdu_payload;
1002	u8 preamble = 0;
1003	u8 group_id;
1004	u32 info1, info2, info3;
1005	u32 stbc, nsts_su;
1006
1007	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1008	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
1009	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
1010	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1011	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
1012	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
1013	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
1014
1015	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
1016	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
1017	info3 = __le32_to_cpu(rxd_ppdu_start->info3);
1018
1019	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
1020
1021	switch (preamble) {
1022	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. Since
		 * the band can't be undefined, check that freq is non-zero.
		 */
1026		if (!status->freq)
1027			return;
1028
1029		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
1030		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
1031		rate &= ~RX_PPDU_START_RATE_FLAG;
1032
1033		sband = &ar->mac.sbands[status->band];
1034		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
1035		break;
1036	case HTT_RX_HT:
1037	case HTT_RX_HT_WITH_TXBF:
1038		/* HT-SIG - Table 20-11 in info2 and info3 */
1039		mcs = info2 & 0x1F;
1040		nss = mcs >> 3;
1041		bw = (info2 >> 7) & 1;
1042		sgi = (info3 >> 7) & 1;
1043
1044		status->rate_idx = mcs;
1045		status->encoding = RX_ENC_HT;
1046		if (sgi)
1047			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1048		if (bw)
1049			status->bw = RATE_INFO_BW_40;
1050		break;
1051	case HTT_RX_VHT:
1052	case HTT_RX_VHT_WITH_TXBF:
1053		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
1054		 * TODO check this
1055		 */
1056		bw = info2 & 3;
1057		sgi = info3 & 1;
1058		stbc = (info2 >> 3) & 1;
1059		group_id = (info2 >> 4) & 0x3F;
1060
1061		if (GROUP_ID_IS_SU_MIMO(group_id)) {
1062			mcs = (info3 >> 4) & 0x0F;
1063			nsts_su = ((info2 >> 10) & 0x07);
1064			if (stbc)
1065				nss = (nsts_su >> 2) + 1;
1066			else
1067				nss = (nsts_su + 1);
1068		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also, since the firmware consumes Group ID
			 * Management frames, the host has no knowledge of the
			 * group/user position mapping, so it's impossible to
			 * pick the correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rate info
			 * on a best-effort basis.
			 */
1079			mcs = 0;
1080			nss = 1;
1081		}
1082
1083		if (mcs > 0x09) {
1084			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
1085			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
1086				    __le32_to_cpu(rxd_attention->flags),
1087				    __le32_to_cpu(rxd_mpdu_start->info0),
1088				    __le32_to_cpu(rxd_mpdu_start->info1),
1089				    __le32_to_cpu(rxd_msdu_start_common->info0),
1090				    __le32_to_cpu(rxd_msdu_start_common->info1),
1091				    rxd_ppdu_start->info0,
1092				    __le32_to_cpu(rxd_ppdu_start->info1),
1093				    __le32_to_cpu(rxd_ppdu_start->info2),
1094				    __le32_to_cpu(rxd_ppdu_start->info3),
1095				    __le32_to_cpu(rxd_ppdu_start->info4));
1096
1097			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
1098				    __le32_to_cpu(rxd_msdu_end_common->info0),
1099				    __le32_to_cpu(rxd_mpdu_end->info0));
1100
1101			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
1102					"rx desc msdu payload: ",
1103					rxd_msdu_payload, 50);
1104		}
1105
1106		status->rate_idx = mcs;
1107		status->nss = nss;
1108
1109		if (sgi)
1110			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1111
1112		status->bw = ath10k_bw_to_mac80211_bw(bw);
1113		status->encoding = RX_ENC_VHT;
1114		break;
1115	default:
1116		break;
1117	}
1118}
1119
1120static struct ieee80211_channel *
1121ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
1122{
1123	struct ath10k_hw_params *hw = &ar->hw_params;
1124	struct rx_attention *rxd_attention;
1125	struct rx_msdu_end_common *rxd_msdu_end_common;
1126	struct rx_mpdu_start *rxd_mpdu_start;
1127	struct ath10k_peer *peer;
1128	struct ath10k_vif *arvif;
1129	struct cfg80211_chan_def def;
1130	u16 peer_id;
1131
1132	lockdep_assert_held(&ar->data_lock);
1133
1134	if (!rxd)
1135		return NULL;
1136
1137	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1138	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
1139	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
1140
1141	if (rxd_attention->flags &
1142	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
1143		return NULL;
1144
1145	if (!(rxd_msdu_end_common->info0 &
1146	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
1147		return NULL;
1148
1149	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
1150		     RX_MPDU_START_INFO0_PEER_IDX);
1151
1152	peer = ath10k_peer_find_by_id(ar, peer_id);
1153	if (!peer)
1154		return NULL;
1155
1156	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1157	if (WARN_ON_ONCE(!arvif))
1158		return NULL;
1159
1160	if (ath10k_mac_vif_chan(arvif->vif, &def))
1161		return NULL;
1162
1163	return def.chan;
1164}
1165
1166static struct ieee80211_channel *
1167ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
1168{
1169	struct ath10k_vif *arvif;
1170	struct cfg80211_chan_def def;
1171
1172	lockdep_assert_held(&ar->data_lock);
1173
1174	list_for_each_entry(arvif, &ar->arvifs, list) {
1175		if (arvif->vdev_id == vdev_id &&
1176		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
1177			return def.chan;
1178	}
1179
1180	return NULL;
1181}
1182
1183static void
1184ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
1185			      struct ieee80211_chanctx_conf *conf,
1186			      void *data)
1187{
1188	struct cfg80211_chan_def *def = data;
1189
1190	*def = conf->def;
1191}
1192
1193static struct ieee80211_channel *
1194ath10k_htt_rx_h_any_channel(struct ath10k *ar)
1195{
1196	struct cfg80211_chan_def def = {};
1197
1198	ieee80211_iter_chan_contexts_atomic(ar->hw,
1199					    ath10k_htt_rx_h_any_chan_iter,
1200					    &def);
1201
1202	return def.chan;
1203}
1204
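/* Resolve the rx channel with decreasing confidence: scan channel, current
 * rx channel, the peer's vdev channel, the vdev given by @vdev_id, any
 * active channel context and finally the target operating channel.
 */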
1205static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
1206				    struct ieee80211_rx_status *status,
1207				    struct htt_rx_desc *rxd,
1208				    u32 vdev_id)
1209{
1210	struct ieee80211_channel *ch;
1211
1212	spin_lock_bh(&ar->data_lock);
1213	ch = ar->scan_channel;
1214	if (!ch)
1215		ch = ar->rx_channel;
1216	if (!ch)
1217		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
1218	if (!ch)
1219		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
1220	if (!ch)
1221		ch = ath10k_htt_rx_h_any_channel(ar);
1222	if (!ch)
1223		ch = ar->tgt_oper_chan;
1224	spin_unlock_bh(&ar->data_lock);
1225
1226	if (!ch)
1227		return false;
1228
1229	status->band = ch->band;
1230	status->freq = ch->center_freq;
1231
1232	return true;
1233}
1234
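/* Report RSSI. A per-chain value of 0x80 appears to mark an unused or
 * invalid chain and is skipped; values are offsets from a fixed default
 * noise floor (ATH10K_DEFAULT_NOISE_FLOOR), not a measured NF.
 */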
1235static void ath10k_htt_rx_h_signal(struct ath10k *ar,
1236				   struct ieee80211_rx_status *status,
1237				   struct htt_rx_desc *rxd)
1238{
1239	struct ath10k_hw_params *hw = &ar->hw_params;
1240	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
1241	int i;
1242
1243	for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
1244		status->chains &= ~BIT(i);
1245
1246		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
1247			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
1248				rxd_ppdu_start->rssi_chains[i].pri20_mhz;
1249
1250			status->chains |= BIT(i);
1251		}
1252	}
1253
1254	/* FIXME: Get real NF */
1255	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
1256			 rxd_ppdu_start->rssi_comb;
1257	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
1258}
1259
1260static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
1261				    struct ieee80211_rx_status *status,
1262				    struct htt_rx_desc *rxd)
1263{
1264	struct ath10k_hw_params *hw = &ar->hw_params;
1265	struct rx_ppdu_end_common *rxd_ppdu_end_common;
1266
1267	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
1268
1269	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
1270	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
1271	 * TSF. Is it worth holding frames until end of PPDU is known?
1272	 *
1273	 * FIXME: Can we get/compute 64bit TSF?
1274	 */
1275	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
1276	status->flag |= RX_FLAG_MACTIME_END;
1277}
1278
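/* Apply per-PPDU status (channel, rates, signal, timestamp) to @status
 * based on the first/last MPDU flags of the leading rx descriptor.
 */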
1279static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
1280				 struct sk_buff_head *amsdu,
1281				 struct ieee80211_rx_status *status,
1282				 u32 vdev_id)
1283{
1284	struct sk_buff *first;
1285	struct ath10k_hw_params *hw = &ar->hw_params;
1286	struct htt_rx_desc *rxd;
1287	struct rx_attention *rxd_attention;
1288	bool is_first_ppdu;
1289	bool is_last_ppdu;
1290
1291	if (skb_queue_empty(amsdu))
1292		return;
1293
1294	first = skb_peek(amsdu);
1295	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1296#if defined(__linux__)
1297				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
1298#elif defined(__FreeBSD__)
1299				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
1300#endif
1301
1302	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1303
1304	is_first_ppdu = !!(rxd_attention->flags &
1305			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
1306	is_last_ppdu = !!(rxd_attention->flags &
1307			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
1308
1309	if (is_first_ppdu) {
1310		/* New PPDU starts so clear out the old per-PPDU status. */
1311		status->freq = 0;
1312		status->rate_idx = 0;
1313		status->nss = 0;
1314		status->encoding = RX_ENC_LEGACY;
1315		status->bw = RATE_INFO_BW_20;
1316
1317		status->flag &= ~RX_FLAG_MACTIME_END;
1318		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1319
1320		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
1321		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
1322		status->ampdu_reference = ar->ampdu_reference;
1323
1324		ath10k_htt_rx_h_signal(ar, status, rxd);
1325		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
1326		ath10k_htt_rx_h_rates(ar, status, rxd);
1327	}
1328
1329	if (is_last_ppdu) {
1330		ath10k_htt_rx_h_mactime(ar, status, rxd);
1331
1332		/* set ampdu last segment flag */
1333		status->flag |= RX_FLAG_AMPDU_IS_LAST;
1334		ar->ampdu_reference++;
1335	}
1336}
1337
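/* Map TID (0-7) to its 802.11e access category name, for debug logging. */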
1338static const char * const tid_to_ac[] = {
1339	"BE",
1340	"BK",
1341	"BK",
1342	"BE",
1343	"VI",
1344	"VI",
1345	"VO",
1346	"VO",
1347};
1348
1349static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
1350{
1351	u8 *qc;
1352	int tid;
1353
1354	if (!ieee80211_is_data_qos(hdr->frame_control))
1355		return "";
1356
1357	qc = ieee80211_get_qos_ctl(hdr);
1358	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1359	if (tid < 8)
1360		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
1361	else
1362		snprintf(out, size, "tid %d", tid);
1363
1364	return out;
1365}
1366
1367static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
1368				       struct ieee80211_rx_status *rx_status,
1369				       struct sk_buff *skb)
1370{
1371	struct ieee80211_rx_status *status;
1372
1373	status = IEEE80211_SKB_RXCB(skb);
1374	*status = *rx_status;
1375
1376	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
1377}
1378
1379static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
1380{
1381	struct ieee80211_rx_status *status;
1382	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1383	char tid[32];
1384
1385	status = IEEE80211_SKB_RXCB(skb);
1386
1387	if (!(ar->filter_flags & FIF_FCSFAIL) &&
1388	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
1389		ar->stats.rx_crc_err_drop++;
1390		dev_kfree_skb_any(skb);
1391		return;
1392	}
1393
1394	ath10k_dbg(ar, ATH10K_DBG_DATA,
1395		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1396		   skb,
1397		   skb->len,
1398		   ieee80211_get_SA(hdr),
1399		   ath10k_get_tid(hdr, tid, sizeof(tid)),
1400		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
1401							"mcast" : "ucast",
1402		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
1403		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
1404		   (status->encoding == RX_ENC_HT) ? "ht" : "",
1405		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
1406		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
1407		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
1408		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
1409		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
1410		   status->rate_idx,
1411		   status->nss,
1412		   status->freq,
1413		   status->band, status->flag,
1414		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
1415		   !!(status->flag & RX_FLAG_MMIC_ERROR),
1416		   !!(status->flag & RX_FLAG_AMSDU_MORE));
1417	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
1418			skb->data, skb->len);
1419	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
1420	trace_ath10k_rx_payload(ar, skb->data, skb->len);
1421
1422	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
1423}
1424
1425static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
1426				      struct ieee80211_hdr *hdr)
1427{
1428	int len = ieee80211_hdrlen(hdr->frame_control);
1429
1430	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
1431		      ar->running_fw->fw_file.fw_features))
1432		len = round_up(len, 4);
1433
1434	return len;
1435}
1436
1437static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1438					struct sk_buff *msdu,
1439					struct ieee80211_rx_status *status,
1440					enum htt_rx_mpdu_encrypt_type enctype,
1441					bool is_decrypted,
1442					const u8 first_hdr[64])
1443{
1444	struct ieee80211_hdr *hdr;
1445	struct ath10k_hw_params *hw = &ar->hw_params;
1446	struct htt_rx_desc *rxd;
1447	struct rx_msdu_end_common *rxd_msdu_end_common;
1448	size_t hdr_len;
1449	size_t crypto_len;
1450	bool is_first;
1451	bool is_last;
1452	bool msdu_limit_err;
1453	int bytes_aligned = ar->hw_params.decap_align_bytes;
1454	u8 *qos;
1455
1456	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1457#if defined(__linux__)
1458				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1459#elif defined(__FreeBSD__)
1460				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1461#endif
1462
1463	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
1464	is_first = !!(rxd_msdu_end_common->info0 &
1465		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1466	is_last = !!(rxd_msdu_end_common->info0 &
1467		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1468
1469	/* Delivered decapped frame:
1470	 * [802.11 header]
1471	 * [crypto param] <-- can be trimmed if !fcs_err &&
1472	 *                    !decrypt_err && !peer_idx_invalid
1473	 * [amsdu header] <-- only if A-MSDU
1474	 * [rfc1042/llc]
1475	 * [payload]
1476	 * [FCS] <-- at end, needs to be trimmed
1477	 */
1478
	/* Some hardware (QCA99x0 variants) limits the number of MSDUs in an
	 * A-MSDU when deaggregating, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, the hw sends
	 * all remaining MSDUs as a single last MSDU with this msdu limit
	 * error set.
	 */
1484	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
1485
	/* If an MSDU limit error happened, don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * later here.
	 */
1489	/* This probably shouldn't happen but warn just in case */
1490	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
1491		return;
1492
1493	/* This probably shouldn't happen but warn just in case */
1494	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
1495		return;
1496
1497	skb_trim(msdu, msdu->len - FCS_LEN);
1498
1499	/* Push original 80211 header */
1500	if (unlikely(msdu_limit_err)) {
1501#if defined(__linux__)
1502		hdr = (struct ieee80211_hdr *)first_hdr;
1503#elif defined(__FreeBSD__)
1504		hdr = __DECONST(struct ieee80211_hdr *, first_hdr);
1505#endif
1506		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1507		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1508
1509		if (ieee80211_is_data_qos(hdr->frame_control)) {
1510			qos = ieee80211_get_qos_ctl(hdr);
1511			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1512		}
1513
1514		if (crypto_len)
1515			memcpy(skb_push(msdu, crypto_len),
1516#if defined(__linux__)
1517			       (void *)hdr + round_up(hdr_len, bytes_aligned),
1518#elif defined(__FreeBSD__)
1519			       (u8 *)hdr + round_up(hdr_len, bytes_aligned),
1520#endif
1521			       crypto_len);
1522
1523		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1524	}
1525
1526	/* In most cases this will be true for sniffed frames. It makes sense
1527	 * to deliver them as-is without stripping the crypto param. This is
1528	 * necessary for software based decryption.
1529	 *
1530	 * If there's no error then the frame is decrypted. At least that is
1531	 * the case for frames that come in via fragmented rx indication.
1532	 */
1533	if (!is_decrypted)
1534		return;
1535
	/* The payload is decrypted so strip the crypto params. Start from the
	 * tail since hdr is still needed below to compute header lengths.
	 */
1539
1540	hdr = (void *)msdu->data;
1541
1542	/* Tail */
1543	if (status->flag & RX_FLAG_IV_STRIPPED) {
1544		skb_trim(msdu, msdu->len -
1545			 ath10k_htt_rx_crypto_mic_len(ar, enctype));
1546
1547		skb_trim(msdu, msdu->len -
1548			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
1549	} else {
1550		/* MIC */
1551		if (status->flag & RX_FLAG_MIC_STRIPPED)
1552			skb_trim(msdu, msdu->len -
1553				 ath10k_htt_rx_crypto_mic_len(ar, enctype));
1554
1555		/* ICV */
1556		if (status->flag & RX_FLAG_ICV_STRIPPED)
1557			skb_trim(msdu, msdu->len -
1558				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
1559	}
1560
1561	/* MMIC */
1562	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1563	    !ieee80211_has_morefrags(hdr->frame_control) &&
1564	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1565		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
1566
1567	/* Head */
1568	if (status->flag & RX_FLAG_IV_STRIPPED) {
1569		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1570		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1571
1572#if defined(__linux__)
1573		memmove((void *)msdu->data + crypto_len,
1574#elif defined(__FreeBSD__)
1575		memmove((u8 *)msdu->data + crypto_len,
1576#endif
1577			(void *)msdu->data, hdr_len);
1578		skb_pull(msdu, crypto_len);
1579	}
1580}
1581
1582static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1583					  struct sk_buff *msdu,
1584					  struct ieee80211_rx_status *status,
1585					  const u8 first_hdr[64],
1586					  enum htt_rx_mpdu_encrypt_type enctype)
1587{
1588	struct ath10k_hw_params *hw = &ar->hw_params;
1589#if defined(__linux__)
1590	struct ieee80211_hdr *hdr;
1591#elif defined(__FreeBSD__)
1592	const struct ieee80211_hdr *hdr;
1593	struct ieee80211_hdr *hdr2;
1594#endif
1595	struct htt_rx_desc *rxd;
1596	size_t hdr_len;
1597	u8 da[ETH_ALEN];
1598	u8 sa[ETH_ALEN];
1599	int l3_pad_bytes;
1600	int bytes_aligned = ar->hw_params.decap_align_bytes;
1601
	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header, even if the frame is
	 * part of an A-MSDU.
	 */
1612
1613	/* pull decapped header and copy SA & DA */
1614#if defined(__linux__)
1615	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
1616#elif defined(__FreeBSD__)
1617	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (u8 *)msdu->data -
1618#endif
1619				    hw->rx_desc_ops->rx_desc_size);
1620
1621	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1622	skb_put(msdu, l3_pad_bytes);
1623
1624#if defined(__linux__)
1625	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
1626
1627	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
1628	ether_addr_copy(da, ieee80211_get_DA(hdr));
1629	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1630#elif defined(__FreeBSD__)
1631	hdr2 = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
1632
1633	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr2);
1634	ether_addr_copy(da, ieee80211_get_DA(hdr2));
1635	ether_addr_copy(sa, ieee80211_get_SA(hdr2));
1636#endif
1637	skb_pull(msdu, hdr_len);
1638
1639	/* push original 802.11 header */
1640#if defined(__linux__)
1641	hdr = (struct ieee80211_hdr *)first_hdr;
1642#elif defined(__FreeBSD__)
1643	hdr = (const struct ieee80211_hdr *)first_hdr;
1644#endif
1645	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1646
1647	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1648		memcpy(skb_push(msdu,
1649				ath10k_htt_rx_crypto_param_len(ar, enctype)),
1650#if defined(__linux__)
1651		       (void *)hdr + round_up(hdr_len, bytes_aligned),
1652#elif defined(__FreeBSD__)
1653		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
1654#endif
1655			ath10k_htt_rx_crypto_param_len(ar, enctype));
1656	}
1657
1658	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1659
1660	/* original 802.11 header has a different DA and in
1661	 * case of 4addr it may also have different SA
1662	 */
1663#if defined(__linux__)
1664	hdr = (struct ieee80211_hdr *)msdu->data;
1665	ether_addr_copy(ieee80211_get_DA(hdr), da);
1666	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1667#elif defined(__FreeBSD__)
1668	hdr2 = (struct ieee80211_hdr *)msdu->data;
1669	ether_addr_copy(ieee80211_get_DA(hdr2), da);
1670	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
1671#endif
1672}
1673
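/* Locate the RFC 1042/LLC header inside the raw rx header status area by
 * skipping the 802.11 header, crypto params and (for A-MSDU) the subframe
 * header, each rounded up to the hardware decap alignment.
 */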
1674static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1675					  struct sk_buff *msdu,
1676					  enum htt_rx_mpdu_encrypt_type enctype)
1677{
1678	struct ieee80211_hdr *hdr;
1679	struct ath10k_hw_params *hw = &ar->hw_params;
1680	struct htt_rx_desc *rxd;
1681	struct rx_msdu_end_common *rxd_msdu_end_common;
1682	u8 *rxd_rx_hdr_status;
1683	size_t hdr_len, crypto_len;
1684#if defined(__linux__)
1685	void *rfc1042;
1686#elif defined(__FreeBSD__)
1687	u8 *rfc1042;
1688#endif
1689	bool is_first, is_last, is_amsdu;
1690	int bytes_aligned = ar->hw_params.decap_align_bytes;
1691
1692	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1693#if defined(__linux__)
1694				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1695#elif defined(__FreeBSD__)
1696				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1697#endif
1698
1699	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
1700	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
1701	hdr = (void *)rxd_rx_hdr_status;
1702
1703	is_first = !!(rxd_msdu_end_common->info0 &
1704		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1705	is_last = !!(rxd_msdu_end_common->info0 &
1706		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1707	is_amsdu = !(is_first && is_last);
1708
1709#if defined(__linux__)
1710	rfc1042 = hdr;
1711#elif defined(__FreeBSD__)
1712	rfc1042 = (void *)hdr;
1713#endif
1714
1715	if (is_first) {
1716		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1717		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1718
1719		rfc1042 += round_up(hdr_len, bytes_aligned) +
1720			   round_up(crypto_len, bytes_aligned);
1721	}
1722
1723	if (is_amsdu)
1724		rfc1042 += sizeof(struct amsdu_subframe_hdr);
1725
1726	return rfc1042;
1727}
1728
1729static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1730					struct sk_buff *msdu,
1731					struct ieee80211_rx_status *status,
1732					const u8 first_hdr[64],
1733					enum htt_rx_mpdu_encrypt_type enctype)
1734{
1735	struct ath10k_hw_params *hw = &ar->hw_params;
1736#if defined(__linux__)
1737	struct ieee80211_hdr *hdr;
1738#elif defined(__FreeBSD__)
1739	const struct ieee80211_hdr *hdr;
1740	struct ieee80211_hdr *hdr2;
1741#endif
1742	struct ethhdr *eth;
1743	size_t hdr_len;
1744	void *rfc1042;
1745	u8 da[ETH_ALEN];
1746	u8 sa[ETH_ALEN];
1747	int l3_pad_bytes;
1748	struct htt_rx_desc *rxd;
1749	int bytes_aligned = ar->hw_params.decap_align_bytes;
1750
1751	/* Delivered decapped frame:
1752	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1753	 * [payload]
1754	 */
1755
1756	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1757	if (WARN_ON_ONCE(!rfc1042))
1758		return;
1759
1760	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1761#if defined(__linux__)
1762				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1763#elif defined(__FreeBSD__)
1764				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1765#endif
1766
1767	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1768	skb_put(msdu, l3_pad_bytes);
1769	skb_pull(msdu, l3_pad_bytes);
1770
1771	/* pull decapped header and copy SA & DA */
1772	eth = (struct ethhdr *)msdu->data;
1773	ether_addr_copy(da, eth->h_dest);
1774	ether_addr_copy(sa, eth->h_source);
1775	skb_pull(msdu, sizeof(struct ethhdr));
1776
1777	/* push rfc1042/llc/snap */
1778	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1779	       sizeof(struct rfc1042_hdr));
1780
1781	/* push original 802.11 header */
1782#if defined(__linux__)
1783	hdr = (struct ieee80211_hdr *)first_hdr;
1784#elif defined(__FreeBSD__)
1785	hdr = (const struct ieee80211_hdr *)first_hdr;
1786#endif
1787	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1788
1789	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1790		memcpy(skb_push(msdu,
1791				ath10k_htt_rx_crypto_param_len(ar, enctype)),
1792#if defined(__linux__)
1793		       (void *)hdr + round_up(hdr_len, bytes_aligned),
1794#elif defined(__FreeBSD__)
1795		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
1796#endif
1797			ath10k_htt_rx_crypto_param_len(ar, enctype));
1798	}
1799
1800	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1801
1802	/* original 802.11 header has a different DA and in
1803	 * case of 4addr it may also have different SA
1804	 */
1805#if defined(__linux__)
1806	hdr = (struct ieee80211_hdr *)msdu->data;
1807	ether_addr_copy(ieee80211_get_DA(hdr), da);
1808	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1809#elif defined(__FreeBSD__)
1810	hdr2 = (struct ieee80211_hdr *)msdu->data;
1811	ether_addr_copy(ieee80211_get_DA(hdr2), da);
1812	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
1813#endif
1814}
1815
1816static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1817					 struct sk_buff *msdu,
1818					 struct ieee80211_rx_status *status,
1819					 const u8 first_hdr[64],
1820					 enum htt_rx_mpdu_encrypt_type enctype)
1821{
1822	struct ath10k_hw_params *hw = &ar->hw_params;
1823#if defined(__linux__)
1824	struct ieee80211_hdr *hdr;
1825#elif defined(__FreeBSD__)
1826	const struct ieee80211_hdr *hdr;
1827#endif
1828	size_t hdr_len;
1829	int l3_pad_bytes;
1830	struct htt_rx_desc *rxd;
1831	int bytes_aligned = ar->hw_params.decap_align_bytes;
1832
1833	/* Delivered decapped frame:
1834	 * [amsdu header] <-- replaced with 802.11 hdr
1835	 * [rfc1042/llc]
1836	 * [payload]
1837	 */
1838
1839	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1840#if defined(__linux__)
1841				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1842#elif defined(__FreeBSD__)
1843				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1844#endif
1845
1846	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1847
1848	skb_put(msdu, l3_pad_bytes);
1849	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
1850
1851#if defined(__linux__)
1852	hdr = (struct ieee80211_hdr *)first_hdr;
1853#elif defined(__FreeBSD__)
1854	hdr = (const struct ieee80211_hdr *)first_hdr;
1855#endif
1856	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1857
1858	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1859		memcpy(skb_push(msdu,
1860				ath10k_htt_rx_crypto_param_len(ar, enctype)),
1861#if defined(__linux__)
1862		       (void *)hdr + round_up(hdr_len, bytes_aligned),
1863#elif defined(__FreeBSD__)
1864		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
1865#endif
1866			ath10k_htt_rx_crypto_param_len(ar, enctype));
1867	}
1868
1869	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1870}
1871
1872static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1873				    struct sk_buff *msdu,
1874				    struct ieee80211_rx_status *status,
1875				    u8 first_hdr[64],
1876				    enum htt_rx_mpdu_encrypt_type enctype,
1877				    bool is_decrypted)
1878{
1879	struct ath10k_hw_params *hw = &ar->hw_params;
1880	struct htt_rx_desc *rxd;
1881	struct rx_msdu_start_common *rxd_msdu_start_common;
1882	enum rx_msdu_decap_format decap;
1883
1884	/* First msdu's decapped header:
1885	 * [802.11 header] <-- padded to 4 bytes long
1886	 * [crypto param] <-- padded to 4 bytes long
1887	 * [amsdu header] <-- only if A-MSDU
1888	 * [rfc1042/llc]
1889	 *
1890	 * Other (2nd, 3rd, ..) msdu's decapped header:
1891	 * [amsdu header] <-- only if A-MSDU
1892	 * [rfc1042/llc]
1893	 */
1894
1895	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1896#if defined(__linux__)
1897				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1898#elif defined(__FreeBSD__)
1899				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1900#endif
1901
1902	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1903	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
1904		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1905
1906	switch (decap) {
1907	case RX_MSDU_DECAP_RAW:
1908		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1909					    is_decrypted, first_hdr);
1910		break;
1911	case RX_MSDU_DECAP_NATIVE_WIFI:
1912		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1913					      enctype);
1914		break;
1915	case RX_MSDU_DECAP_ETHERNET2_DIX:
1916		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1917		break;
1918	case RX_MSDU_DECAP_8023_SNAP_LLC:
1919		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1920					     enctype);
1921		break;
1922	}
1923}
1924
1925static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
1926{
1927	struct htt_rx_desc *rxd;
1928	struct rx_attention *rxd_attention;
1929	struct rx_msdu_start_common *rxd_msdu_start_common;
1930	u32 flags, info;
1931	bool is_ip4, is_ip6;
1932	bool is_tcp, is_udp;
1933	bool ip_csum_ok, tcpudp_csum_ok;
1934
1935	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1936#if defined(__linux__)
1937				    (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
1938#elif defined(__FreeBSD__)
1939				    (u8 *)skb->data - hw->rx_desc_ops->rx_desc_size);
1940#endif
1941
1942	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1943	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1944	flags = __le32_to_cpu(rxd_attention->flags);
1945	info = __le32_to_cpu(rxd_msdu_start_common->info1);
1946
1947	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1948	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1949	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1950	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1951	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1952	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1953
1954	if (!is_ip4 && !is_ip6)
1955		return CHECKSUM_NONE;
1956	if (!is_tcp && !is_udp)
1957		return CHECKSUM_NONE;
1958	if (!ip_csum_ok)
1959		return CHECKSUM_NONE;
1960	if (!tcpudp_csum_ok)
1961		return CHECKSUM_NONE;
1962
1963	return CHECKSUM_UNNECESSARY;
1964}
1965
1966static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
1967					 struct sk_buff *msdu)
1968{
1969	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
1970}
1971
1972static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
1973				  enum htt_rx_mpdu_encrypt_type enctype)
1974{
1975	struct ieee80211_hdr *hdr;
1976	u64 pn = 0;
1977	u8 *ehdr;
1978
1979	hdr = (struct ieee80211_hdr *)skb->data;
1980	ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
1981
1982	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
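		/* CCMP header layout: PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5;
		 * bytes 2 and 3 carry the reserved and key id octets,
		 * hence the gap in the indices below.
		 */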
1983		pn = ehdr[0];
1984		pn |= (u64)ehdr[1] << 8;
1985		pn |= (u64)ehdr[4] << 16;
1986		pn |= (u64)ehdr[5] << 24;
1987		pn |= (u64)ehdr[6] << 32;
1988		pn |= (u64)ehdr[7] << 40;
1989	}
1990	return pn;
1991}
1992
1993static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
1994						 struct sk_buff *skb)
1995{
1996	struct ieee80211_hdr *hdr;
1997
1998	hdr = (struct ieee80211_hdr *)skb->data;
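	/* Fragmented frames must not be group addressed; accept the frame
	 * only when the destination (addr1) is unicast.
	 */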
1999	return !is_multicast_ether_addr(hdr->addr1);
2000}
2001
2002static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
2003					  struct sk_buff *skb,
2004					  u16 peer_id,
2005					  enum htt_rx_mpdu_encrypt_type enctype)
2006{
2007	struct ath10k_peer *peer;
2008	union htt_rx_pn_t *last_pn, new_pn = {0};
2009	struct ieee80211_hdr *hdr;
2010	u8 tid, frag_number;
2011	u32 seq;
2012
2013	peer = ath10k_peer_find_by_id(ar, peer_id);
2014	if (!peer) {
2015		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
2016		return false;
2017	}
2018
2019	hdr = (struct ieee80211_hdr *)skb->data;
2020	if (ieee80211_is_data_qos(hdr->frame_control))
2021		tid = ieee80211_get_tid(hdr);
2022	else
2023		tid = ATH10K_TXRX_NON_QOS_TID;
2024
2025	last_pn = &peer->frag_tids_last_pn[tid];
2026	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
2027	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
2028	seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
2029
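	/* The first fragment establishes the sequence number and PN baseline;
	 * every later fragment must carry the same sequence number and a PN
	 * exactly one greater than the previous fragment.
	 */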
2030	if (frag_number == 0) {
2031		last_pn->pn48 = new_pn.pn48;
2032		peer->frag_tids_seq[tid] = seq;
2033	} else {
2034		if (seq != peer->frag_tids_seq[tid])
2035			return false;
2036
2037		if (new_pn.pn48 != last_pn->pn48 + 1)
2038			return false;
2039
2040		last_pn->pn48 = new_pn.pn48;
2041	}
2042
2043	return true;
2044}
2045
2046static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
2047				 struct sk_buff_head *amsdu,
2048				 struct ieee80211_rx_status *status,
2049				 bool fill_crypt_header,
2050				 u8 *rx_hdr,
2051				 enum ath10k_pkt_rx_err *err,
2052				 u16 peer_id,
2053				 bool frag)
2054{
2055	struct sk_buff *first;
2056	struct sk_buff *last;
2057	struct sk_buff *msdu, *temp;
2058	struct ath10k_hw_params *hw = &ar->hw_params;
2059	struct htt_rx_desc *rxd;
2060	struct rx_attention *rxd_attention;
2061	struct rx_mpdu_start *rxd_mpdu_start;
2062
2063	struct ieee80211_hdr *hdr;
2064	enum htt_rx_mpdu_encrypt_type enctype;
2065	u8 first_hdr[64];
2066	u8 *qos;
2067	bool has_fcs_err;
2068	bool has_crypto_err;
2069	bool has_tkip_err;
2070	bool has_peer_idx_invalid;
2071	bool is_decrypted;
2072	bool is_mgmt;
2073	u32 attention;
2074	bool frag_pn_check = true, multicast_check = true;
2075
2076	if (skb_queue_empty(amsdu))
2077		return;
2078
2079	first = skb_peek(amsdu);
2080	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2081#if defined(__linux__)
2082				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2083#elif defined(__FreeBSD__)
2084				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
2085#endif
2086
2087	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
2088	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
2089
2090	is_mgmt = !!(rxd_attention->flags &
2091		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
2092
2093	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
2094		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
2095
2096	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
2097	 * decapped header. It'll be used for undecapping of each MSDU.
2098	 */
2099	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
2100	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
2101
2102	if (rx_hdr)
2103		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
2104
2105	/* Each A-MSDU subframe will use the original header as the base and be
2106	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
2107	 */
2108	hdr = (void *)first_hdr;
2109
2110	if (ieee80211_is_data_qos(hdr->frame_control)) {
2111		qos = ieee80211_get_qos_ctl(hdr);
2112		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
2113	}
2114
2115	/* Some attention flags are valid only in the last MSDU. */
2116	last = skb_peek_tail(amsdu);
2117	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2118#if defined(__linux__)
2119				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);
2120#elif defined(__FreeBSD__)
2121				    (u8 *)last->data - hw->rx_desc_ops->rx_desc_size);
2122#endif
2123
2124	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
2125	attention = __le32_to_cpu(rxd_attention->flags);
2126
2127	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
2128	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
2129	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
2130	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
2131
2132	/* Note: If hardware captures an encrypted frame that it can't decrypt,
2133	 * e.g. due to fcs error, missing peer or invalid key data, it will
2134	 * report the frame as raw.
2135	 */
2136	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
2137			!has_fcs_err &&
2138			!has_crypto_err &&
2139			!has_peer_idx_invalid);
2140
2141	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
2142	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2143			  RX_FLAG_MMIC_ERROR |
2144			  RX_FLAG_DECRYPTED |
2145			  RX_FLAG_IV_STRIPPED |
2146			  RX_FLAG_ONLY_MONITOR |
2147			  RX_FLAG_MMIC_STRIPPED);
2148
2149	if (has_fcs_err)
2150		status->flag |= RX_FLAG_FAILED_FCS_CRC;
2151
2152	if (has_tkip_err)
2153		status->flag |= RX_FLAG_MMIC_ERROR;
2154
2155	if (err) {
2156		if (has_fcs_err)
2157			*err = ATH10K_PKT_RX_ERR_FCS;
2158		else if (has_tkip_err)
2159			*err = ATH10K_PKT_RX_ERR_TKIP;
2160		else if (has_crypto_err)
2161			*err = ATH10K_PKT_RX_ERR_CRYPT;
2162		else if (has_peer_idx_invalid)
2163			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
2164	}
2165
2166	/* Firmware reports all necessary management frames via WMI already.
2167	 * They are not reported to monitor interfaces at all so pass the ones
2168	 * coming via HTT to monitor interfaces instead. This simplifies
2169	 * matters a lot.
2170	 */
2171	if (is_mgmt)
2172		status->flag |= RX_FLAG_ONLY_MONITOR;
2173
2174	if (is_decrypted) {
2175		status->flag |= RX_FLAG_DECRYPTED;
2176
2177		if (likely(!is_mgmt))
2178			status->flag |= RX_FLAG_MMIC_STRIPPED;
2179
2180		if (fill_crypt_header)
2181			status->flag |= RX_FLAG_MIC_STRIPPED |
2182					RX_FLAG_ICV_STRIPPED;
2183		else
2184			status->flag |= RX_FLAG_IV_STRIPPED;
2185	}
2186
2187	skb_queue_walk(amsdu, msdu) {
2188		if (frag && !fill_crypt_header && is_decrypted &&
2189		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
2190			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
2191								      msdu,
2192								      peer_id,
2193								      enctype);
2194
2195		if (frag)
2196			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
2197									       msdu);
2198
2199		if (!frag_pn_check || !multicast_check) {
2200			/* Discard the fragment with invalid PN or multicast DA
2201			 */
2202			temp = msdu->prev;
2203			__skb_unlink(msdu, amsdu);
2204			dev_kfree_skb_any(msdu);
2205			msdu = temp;
2206			frag_pn_check = true;
2207			multicast_check = true;
2208			continue;
2209		}
2210
2211		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
2212
2213		if (frag && !fill_crypt_header &&
2214		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2215			status->flag &= ~RX_FLAG_MMIC_STRIPPED;
2216
2217		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
2218					is_decrypted);
2219
2220		/* Undecapping involves copying the original 802.11 header back
2221		 * to sk_buff. If frame is protected and hardware has decrypted
2222		 * it then remove the protected bit.
2223		 */
2224		if (!is_decrypted)
2225			continue;
2226		if (is_mgmt)
2227			continue;
2228
2229		if (fill_crypt_header)
2230			continue;
2231
2232		hdr = (void *)msdu->data;
2233		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2234
2235		if (frag && !fill_crypt_header &&
2236		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2237			status->flag &= ~RX_FLAG_IV_STRIPPED &
2238					~RX_FLAG_MMIC_STRIPPED;
2239	}
2240}
2241
2242static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
2243				    struct sk_buff_head *amsdu,
2244				    struct ieee80211_rx_status *status)
2245{
2246	struct sk_buff *msdu;
2247	struct sk_buff *first_subframe;
2248
2249	first_subframe = skb_peek(amsdu);
2250
2251	while ((msdu = __skb_dequeue(amsdu))) {
2252		/* Setup per-MSDU flags */
2253		if (skb_queue_empty(amsdu))
2254			status->flag &= ~RX_FLAG_AMSDU_MORE;
2255		else
2256			status->flag |= RX_FLAG_AMSDU_MORE;
2257
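		/* Only the first subframe of an A-MSDU carries a fresh PN;
		 * let the remaining subframes reuse it so mac80211's replay
		 * check does not drop them.
		 */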
2258		if (msdu == first_subframe) {
2259			first_subframe = NULL;
2260			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
2261		} else {
2262			status->flag |= RX_FLAG_ALLOW_SAME_PN;
2263		}
2264
2265		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2266	}
2267}
2268
2269static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
2270			       unsigned long *unchain_cnt)
2271{
2272	struct sk_buff *skb, *first;
2273	int space;
2274	int total_len = 0;
2275	int amsdu_len = skb_queue_len(amsdu);
2276
2277	/* TODO: This might be optimized by using
2278	 * skb_try_coalesce or a similar method to
2279	 * decrease copying, or maybe by getting mac80211
2280	 * to provide a way to just receive a list of
2281	 * skbs?
2282	 */
2283
2284	first = __skb_dequeue(amsdu);
2285
2286	/* Allocate total length all at once. */
2287	skb_queue_walk(amsdu, skb)
2288		total_len += skb->len;
2289
2290	space = total_len - skb_tailroom(first);
2291	if ((space > 0) &&
2292	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
2293		/* TODO:  bump some rx-oom error stat */
2294		/* put it back together so we can free the
2295		 * whole list at once.
2296		 */
2297		__skb_queue_head(amsdu, first);
2298		return -1;
2299	}
2300
2301	/* Walk list again, copying contents into
2302	 * the first msdu
2303	 */
2304	while ((skb = __skb_dequeue(amsdu))) {
2305		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
2306					  skb->len);
2307		dev_kfree_skb_any(skb);
2308	}
2309
2310	__skb_queue_head(amsdu, first);
2311
2312	*unchain_cnt += amsdu_len - 1;
2313
2314	return 0;
2315}
2316
2317static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
2318				    struct sk_buff_head *amsdu,
2319				    unsigned long *drop_cnt,
2320				    unsigned long *unchain_cnt)
2321{
2322	struct sk_buff *first;
2323	struct ath10k_hw_params *hw = &ar->hw_params;
2324	struct htt_rx_desc *rxd;
2325	struct rx_msdu_start_common *rxd_msdu_start_common;
2326	struct rx_frag_info_common *rxd_frag_info;
2327	enum rx_msdu_decap_format decap;
2328
2329	first = skb_peek(amsdu);
2330	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2331#if defined(__linux__)
2332				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2333#elif defined(__FreeBSD__)
2334				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
2335#endif
2336
2337	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
2338	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
2339	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
2340		   RX_MSDU_START_INFO1_DECAP_FORMAT);
2341
2342	/* FIXME: Current unchaining logic can only handle simple case of raw
2343	 * msdu chaining. If decapping is other than raw the chaining may be
2344	 * more complex and this isn't handled by the current code. Don't even
2345	 * try re-constructing such frames - it'll be pretty much garbage.
2346	 */
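	/* A valid raw chain consists of the first buffer plus
	 * ring2_more_count continuation buffers.
	 */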
2347	if (decap != RX_MSDU_DECAP_RAW ||
2348	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
2349		*drop_cnt += skb_queue_len(amsdu);
2350		__skb_queue_purge(amsdu);
2351		return;
2352	}
2353
2354	ath10k_unchain_msdu(amsdu, unchain_cnt);
2355}
2356
2357static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
2358					 struct sk_buff_head *amsdu)
2359{
2360	u8 *subframe_hdr;
2361	struct sk_buff *first;
2362	bool is_first, is_last;
2363	struct ath10k_hw_params *hw = &ar->hw_params;
2364	struct htt_rx_desc *rxd;
2365	struct rx_msdu_end_common *rxd_msdu_end_common;
2366	struct rx_mpdu_start *rxd_mpdu_start;
2367	struct ieee80211_hdr *hdr;
2368	size_t hdr_len, crypto_len;
2369	enum htt_rx_mpdu_encrypt_type enctype;
2370	int bytes_aligned = ar->hw_params.decap_align_bytes;
2371
2372	first = skb_peek(amsdu);
2373
2374	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2375#if defined(__linux__)
2376				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2377#elif defined(__FreeBSD__)
2378				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
2379#endif
2380
2381	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
2382	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
2383	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
2384
2385	is_first = !!(rxd_msdu_end_common->info0 &
2386		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
2387	is_last = !!(rxd_msdu_end_common->info0 &
2388		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
2389
2390	/* Return in case of non-aggregated msdu */
2391	if (is_first && is_last)
2392		return true;
2393
2394	/* First msdu flag is not set for the first msdu of the list */
2395	if (!is_first)
2396		return false;
2397
2398	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
2399		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
2400
2401	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2402	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
2403
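	/* The first subframe header (and thus its DA) follows the 802.11
	 * header, padded to the decap alignment, plus any crypto parameters.
	 */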
2404	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
2405		       crypto_len;
2406
2407	/* Validate that the amsdu has a proper first subframe.
2408	 * A single msdu can be received as an amsdu when the
2409	 * unauthenticated amsdu flag of a QoS header gets flipped
2410	 * in non-SPP A-MSDUs; in such cases the first subframe has
2411	 * an llc/snap header in place of a valid da.
2412	 * Return false if the da matches the rfc1042 pattern.
2413	 */
2414	if (ether_addr_equal(subframe_hdr, rfc1042_header))
2415		return false;
2416
2417	return true;
2418}
2419
2420static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
2421					struct sk_buff_head *amsdu,
2422					struct ieee80211_rx_status *rx_status)
2423{
2424	if (!rx_status->freq) {
2425		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
2426		return false;
2427	}
2428
2429	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
2430		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
2431		return false;
2432	}
2433
2434	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
2435		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
2436		return false;
2437	}
2438
2439	return true;
2440}
2441
2442static void ath10k_htt_rx_h_filter(struct ath10k *ar,
2443				   struct sk_buff_head *amsdu,
2444				   struct ieee80211_rx_status *rx_status,
2445				   unsigned long *drop_cnt)
2446{
2447	if (skb_queue_empty(amsdu))
2448		return;
2449
2450	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
2451		return;
2452
2453	if (drop_cnt)
2454		*drop_cnt += skb_queue_len(amsdu);
2455
2456	__skb_queue_purge(amsdu);
2457}
2458
2459static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
2460{
2461	struct ath10k *ar = htt->ar;
2462	struct ieee80211_rx_status *rx_status = &htt->rx_status;
2463	struct sk_buff_head amsdu;
2464	int ret;
2465	unsigned long drop_cnt = 0;
2466	unsigned long unchain_cnt = 0;
2467	unsigned long drop_cnt_filter = 0;
2468	unsigned long msdus_to_queue, num_msdus;
2469	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
2470	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
2471
2472	__skb_queue_head_init(&amsdu);
2473
2474	spin_lock_bh(&htt->rx_ring.lock);
2475	if (htt->rx_confused) {
2476		spin_unlock_bh(&htt->rx_ring.lock);
2477		return -EIO;
2478	}
2479	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
2480	spin_unlock_bh(&htt->rx_ring.lock);
2481
2482	if (ret < 0) {
2483		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
2484		__skb_queue_purge(&amsdu);
2485		/* FIXME: It's probably a good idea to reboot the
2486		 * device instead of leaving it inoperable.
2487		 */
2488		htt->rx_confused = true;
2489		return ret;
2490	}
2491
2492	num_msdus = skb_queue_len(&amsdu);
2493
2494	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
2495
2496	/* only ret = 1 indicates chained msdus */
2497	if (ret > 0)
2498		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
2499
2500	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
2501	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
2502			     false);
2503	msdus_to_queue = skb_queue_len(&amsdu);
2504	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
2505
2506	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
2507				       unchain_cnt, drop_cnt, drop_cnt_filter,
2508				       msdus_to_queue);
2509
2510	return 0;
2511}
2512
2513static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2514					  union htt_rx_pn_t *pn,
2515					  int pn_len_bits)
2516{
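	/* Only 24- and 48-bit PN lengths are handled here; the 48-bit form
	 * spans pn_31_0 plus the low 16 bits of pn_63_32.
	 */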
2517	switch (pn_len_bits) {
2518	case 48:
2519		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2520			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2521		break;
2522	case 24:
2523		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2524		break;
2525	}
2526}
2527
2528static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2529				   union htt_rx_pn_t *old_pn)
2530{
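	/* Returns true (i.e. replay) when the new 48-bit PN does not advance
	 * past the last seen PN.
	 */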
2531	return ((new_pn->pn48 & 0xffffffffffffULL) <=
2532		(old_pn->pn48 & 0xffffffffffffULL));
2533}
2534
2535static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2536					     struct ath10k_peer *peer,
2537					     struct htt_rx_indication_hl *rx)
2538{
2539	bool last_pn_valid, pn_invalid = false;
2540	enum htt_txrx_sec_cast_type sec_index;
2541	enum htt_security_types sec_type;
2542	union htt_rx_pn_t new_pn = {0};
2543	struct htt_hl_rx_desc *rx_desc;
2544	union htt_rx_pn_t *last_pn;
2545	u32 rx_desc_info, tid;
2546	int num_mpdu_ranges;
2547
2548	lockdep_assert_held(&ar->data_lock);
2549
2550	if (!peer)
2551		return false;
2552
2553	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2554		return false;
2555
2556	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2557			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2558
2559	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2560	rx_desc_info = __le32_to_cpu(rx_desc->info);
2561
2562	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2563		return false;
2564
2565	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2566	last_pn_valid = peer->tids_last_pn_valid[tid];
2567	last_pn = &peer->tids_last_pn[tid];
2568
2569	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2570		sec_index = HTT_TXRX_SEC_MCAST;
2571	else
2572		sec_index = HTT_TXRX_SEC_UCAST;
2573
2574	sec_type = peer->rx_pn[sec_index].sec_type;
2575	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2576
2577	if (sec_type != HTT_SECURITY_AES_CCMP &&
2578	    sec_type != HTT_SECURITY_TKIP &&
2579	    sec_type != HTT_SECURITY_TKIP_NOMIC)
2580		return false;
2581
2582	if (last_pn_valid)
2583		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2584	else
2585		peer->tids_last_pn_valid[tid] = true;
2586
2587	if (!pn_invalid)
2588		last_pn->pn48 = new_pn.pn48;
2589
2590	return pn_invalid;
2591}
2592
2593static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2594					 struct htt_rx_indication_hl *rx,
2595					 struct sk_buff *skb,
2596					 enum htt_rx_pn_check_type check_pn_type,
2597					 enum htt_rx_tkip_demic_type tkip_mic_type)
2598{
2599	struct ath10k *ar = htt->ar;
2600	struct ath10k_peer *peer;
2601	struct htt_rx_indication_mpdu_range *mpdu_ranges;
2602	struct fw_rx_desc_hl *fw_desc;
2603	enum htt_txrx_sec_cast_type sec_index;
2604	enum htt_security_types sec_type;
2605	union htt_rx_pn_t new_pn = {0};
2606	struct htt_hl_rx_desc *rx_desc;
2607	struct ieee80211_hdr *hdr;
2608	struct ieee80211_rx_status *rx_status;
2609	u16 peer_id;
2610	u8 rx_desc_len;
2611	int num_mpdu_ranges;
2612	size_t tot_hdr_len;
2613	struct ieee80211_channel *ch;
2614	bool pn_invalid, qos, first_msdu;
2615	u32 tid, rx_desc_info;
2616
2617	peer_id = __le16_to_cpu(rx->hdr.peer_id);
2618	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2619
2620	spin_lock_bh(&ar->data_lock);
2621	peer = ath10k_peer_find_by_id(ar, peer_id);
2622	spin_unlock_bh(&ar->data_lock);
2623	if (!peer && peer_id != HTT_INVALID_PEERID)
2624		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2625
2626	if (!peer)
2627		return true;
2628
2629	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2630			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2631	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2632	fw_desc = &rx->fw_desc;
2633	rx_desc_len = fw_desc->len;
2634
2635	if (fw_desc->u.bits.discard) {
2636		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
2637		goto err;
2638	}
2639
2640	/* I have not yet seen any case where num_mpdu_ranges > 1.
2641	 * qcacld does not seem to handle that case either, so we introduce the
2642	 * same limitation here as well.
2643	 */
2644	if (num_mpdu_ranges > 1)
2645		ath10k_warn(ar,
2646			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2647			    num_mpdu_ranges);
2648
2649	if (mpdu_ranges->mpdu_range_status !=
2650	    HTT_RX_IND_MPDU_STATUS_OK &&
2651	    mpdu_ranges->mpdu_range_status !=
2652	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2653		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
2654			   mpdu_ranges->mpdu_range_status);
2655		goto err;
2656	}
2657
2658	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2659	rx_desc_info = __le32_to_cpu(rx_desc->info);
2660
2661	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2662		sec_index = HTT_TXRX_SEC_MCAST;
2663	else
2664		sec_index = HTT_TXRX_SEC_UCAST;
2665
2666	sec_type = peer->rx_pn[sec_index].sec_type;
2667	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2668
2669	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2670
2671	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2672		spin_lock_bh(&ar->data_lock);
2673		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2674		spin_unlock_bh(&ar->data_lock);
2675
2676		if (pn_invalid)
2677			goto err;
2678	}
2679
2680	/* Strip off all headers before the MAC header before delivery to
2681	 * mac80211
2682	 */
2683	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2684		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
2685		      sizeof(rx->fw_desc) +
2686		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2687
2688	skb_pull(skb, tot_hdr_len);
2689
2690	hdr = (struct ieee80211_hdr *)skb->data;
2691	qos = ieee80211_is_data_qos(hdr->frame_control);
2692
2693	rx_status = IEEE80211_SKB_RXCB(skb);
2694	memset(rx_status, 0, sizeof(*rx_status));
2695
2696	if (rx->ppdu.combined_rssi == 0) {
2697		/* SDIO firmware does not provide signal */
2698		rx_status->signal = 0;
2699		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2700	} else {
2701		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2702			rx->ppdu.combined_rssi;
2703		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2704	}
2705
2706	spin_lock_bh(&ar->data_lock);
2707	ch = ar->scan_channel;
2708	if (!ch)
2709		ch = ar->rx_channel;
2710	if (!ch)
2711		ch = ath10k_htt_rx_h_any_channel(ar);
2712	if (!ch)
2713		ch = ar->tgt_oper_chan;
2714	spin_unlock_bh(&ar->data_lock);
2715
2716	if (ch) {
2717		rx_status->band = ch->band;
2718		rx_status->freq = ch->center_freq;
2719	}
2720	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2721		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2722	else
2723		rx_status->flag |= RX_FLAG_AMSDU_MORE;
2724
2725	/* Not entirely sure about this, but all frames from the chipset have
2726	 * the protected flag set even though they have already been decrypted.
2727	 * Clearing this flag is necessary in order for mac80211 not to drop
2728	 * the frame.
2729	 * TODO: Verify this is always the case or find out a way to check
2730	 * if there has been hw decryption.
2731	 */
2732	if (ieee80211_has_protected(hdr->frame_control)) {
2733		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2734		rx_status->flag |= RX_FLAG_DECRYPTED |
2735				   RX_FLAG_IV_STRIPPED |
2736				   RX_FLAG_MMIC_STRIPPED;
2737
2738		if (tid < IEEE80211_NUM_TIDS &&
2739		    first_msdu &&
2740		    check_pn_type == HTT_RX_PN_CHECK &&
2741		   (sec_type == HTT_SECURITY_AES_CCMP ||
2742		    sec_type == HTT_SECURITY_TKIP ||
2743		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
2744			u8 offset, *ivp, i;
2745			s8 keyidx = 0;
2746			__le64 pn48 = cpu_to_le64(new_pn.pn48);
2747
2748			hdr = (struct ieee80211_hdr *)skb->data;
2749			offset = ieee80211_hdrlen(hdr->frame_control);
2750			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2751			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2752
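			/* Re-insert a CCMP/TKIP IV in front of the payload
			 * and fill it with the PN reported by firmware so
			 * that mac80211 can perform the replay check itself.
			 */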
2753			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
2754				skb->data, offset);
2755			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2756			ivp = skb->data + offset;
2757			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2758			/* Ext IV */
2759			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2760
2761			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2762				if (peer->keys[i] &&
2763				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2764					keyidx = peer->keys[i]->keyidx;
2765			}
2766
2767			/* Key ID */
2768			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2769
2770			if (sec_type == HTT_SECURITY_AES_CCMP) {
2771				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2772				/* pn 0, pn 1 */
2773				memcpy(skb->data + offset, &pn48, 2);
2774				/* pn 2, pn 3, pn 4, pn 5 */
2775				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2776			} else {
2777				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2778				/* TSC 0 */
2779				memcpy(skb->data + offset + 2, &pn48, 1);
2780				/* TSC 1 */
2781				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
2782				/* TSC 2, TSC 3, TSC 4, TSC 5 */
2783				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2784			}
2785		}
2786	}
2787
2788	if (tkip_mic_type == HTT_RX_TKIP_MIC)
2789		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
2790				   ~RX_FLAG_MMIC_STRIPPED;
2791
2792	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2793		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2794
2795	if (!qos && tid < IEEE80211_NUM_TIDS) {
2796		u8 offset;
2797		__le16 qos_ctrl = 0;
2798
2799		hdr = (struct ieee80211_hdr *)skb->data;
2800		offset = ieee80211_hdrlen(hdr->frame_control);
2801
2802		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2803		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2804		skb_push(skb, IEEE80211_QOS_CTL_LEN);
2805		qos_ctrl = cpu_to_le16(tid);
2806		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2807	}
2808
2809	if (ar->napi.dev)
2810		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
2811	else
2812		ieee80211_rx_ni(ar->hw, skb);
2813
2814	/* We have delivered the skb to the upper layers (mac80211) so we
2815	 * must not free it.
2816	 */
2817	return false;
2818err:
2819	/* Tell the caller that it must free the skb since we have not
2820	 * consumed it
2821	 */
2822	return true;
2823}
2824
2825static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2826					       u16 head_len,
2827					       u16 hdr_len)
2828{
2829	u8 *ivp, *orig_hdr;
2830
2831	orig_hdr = skb->data;
2832	ivp = orig_hdr + hdr_len + head_len;
2833
2834	/* the ExtIV bit is always set to 1 for TKIP */
2835	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2836		return -EINVAL;
2837
2838	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2839	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2840	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2841	return 0;
2842}
2843
2844static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2845						 u16 head_len,
2846						 u16 hdr_len)
2847{
2848	u8 *ivp, *orig_hdr;
2849
2850	orig_hdr = skb->data;
2851	ivp = orig_hdr + hdr_len + head_len;
2852
2853	/* the ExtIV bit is always set to 1 for TKIP */
2854	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2855		return -EINVAL;
2856
2857	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2858	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2859	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2860	return 0;
2861}
2862
2863static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2864					 u16 head_len,
2865					 u16 hdr_len)
2866{
2867	u8 *ivp, *orig_hdr;
2868
2869	orig_hdr = skb->data;
2870	ivp = orig_hdr + hdr_len + head_len;
2871
2872	/* the ExtIV bit is always set to 1 for CCMP */
2873	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2874		return -EINVAL;
2875
2876	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2877	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2878	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2879	return 0;
2880}
2881
2882static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2883					u16 head_len,
2884					u16 hdr_len)
2885{
2886	u8 *orig_hdr;
2887
2888	orig_hdr = skb->data;
2889
2890	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2891		orig_hdr, head_len + hdr_len);
2892	skb_pull(skb, IEEE80211_WEP_IV_LEN);
2893	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
2894	return 0;
2895}
2896
2897static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
2898					      struct htt_rx_fragment_indication *rx,
2899					      struct sk_buff *skb)
2900{
2901	struct ath10k *ar = htt->ar;
2902	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
2903	enum htt_txrx_sec_cast_type sec_index;
2904	struct htt_rx_indication_hl *rx_hl;
2905	enum htt_security_types sec_type;
2906	u32 tid, frag, seq, rx_desc_info;
2907	union htt_rx_pn_t new_pn = {0};
2908	struct htt_hl_rx_desc *rx_desc;
2909	u16 peer_id, sc, hdr_space;
2910	union htt_rx_pn_t *last_pn;
2911	struct ieee80211_hdr *hdr;
2912	int ret, num_mpdu_ranges;
2913	struct ath10k_peer *peer;
2914	struct htt_resp *resp;
2915	size_t tot_hdr_len;
2916
2917	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2918	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
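	/* the fragment is delivered with the FCS still attached; strip it */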
2919	skb_trim(skb, skb->len - FCS_LEN);
2920
2921	peer_id = __le16_to_cpu(rx->peer_id);
2922	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
2923
2924	spin_lock_bh(&ar->data_lock);
2925	peer = ath10k_peer_find_by_id(ar, peer_id);
2926	if (!peer) {
2927		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
2928		goto err;
2929	}
2930
2931	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
2932			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2933
2934	tot_hdr_len = sizeof(struct htt_resp_hdr) +
2935		      sizeof(rx_hl->hdr) +
2936		      sizeof(rx_hl->ppdu) +
2937		      sizeof(rx_hl->prefix) +
2938		      sizeof(rx_hl->fw_desc) +
2939		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
2940
2941	tid =  MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2942	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
2943	rx_desc_info = __le32_to_cpu(rx_desc->info);
2944
2945	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
2946
2947	if (is_multicast_ether_addr(hdr->addr1)) {
2948		/* Discard the fragment with multicast DA */
2949		goto err;
2950	}
2951
2952	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
2953		spin_unlock_bh(&ar->data_lock);
2954		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2955						    HTT_RX_NON_PN_CHECK,
2956						    HTT_RX_NON_TKIP_MIC);
2957	}
2958
2959	if (ieee80211_has_retry(hdr->frame_control))
2960		goto err;
2961
2962	hdr_space = ieee80211_hdrlen(hdr->frame_control);
2963	sc = __le16_to_cpu(hdr->seq_ctrl);
2964	seq = IEEE80211_SEQ_TO_SN(sc);
2965	frag = sc & IEEE80211_SCTL_FRAG;
2966
2967	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
2968		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
2969	sec_type = peer->rx_pn[sec_index].sec_type;
2970	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2971
2972	switch (sec_type) {
2973	case HTT_SECURITY_TKIP:
2974		tkip_mic = HTT_RX_TKIP_MIC;
2975		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
2976							    tot_hdr_len +
2977							    rx_hl->fw_desc.len,
2978							    hdr_space);
2979		if (ret)
2980			goto err;
2981		break;
2982	case HTT_SECURITY_TKIP_NOMIC:
2983		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
2984							  tot_hdr_len +
2985							  rx_hl->fw_desc.len,
2986							  hdr_space);
2987		if (ret)
2988			goto err;
2989		break;
2990	case HTT_SECURITY_AES_CCMP:
2991		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
2992						    tot_hdr_len + rx_hl->fw_desc.len,
2993						    hdr_space);
2994		if (ret)
2995			goto err;
2996		break;
2997	case HTT_SECURITY_WEP128:
2998	case HTT_SECURITY_WEP104:
2999	case HTT_SECURITY_WEP40:
3000		ret = ath10k_htt_rx_frag_wep_decap(skb,
3001						   tot_hdr_len + rx_hl->fw_desc.len,
3002						   hdr_space);
3003		if (ret)
3004			goto err;
3005		break;
3006	default:
3007		break;
3008	}
3009
3010	resp = (struct htt_resp *)(skb->data);
3011
3012	if (sec_type != HTT_SECURITY_AES_CCMP &&
3013	    sec_type != HTT_SECURITY_TKIP &&
3014	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
3015		spin_unlock_bh(&ar->data_lock);
3016		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
3017						    HTT_RX_NON_PN_CHECK,
3018						    HTT_RX_NON_TKIP_MIC);
3019	}
3020
3021	last_pn = &peer->frag_tids_last_pn[tid];
3022
3023	if (frag == 0) {
3024		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
3025			goto err;
3026
3027		last_pn->pn48 = new_pn.pn48;
3028		peer->frag_tids_seq[tid] = seq;
3029	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
3030		if (seq != peer->frag_tids_seq[tid])
3031			goto err;
3032
3033		if (new_pn.pn48 != last_pn->pn48 + 1)
3034			goto err;
3035
3036		last_pn->pn48 = new_pn.pn48;
3037		last_pn = &peer->tids_last_pn[tid];
3038		last_pn->pn48 = new_pn.pn48;
3039	}
3040
3041	spin_unlock_bh(&ar->data_lock);
3042
3043	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
3044					    HTT_RX_NON_PN_CHECK, tkip_mic);
3045
3046err:
3047	spin_unlock_bh(&ar->data_lock);
3048
3049	/* Tell the caller that it must free the skb since we have not
3050	 * consumed it
3051	 */
3052	return true;
3053}
3054
3055static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
3056					 struct htt_rx_indication *rx)
3057{
3058	struct ath10k *ar = htt->ar;
3059	struct htt_rx_indication_mpdu_range *mpdu_ranges;
3060	int num_mpdu_ranges;
3061	int i, mpdu_count = 0;
3062	u16 peer_id;
3063	u8 tid;
3064
3065	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
3066			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
3067	peer_id = __le16_to_cpu(rx->hdr.peer_id);
3068	tid =  MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
3069
3070	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
3071
3072	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
3073			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
3074
3075	for (i = 0; i < num_mpdu_ranges; i++)
3076		mpdu_count += mpdu_ranges[i].mpdu_count;
3077
3078	atomic_add(mpdu_count, &htt->num_mpdus_ready);
3079
3080	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
3081					     num_mpdu_ranges);
3082}
3083
3084static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
3085				       struct sk_buff *skb)
3086{
3087	struct ath10k_htt *htt = &ar->htt;
3088	struct htt_resp *resp = (struct htt_resp *)skb->data;
3089	struct htt_tx_done tx_done = {};
3090	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
3091	__le16 msdu_id, *msdus;
3092	bool rssi_enabled = false;
3093	u8 msdu_count = 0, num_airtime_records, tid;
3094	int i, htt_pad = 0;
3095	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
3096	struct ath10k_peer *peer;
3097	u16 ppdu_info_offset = 0, peer_id;
3098	u32 tx_duration;
3099
3100	switch (status) {
3101	case HTT_DATA_TX_STATUS_NO_ACK:
3102		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
3103		break;
3104	case HTT_DATA_TX_STATUS_OK:
3105		tx_done.status = HTT_TX_COMPL_STATE_ACK;
3106		break;
3107	case HTT_DATA_TX_STATUS_DISCARD:
3108	case HTT_DATA_TX_STATUS_POSTPONE:
3109	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
3110		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
3111		break;
3112	default:
3113		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
3114		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
3115		break;
3116	}
3117
3118	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
3119		   resp->data_tx_completion.num_msdus);
3120
3121	msdu_count = resp->data_tx_completion.num_msdus;
3122	msdus = resp->data_tx_completion.msdus;
3123	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
3124
3125	if (rssi_enabled)
3126		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
3127							    resp);
3128
3129	for (i = 0; i < msdu_count; i++) {
3130		msdu_id = msdus[i];
3131		tx_done.msdu_id = __le16_to_cpu(msdu_id);
3132
3133		if (rssi_enabled) {
3134			/* The total number of MSDUs should be even;
3135			 * if an odd number of MSDUs is sent, firmware
3136			 * fills the last msdu id with 0xffff
3137			 */
3138			if (msdu_count & 0x01) {
3139				msdu_id = msdus[msdu_count +  i + 1 + htt_pad];
3140				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
3141			} else {
3142				msdu_id = msdus[msdu_count +  i + htt_pad];
3143				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
3144			}
3145		}
3146
3147		/* kfifo_put: In practice firmware shouldn't fire off per-CE
3148		 * interrupt and main interrupt (MSI/-X range case) for the same
3149		 * HTC service so it should be safe to use kfifo_put w/o lock.
3150		 *
3151		 * From kfifo_put() documentation:
3152		 *  Note that with only one concurrent reader and one concurrent
3153		 *  writer, you don't need extra locking to use these macros.
3154		 */
3155		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
3156			ath10k_txrx_tx_unref(htt, &tx_done);
3157		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
3158			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
3159				    tx_done.msdu_id, tx_done.status);
3160			ath10k_txrx_tx_unref(htt, &tx_done);
3161		}
3162	}
3163
3164	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
3165		return;
3166
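	/* The PPDU duration records follow the msdu id array: round the id
	 * count up to an even number of 16-bit words, double it when ACK RSSI
	 * entries are appended, and skip two more words when the PPID/PA
	 * fields are present.
	 */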
3167	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
3168
3169	if (rssi_enabled)
3170		ppdu_info_offset += ppdu_info_offset;
3171
3172	if (resp->data_tx_completion.flags2 &
3173	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
3174		ppdu_info_offset += 2;
3175
3176	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
3177	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
3178					__le32_to_cpu(ppdu_info->info0));
3179
3180	for (i = 0; i < num_airtime_records; i++) {
3181		struct htt_data_tx_ppdu_dur *ppdu_dur;
3182		u32 info0;
3183
3184		ppdu_dur = &ppdu_info->ppdu_dur[i];
3185		info0 = __le32_to_cpu(ppdu_dur->info0);
3186
3187		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
3188				    info0);
3189		rcu_read_lock();
3190		spin_lock_bh(&ar->data_lock);
3191
3192		peer = ath10k_peer_find_by_id(ar, peer_id);
3193		if (!peer || !peer->sta) {
3194			spin_unlock_bh(&ar->data_lock);
3195			rcu_read_unlock();
3196			continue;
3197		}
3198
3199		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
3200						IEEE80211_QOS_CTL_TID_MASK;
3201		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
3202
3203		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
3204
3205		spin_unlock_bh(&ar->data_lock);
3206		rcu_read_unlock();
3207	}
3208}
3209
3210static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
3211{
3212	struct htt_rx_addba *ev = &resp->rx_addba;
3213	struct ath10k_peer *peer;
3214	struct ath10k_vif *arvif;
3215	u16 info0, tid, peer_id;
3216
3217	info0 = __le16_to_cpu(ev->info0);
3218	tid = MS(info0, HTT_RX_BA_INFO0_TID);
3219	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3220
3221	ath10k_dbg(ar, ATH10K_DBG_HTT,
3222		   "htt rx addba tid %u peer_id %u size %u\n",
3223		   tid, peer_id, ev->window_size);
3224
3225	spin_lock_bh(&ar->data_lock);
3226	peer = ath10k_peer_find_by_id(ar, peer_id);
3227	if (!peer) {
3228		ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3229			    peer_id);
3230		spin_unlock_bh(&ar->data_lock);
3231		return;
3232	}
3233
3234	arvif = ath10k_get_arvif(ar, peer->vdev_id);
3235	if (!arvif) {
3236		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3237			    peer->vdev_id);
3238		spin_unlock_bh(&ar->data_lock);
3239		return;
3240	}
3241
3242	ath10k_dbg(ar, ATH10K_DBG_HTT,
3243		   "htt rx start rx ba session sta %pM tid %u size %u\n",
3244		   peer->addr, tid, ev->window_size);
3245
3246	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3247	spin_unlock_bh(&ar->data_lock);
3248}
3249
3250static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
3251{
3252	struct htt_rx_delba *ev = &resp->rx_delba;
3253	struct ath10k_peer *peer;
3254	struct ath10k_vif *arvif;
3255	u16 info0, tid, peer_id;
3256
3257	info0 = __le16_to_cpu(ev->info0);
3258	tid = MS(info0, HTT_RX_BA_INFO0_TID);
3259	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3260
3261	ath10k_dbg(ar, ATH10K_DBG_HTT,
3262		   "htt rx delba tid %u peer_id %u\n",
3263		   tid, peer_id);
3264
3265	spin_lock_bh(&ar->data_lock);
3266	peer = ath10k_peer_find_by_id(ar, peer_id);
3267	if (!peer) {
3268		ath10k_warn(ar, "received delba event for invalid peer_id: %u\n",
3269			    peer_id);
3270		spin_unlock_bh(&ar->data_lock);
3271		return;
3272	}
3273
3274	arvif = ath10k_get_arvif(ar, peer->vdev_id);
3275	if (!arvif) {
3276		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
3277			    peer->vdev_id);
3278		spin_unlock_bh(&ar->data_lock);
3279		return;
3280	}
3281
3282	ath10k_dbg(ar, ATH10K_DBG_HTT,
3283		   "htt rx stop rx ba session sta %pM tid %u\n",
3284		   peer->addr, tid);
3285
3286	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3287	spin_unlock_bh(&ar->data_lock);
3288}
3289
3290static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
3291				       struct sk_buff_head *list,
3292				       struct sk_buff_head *amsdu)
3293{
3294	struct sk_buff *msdu;
3295	struct htt_rx_desc *rxd;
3296	struct rx_msdu_end_common *rxd_msdu_end_common;
3297
3298	if (skb_queue_empty(list))
3299		return -ENOBUFS;
3300
3301	if (WARN_ON(!skb_queue_empty(amsdu)))
3302		return -EINVAL;
3303
3304	while ((msdu = __skb_dequeue(list))) {
3305		__skb_queue_tail(amsdu, msdu);
3306
3307		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3308#if defined(__linux__)
3309					    (void *)msdu->data -
3310#elif defined(__FreeBSD__)
3311					    (u8 *)msdu->data -
3312#endif
3313					    hw->rx_desc_ops->rx_desc_size);
3314
3315		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3316		if (rxd_msdu_end_common->info0 &
3317		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
3318			break;
3319	}
3320
3321	msdu = skb_peek_tail(amsdu);
3322	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3323#if defined(__linux__)
3324				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
3325#elif defined(__FreeBSD__)
3326				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
3327#endif
3328
3329	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3330	if (!(rxd_msdu_end_common->info0 &
3331	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
3332		skb_queue_splice_init(amsdu, list);
3333		return -EAGAIN;
3334	}
3335
3336	return 0;
3337}
3338
3339static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
3340					    struct sk_buff *skb)
3341{
3342	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3343
3344	if (!ieee80211_has_protected(hdr->frame_control))
3345		return;
3346
3347	/* Offloaded frames are already decrypted but firmware insists they are
3348	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
3349	 * will drop the frame.
3350	 */
3351
3352	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
3353	status->flag |= RX_FLAG_DECRYPTED |
3354			RX_FLAG_IV_STRIPPED |
3355			RX_FLAG_MMIC_STRIPPED;
3356}
3357
3358static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
3359				       struct sk_buff_head *list)
3360{
3361	struct ath10k_htt *htt = &ar->htt;
3362	struct ieee80211_rx_status *status = &htt->rx_status;
3363	struct htt_rx_offload_msdu *rx;
3364	struct sk_buff *msdu;
3365	size_t offset;
3366
3367	while ((msdu = __skb_dequeue(list))) {
3368		/* Offloaded frames don't have an Rx descriptor. Instead they have
3369		 * a short meta information header.
3370		 */
3371
3372		rx = (void *)msdu->data;
3373
3374		skb_put(msdu, sizeof(*rx));
3375		skb_pull(msdu, sizeof(*rx));
3376
3377		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
3378			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
3379			dev_kfree_skb_any(msdu);
3380			continue;
3381		}
3382
3383		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
3384
3385		/* Offloaded rx header length isn't a multiple of 2 or 4, so the
3386		 * actual payload is unaligned. Align the frame. Otherwise
3387		 * mac80211 complains. This shouldn't reduce performance much
3388		 * because these offloaded frames are rare.
3389		 */
3390		offset = 4 - ((unsigned long)msdu->data & 3);
3391		skb_put(msdu, offset);
3392		memmove(msdu->data + offset, msdu->data, msdu->len);
3393		skb_pull(msdu, offset);
3394
3395		/* FIXME: The frame is NWifi. Re-construct QoS Control
3396		 * if possible later.
3397		 */
3398
3399		memset(status, 0, sizeof(*status));
3400		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
3401
3402		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
3403		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
3404		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
3405	}
3406}
3407
3408static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
3409{
3410	struct ath10k_htt *htt = &ar->htt;
3411	struct htt_resp *resp = (void *)skb->data;
3412	struct ieee80211_rx_status *status = &htt->rx_status;
3413	struct sk_buff_head list;
3414	struct sk_buff_head amsdu;
3415	u16 peer_id;
3416	u16 msdu_count;
3417	u8 vdev_id;
3418	u8 tid;
3419	bool offload;
3420	bool frag;
3421	int ret;
3422
3423	lockdep_assert_held(&htt->rx_ring.lock);
3424
3425	if (htt->rx_confused)
3426		return -EIO;
3427
3428	skb_pull(skb, sizeof(resp->hdr));
3429	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
3430
3431	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
3432	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
3433	vdev_id = resp->rx_in_ord_ind.vdev_id;
3434	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
3435	offload = !!(resp->rx_in_ord_ind.info &
3436			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
3437	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
3438
3439	ath10k_dbg(ar, ATH10K_DBG_HTT,
3440		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
3441		   vdev_id, peer_id, tid, offload, frag, msdu_count);
3442
3443	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
3444		ath10k_warn(ar, "dropping invalid in order rx indication\n");
3445		return -EINVAL;
3446	}
3447
3448	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
3449	 * extracted and processed.
3450	 */
3451	__skb_queue_head_init(&list);
3452	if (ar->hw_params.target_64bit)
3453		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
3454						     &list);
3455	else
3456		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
3457						     &list);
3458
3459	if (ret < 0) {
3460		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
3461		htt->rx_confused = true;
3462		return -EIO;
3463	}
3464
3465	/* Offloaded frames are very different and need to be handled
3466	 * separately.
3467	 */
3468	if (offload)
3469		ath10k_htt_rx_h_rx_offload(ar, &list);
3470
3471	while (!skb_queue_empty(&list)) {
3472		__skb_queue_head_init(&amsdu);
3473		ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
3474		switch (ret) {
3475		case 0:
3476			/* Note: The in-order indication may report interleaved
3477			 * frames from different PPDUs, meaning the rx rate reported
3478			 * to mac80211 isn't accurate/reliable. It's still
3479			 * better to report something than nothing though. This
3480			 * should still give an idea about rx rate to the user.
3481			 */
3482			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
3483			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
3484			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
3485					     NULL, peer_id, frag);
3486			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
3487			break;
3488		case -EAGAIN:
3489			fallthrough;
3490		default:
3491			/* Should not happen. */
3492			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
3493			htt->rx_confused = true;
3494			__skb_queue_purge(&list);
3495			return -EIO;
3496		}
3497	}
3498	return ret;
3499}
3500
3501static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3502						   const __le32 *resp_ids,
3503						   int num_resp_ids)
3504{
3505	int i;
3506	u32 resp_id;
3507
3508	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3509		   num_resp_ids);
3510
3511	for (i = 0; i < num_resp_ids; i++) {
3512		resp_id = le32_to_cpu(resp_ids[i]);
3513
3514		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3515			   resp_id);
3516
3517		/* TODO: free resp_id */
3518	}
3519}
3520
3521static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3522{
3523	struct ieee80211_hw *hw = ar->hw;
3524	struct ieee80211_txq *txq;
3525	struct htt_resp *resp = (struct htt_resp *)skb->data;
3526	struct htt_tx_fetch_record *record;
3527	size_t len;
3528	size_t max_num_bytes;
3529	size_t max_num_msdus;
3530	size_t num_bytes;
3531	size_t num_msdus;
3532	const __le32 *resp_ids;
3533	u16 num_records;
3534	u16 num_resp_ids;
3535	u16 peer_id;
3536	u8 tid;
3537	int ret;
3538	int i;
3539	bool may_tx;
3540
3541	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3542
3543	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3544	if (unlikely(skb->len < len)) {
3545		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3546		return;
3547	}
3548
3549	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3550	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3551
3552	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3553	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3554
3555	if (unlikely(skb->len < len)) {
3556		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3557		return;
3558	}
3559
3560	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
3561		   num_records, num_resp_ids,
3562		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3563
3564	if (!ar->htt.tx_q_state.enabled) {
3565		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3566		return;
3567	}
3568
3569	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3570		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3571		return;
3572	}
3573
3574	rcu_read_lock();
3575
3576	for (i = 0; i < num_records; i++) {
3577		record = &resp->tx_fetch_ind.records[i];
3578		peer_id = MS(le16_to_cpu(record->info),
3579			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3580		tid = MS(le16_to_cpu(record->info),
3581			 HTT_TX_FETCH_RECORD_INFO_TID);
3582		max_num_msdus = le16_to_cpu(record->num_msdus);
3583		max_num_bytes = le32_to_cpu(record->num_bytes);
3584
3585		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
3586			   i, peer_id, tid, max_num_msdus, max_num_bytes);
3587
3588		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3589		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3590			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3591				    peer_id, tid);
3592			continue;
3593		}
3594
3595		spin_lock_bh(&ar->data_lock);
3596		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3597		spin_unlock_bh(&ar->data_lock);
3598
3599		/* It is okay to release the lock and use txq because RCU read
3600		 * lock is held.
3601		 */
3602
3603		if (unlikely(!txq)) {
3604			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3605				    peer_id, tid);
3606			continue;
3607		}
3608
3609		num_msdus = 0;
3610		num_bytes = 0;
3611
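		/* Push frames from this txq until either per-record limit is
		 * reached; the resulting totals are written back into the
		 * record and reported to firmware in the fetch response below.
		 */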
3612		ieee80211_txq_schedule_start(hw, txq->ac);
3613		may_tx = ieee80211_txq_may_transmit(hw, txq);
3614		while (num_msdus < max_num_msdus &&
3615		       num_bytes < max_num_bytes) {
3616			if (!may_tx)
3617				break;
3618
3619			ret = ath10k_mac_tx_push_txq(hw, txq);
3620			if (ret < 0)
3621				break;
3622
3623			num_msdus++;
3624			num_bytes += ret;
3625		}
3626		ieee80211_return_txq(hw, txq, false);
3627		ieee80211_txq_schedule_end(hw, txq->ac);
3628
3629		record->num_msdus = cpu_to_le16(num_msdus);
3630		record->num_bytes = cpu_to_le32(num_bytes);
3631
3632		ath10k_htt_tx_txq_recalc(hw, txq);
3633	}
3634
3635	rcu_read_unlock();
3636
3637	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3638	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3639
3640	ret = ath10k_htt_tx_fetch_resp(ar,
3641				       resp->tx_fetch_ind.token,
3642				       resp->tx_fetch_ind.fetch_seq_num,
3643				       resp->tx_fetch_ind.records,
3644				       num_records);
3645	if (unlikely(ret)) {
3646		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3647			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
3648		/* FIXME: request fw restart */
3649	}
3650
3651	ath10k_htt_tx_txq_sync(ar);
3652}
3653
3654static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3655					   struct sk_buff *skb)
3656{
3657	const struct htt_resp *resp = (void *)skb->data;
3658	size_t len;
3659	int num_resp_ids;
3660
3661	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3662
3663	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3664	if (unlikely(skb->len < len)) {
3665		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3666		return;
3667	}
3668
3669	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3670	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3671
3672	if (unlikely(skb->len < len)) {
3673		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3674		return;
3675	}
3676
3677	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3678					       resp->tx_fetch_confirm.resp_ids,
3679					       num_resp_ids);
3680}
3681
3682static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3683					     struct sk_buff *skb)
3684{
3685	const struct htt_resp *resp = (void *)skb->data;
3686	const struct htt_tx_mode_switch_record *record;
3687	struct ieee80211_txq *txq;
3688	struct ath10k_txq *artxq;
3689	size_t len;
3690	size_t num_records;
3691	enum htt_tx_mode_switch_mode mode;
3692	bool enable;
3693	u16 info0;
3694	u16 info1;
3695	u16 threshold;
3696	u16 peer_id;
3697	u8 tid;
3698	int i;
3699
3700	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3701
3702	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3703	if (unlikely(skb->len < len)) {
3704		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3705		return;
3706	}
3707
3708	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3709	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3710
3711	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
3713	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3714	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3715
3716	ath10k_dbg(ar, ATH10K_DBG_HTT,
3717		   "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
3718		   info0, info1, enable, num_records, mode, threshold);
3719
3720	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3721
3722	if (unlikely(skb->len < len)) {
3723		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3724		return;
3725	}
3726
3727	switch (mode) {
3728	case HTT_TX_MODE_SWITCH_PUSH:
3729	case HTT_TX_MODE_SWITCH_PUSH_PULL:
3730		break;
3731	default:
3732		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3733			    mode);
3734		return;
3735	}
3736
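	/* A disable indication leaves the current tx queue state untouched;
	 * only a switch to push or push-pull mode updates the state below.
	 */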
3737	if (!enable)
3738		return;
3739
3740	ar->htt.tx_q_state.enabled = enable;
3741	ar->htt.tx_q_state.mode = mode;
3742	ar->htt.tx_q_state.num_push_allowed = threshold;
3743
3744	rcu_read_lock();
3745
3746	for (i = 0; i < num_records; i++) {
3747		record = &resp->tx_mode_switch_ind.records[i];
3748		info0 = le16_to_cpu(record->info0);
3749		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3750		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3751
3752		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3753		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3754			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3755				    peer_id, tid);
3756			continue;
3757		}
3758
3759		spin_lock_bh(&ar->data_lock);
3760		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3761		spin_unlock_bh(&ar->data_lock);
3762
3763		/* It is okay to release the lock and use txq because RCU read
3764		 * lock is held.
3765		 */
3766
3767		if (unlikely(!txq)) {
3768			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3769				    peer_id, tid);
3770			continue;
3771		}
3772
3773		spin_lock_bh(&ar->htt.tx_lock);
3774		artxq = (void *)txq->drv_priv;
3775		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3776		spin_unlock_bh(&ar->htt.tx_lock);
3777	}
3778
3779	rcu_read_unlock();
3780
3781	ath10k_mac_tx_push_pending(ar);
3782}
3783
3784void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3785{
3786	bool release;
3787
3788	release = ath10k_htt_t2h_msg_handler(ar, skb);
3789
3790	/* Free the indication buffer */
3791	if (release)
3792		dev_kfree_skb_any(skb);
3793}
3794
3795static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3796{
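	/* CCK rates (1, 2, 5.5, 11 Mbps; 5.5 is encoded as 5) followed by
	 * OFDM rates (6..54 Mbps).
	 */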
3797	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3798					  18, 24, 36, 48, 54};
3799	int i;
3800
3801	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3802		if (rate == legacy_rates[i])
3803			return i;
3804	}
3805
	ath10k_warn(ar, "Invalid legacy rate %d peer stats\n", rate);
3807	return -EINVAL;
3808}
3809
3810static void
3811ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
3812				    struct ath10k_sta *arsta,
3813				    struct ath10k_per_peer_tx_stats *pstats,
3814				    s8 legacy_rate_idx)
3815{
3816	struct rate_info *txrate = &arsta->txrate;
3817	struct ath10k_htt_tx_stats *tx_stats;
3818	int idx, ht_idx, gi, mcs, bw, nss;
3819	unsigned long flags;
3820
3821	if (!arsta->tx_stats)
3822		return;
3823
3824	tx_stats = arsta->tx_stats;
3825	flags = txrate->flags;
3826	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
3827	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
3828	bw = txrate->bw;
3829	nss = txrate->nss;
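	/* ht_idx packs 8 MCS values per spatial stream; idx flattens
	 * (mcs, nss, bw, gi) into the rate_table index with 8 bw/gi slots
	 * per MCS and 80 slots per spatial stream.
	 */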
3830	ht_idx = mcs + (nss - 1) * 8;
3831	idx = mcs * 8 + 8 * 10 * (nss - 1);
3832	idx += bw * 2 + gi;
3833
3834#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3835
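	/* In each stats bucket row [0] accumulates bytes and row [1]
	 * accumulates packets.
	 */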
3836	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
3837		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
3838		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
3839		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
3840		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
3841		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
3842		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
3843	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3844		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
3845		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
3846		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
3847		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
3848		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
3849		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
3850	} else {
3851		mcs = legacy_rate_idx;
3852
3853		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
3854		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
3855		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
3856		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
3857		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
3858		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
3859	}
3860
3861	if (ATH10K_HW_AMPDU(pstats->flags)) {
3862		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
3863
3864		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3865			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
3866				pstats->succ_bytes + pstats->retry_bytes;
3867			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
3868				pstats->succ_pkts + pstats->retry_pkts;
3869		} else {
3870			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
3871				pstats->succ_bytes + pstats->retry_bytes;
3872			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
3873				pstats->succ_pkts + pstats->retry_pkts;
3874		}
3875		STATS_OP_FMT(AMPDU).bw[0][bw] +=
3876			pstats->succ_bytes + pstats->retry_bytes;
3877		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
3878			pstats->succ_bytes + pstats->retry_bytes;
3879		STATS_OP_FMT(AMPDU).gi[0][gi] +=
3880			pstats->succ_bytes + pstats->retry_bytes;
3881		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
3882			pstats->succ_bytes + pstats->retry_bytes;
3883		STATS_OP_FMT(AMPDU).bw[1][bw] +=
3884			pstats->succ_pkts + pstats->retry_pkts;
3885		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
3886			pstats->succ_pkts + pstats->retry_pkts;
3887		STATS_OP_FMT(AMPDU).gi[1][gi] +=
3888			pstats->succ_pkts + pstats->retry_pkts;
3889		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
3890			pstats->succ_pkts + pstats->retry_pkts;
3891	} else {
3892		tx_stats->ack_fails +=
3893				ATH10K_HW_BA_FAIL(pstats->flags);
3894	}
3895
3896	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
3897	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
3898	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
3899
3900	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
3901	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
3902	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
3903
3904	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
3905	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
3906	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
3907
3908	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
3909	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
3910	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
3911
3912	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
3913	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
3914	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
3915
3916	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
3917	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
3918	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
3919
3920	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
3921		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
3922		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
3923		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
3924		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
3925		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
3926		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
3927	}
3928
3929	tx_stats->tx_duration += pstats->duration;
3930}
3931
3932static void
3933ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3934				struct ieee80211_sta *sta,
3935				struct ath10k_per_peer_tx_stats *peer_stats)
3936{
3937	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3938	struct ieee80211_chanctx_conf *conf = NULL;
3939	u8 rate = 0, sgi;
3940	s8 rate_idx = 0;
3941	bool skip_auto_rate;
3942	struct rate_info txrate;
3943
3944	lockdep_assert_held(&ar->data_lock);
3945
3946	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3947	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3948	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3949	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3950	sgi = ATH10K_HW_GI(peer_stats->flags);
3951	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3952
	/* Firmware's rate control skips broadcast/management frames,
	 * frames for which the host has configured fixed rates, and some
	 * other special cases.
	 */
3956	if (skip_auto_rate)
3957		return;
3958
3959	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %d peer stats\n", txrate.mcs);
3961		return;
3962	}
3963
3964	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3965	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats\n",
3967			    txrate.mcs, txrate.nss);
3968		return;
3969	}
3970
3971	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3972	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3973	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3974	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3975		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3976		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3977		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3978			rate = 5;
3979		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3980		if (rate_idx < 0)
3981			return;
3982		arsta->txrate.legacy = rate;
3983	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
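		/* mac80211 expects the HT MCS index to encode the stream
		 * count (MCS 0-7 per stream), hence the 8 * (nss - 1) offset.
		 */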
3984		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3985		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3986	} else {
3987		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3988		arsta->txrate.mcs = txrate.mcs;
3989	}
3990
3991	switch (txrate.flags) {
3992	case WMI_RATE_PREAMBLE_OFDM:
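		/* The legacy rate table starts with four CCK entries; 5 GHz
		 * has no CCK rates, so shift the OFDM index for mac80211's
		 * 5 GHz rate table.
		 */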
3993		if (arsta->arvif && arsta->arvif->vif)
3994			conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
3995		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3996			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3997		break;
3998	case WMI_RATE_PREAMBLE_CCK:
3999		arsta->tx_info.status.rates[0].idx = rate_idx;
4000		if (sgi)
4001			arsta->tx_info.status.rates[0].flags |=
4002				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
4003				 IEEE80211_TX_RC_SHORT_GI);
4004		break;
4005	case WMI_RATE_PREAMBLE_HT:
4006		arsta->tx_info.status.rates[0].idx =
4007				txrate.mcs + ((txrate.nss - 1) * 8);
4008		if (sgi)
4009			arsta->tx_info.status.rates[0].flags |=
4010					IEEE80211_TX_RC_SHORT_GI;
4011		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
4012		break;
4013	case WMI_RATE_PREAMBLE_VHT:
4014		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
4015				       txrate.mcs, txrate.nss);
4016		if (sgi)
4017			arsta->tx_info.status.rates[0].flags |=
4018						IEEE80211_TX_RC_SHORT_GI;
4019		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
4020		break;
4021	}
4022
4023	arsta->txrate.nss = txrate.nss;
4024	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
4025	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
4026	if (sgi)
4027		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
4028
4029	switch (arsta->txrate.bw) {
4030	case RATE_INFO_BW_40:
4031		arsta->tx_info.status.rates[0].flags |=
4032				IEEE80211_TX_RC_40_MHZ_WIDTH;
4033		break;
4034	case RATE_INFO_BW_80:
4035		arsta->tx_info.status.rates[0].flags |=
4036				IEEE80211_TX_RC_80_MHZ_WIDTH;
4037		break;
4038	case RATE_INFO_BW_160:
4039		arsta->tx_info.status.rates[0].flags |=
4040				IEEE80211_TX_RC_160_MHZ_WIDTH;
4041		break;
4042	}
4043
4044	if (peer_stats->succ_pkts) {
4045		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
4046		arsta->tx_info.status.rates[0].count = 1;
4047		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
4048	}
4049
4050	if (ar->htt.disable_tx_comp) {
4051		arsta->tx_failed += peer_stats->failed_pkts;
4052		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
4053			   arsta->tx_failed);
4054	}
4055
4056	arsta->tx_retries += peer_stats->retry_pkts;
	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d\n", arsta->tx_retries);
4058
4059	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
4060		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
4061						    rate_idx);
4062}
4063
4064static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
4065					struct sk_buff *skb)
4066{
4067	struct htt_resp *resp = (struct htt_resp *)skb->data;
4068	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
4069	struct htt_per_peer_tx_stats_ind *tx_stats;
4070	struct ieee80211_sta *sta;
4071	struct ath10k_peer *peer;
4072	int peer_id, i;
4073	u8 ppdu_len, num_ppdu;
4074
4075	num_ppdu = resp->peer_tx_stats.num_ppdu;
4076	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
4077
4078	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
4079		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
4080		return;
4081	}
4082
4083	tx_stats = (struct htt_per_peer_tx_stats_ind *)
4084			(resp->peer_tx_stats.payload);
4085	peer_id = __le16_to_cpu(tx_stats->peer_id);
4086
4087	rcu_read_lock();
4088	spin_lock_bh(&ar->data_lock);
4089	peer = ath10k_peer_find_by_id(ar, peer_id);
4090	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
4092			    peer_id);
4093		goto out;
4094	}
4095
4096	sta = peer->sta;
4097	for (i = 0; i < num_ppdu; i++) {
4098		tx_stats = (struct htt_per_peer_tx_stats_ind *)
4099			   (resp->peer_tx_stats.payload + i * ppdu_len);
4100
4101		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
4102		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
4103		p_tx_stats->failed_bytes =
4104				__le32_to_cpu(tx_stats->failed_bytes);
4105		p_tx_stats->ratecode = tx_stats->ratecode;
4106		p_tx_stats->flags = tx_stats->flags;
4107		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
4108		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
4109		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
4110		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
4111
4112		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
4113	}
4114
4115out:
4116	spin_unlock_bh(&ar->data_lock);
4117	rcu_read_unlock();
4118}
4119
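/* For 10.2 firmware the per-peer tx stats arrive embedded in pktlog TX_STAT
 * records rather than in a dedicated HTT peer stats indication.
 */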
4120static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
4121{
4122	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
4123	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
4124	struct ath10k_10_2_peer_tx_stats *tx_stats;
4125	struct ieee80211_sta *sta;
4126	struct ath10k_peer *peer;
4127	u16 log_type = __le16_to_cpu(hdr->log_type);
4128	u32 peer_id = 0, i;
4129
4130	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
4131		return;
4132
4133	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
4134		    ATH10K_10_2_TX_STATS_OFFSET);
4135
4136	if (!tx_stats->tx_ppdu_cnt)
4137		return;
4138
4139	peer_id = tx_stats->peer_id;
4140
4141	rcu_read_lock();
4142	spin_lock_bh(&ar->data_lock);
4143	peer = ath10k_peer_find_by_id(ar, peer_id);
4144	if (!peer || !peer->sta) {
4145		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
4146			    peer_id);
4147		goto out;
4148	}
4149
4150	sta = peer->sta;
4151	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
4152		p_tx_stats->succ_bytes =
4153			__le16_to_cpu(tx_stats->success_bytes[i]);
4154		p_tx_stats->retry_bytes =
4155			__le16_to_cpu(tx_stats->retry_bytes[i]);
4156		p_tx_stats->failed_bytes =
4157			__le16_to_cpu(tx_stats->failed_bytes[i]);
4158		p_tx_stats->ratecode = tx_stats->ratecode[i];
4159		p_tx_stats->flags = tx_stats->flags[i];
4160		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
4161		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
4162		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
4163
4164		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
4165	}
4166	spin_unlock_bh(&ar->data_lock);
4167	rcu_read_unlock();
4168
4169	return;
4170
4171out:
4172	spin_unlock_bh(&ar->data_lock);
4173	rcu_read_unlock();
4174}
4175
4176static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
4177{
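	/* PN length in bits; TKIP and CCMP use a 48-bit packet number. */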
4178	switch (sec_type) {
4179	case HTT_SECURITY_TKIP:
4180	case HTT_SECURITY_TKIP_NOMIC:
4181	case HTT_SECURITY_AES_CCMP:
4182		return 48;
4183	default:
4184		return 0;
4185	}
4186}
4187
4188static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
4189					  struct htt_security_indication *ev)
4190{
4191	enum htt_txrx_sec_cast_type sec_index;
4192	enum htt_security_types sec_type;
4193	struct ath10k_peer *peer;
4194
4195	spin_lock_bh(&ar->data_lock);
4196
4197	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
4198	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication\n",
4200			    __le16_to_cpu(ev->peer_id));
4201		goto out;
4202	}
4203
4204	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
4205
4206	if (ev->flags & HTT_SECURITY_IS_UNICAST)
4207		sec_index = HTT_TXRX_SEC_UCAST;
4208	else
4209		sec_index = HTT_TXRX_SEC_MCAST;
4210
4211	peer->rx_pn[sec_index].sec_type = sec_type;
4212	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
4213
4214	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
4215	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
4216
4217out:
4218	spin_unlock_bh(&ar->data_lock);
4219}
4220
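/* Returns true when the caller should free the skb, false when ownership of
 * the skb has been handed off (e.g. it was queued for deferred processing).
 */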
4221bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
4222{
4223	struct ath10k_htt *htt = &ar->htt;
4224	struct htt_resp *resp = (struct htt_resp *)skb->data;
4225	enum htt_t2h_msg_type type;
4226
4227	/* confirm alignment */
4228	if (!IS_ALIGNED((unsigned long)skb->data, 4))
4229		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
4230
4231	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
4232		   resp->hdr.msg_type);
4233
4234	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
4236			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
4237		return true;
4238	}
4239	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
4240
4241	switch (type) {
4242	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
4243		htt->target_version_major = resp->ver_resp.major;
4244		htt->target_version_minor = resp->ver_resp.minor;
4245		complete(&htt->target_version_received);
4246		break;
4247	}
4248	case HTT_T2H_MSG_TYPE_RX_IND:
4249		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
4250			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
4251		} else {
4252			skb_queue_tail(&htt->rx_indication_head, skb);
4253			return false;
4254		}
4255		break;
4256	case HTT_T2H_MSG_TYPE_PEER_MAP: {
4257		struct htt_peer_map_event ev = {
4258			.vdev_id = resp->peer_map.vdev_id,
4259			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
4260		};
4261		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
4262		ath10k_peer_map_event(htt, &ev);
4263		break;
4264	}
4265	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
4266		struct htt_peer_unmap_event ev = {
4267			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
4268		};
4269		ath10k_peer_unmap_event(htt, &ev);
4270		break;
4271	}
4272	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
4273		struct htt_tx_done tx_done = {};
4274		struct ath10k_htt *htt = &ar->htt;
4275		struct ath10k_htc *htc = &ar->htc;
4276		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4277		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
4278		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
4279
4280		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
4281
4282		switch (status) {
4283		case HTT_MGMT_TX_STATUS_OK:
4284			tx_done.status = HTT_TX_COMPL_STATE_ACK;
4285			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
4286				     ar->wmi.svc_map) &&
4287			    (resp->mgmt_tx_completion.flags &
4288			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
4289				tx_done.ack_rssi =
4290				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
4291					  info);
4292			}
4293			break;
4294		case HTT_MGMT_TX_STATUS_RETRY:
4295			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
4296			break;
4297		case HTT_MGMT_TX_STATUS_DROP:
4298			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
4299			break;
4300		}
4301
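		/* When tx completions are disabled the HTC credit consumed by
		 * this frame isn't returned through the normal completion
		 * path, so return it manually here.
		 */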
4302		if (htt->disable_tx_comp) {
4303			spin_lock_bh(&htc->tx_lock);
4304			ep->tx_credits++;
4305			spin_unlock_bh(&htc->tx_lock);
4306		}
4307
4308		status = ath10k_txrx_tx_unref(htt, &tx_done);
4309		if (!status) {
4310			spin_lock_bh(&htt->tx_lock);
4311			ath10k_htt_tx_mgmt_dec_pending(htt);
4312			spin_unlock_bh(&htt->tx_lock);
4313		}
4314		break;
4315	}
4316	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
4317		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
4318		break;
4319	case HTT_T2H_MSG_TYPE_SEC_IND: {
4320		struct ath10k *ar = htt->ar;
4321		struct htt_security_indication *ev = &resp->security_indication;
4322
4323		ath10k_htt_rx_sec_ind_handler(ar, ev);
4324		ath10k_dbg(ar, ATH10K_DBG_HTT,
4325			   "sec ind peer_id %d unicast %d type %d\n",
4326			  __le16_to_cpu(ev->peer_id),
4327			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
4328			  MS(ev->flags, HTT_SECURITY_TYPE));
4329		complete(&ar->install_key_done);
4330		break;
4331	}
4332	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
4333		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
4334				skb->data, skb->len);
4335		atomic_inc(&htt->num_mpdus_ready);
4336
4337		return ath10k_htt_rx_proc_rx_frag_ind(htt,
4338						      &resp->rx_frag_ind,
4339						      skb);
4340	}
4341	case HTT_T2H_MSG_TYPE_TEST:
4342		break;
4343	case HTT_T2H_MSG_TYPE_STATS_CONF:
4344		trace_ath10k_htt_stats(ar, skb->data, skb->len);
4345		break;
4346	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects the host may be able to fix them.
		 * ath10k sends all tx frames as already inspected so this
		 * shouldn't happen unless fw has a bug.
4351		 */
4352		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
4353		break;
4354	case HTT_T2H_MSG_TYPE_RX_ADDBA:
4355		ath10k_htt_rx_addba(ar, resp);
4356		break;
4357	case HTT_T2H_MSG_TYPE_RX_DELBA:
4358		ath10k_htt_rx_delba(ar, resp);
4359		break;
4360	case HTT_T2H_MSG_TYPE_PKTLOG: {
4361		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
4362					skb->len -
4363					offsetof(struct htt_resp,
4364						 pktlog_msg.payload));
4365
4366		if (ath10k_peer_stats_enabled(ar))
4367			ath10k_fetch_10_2_tx_stats(ar,
4368						   resp->pktlog_msg.payload);
4369		break;
4370	}
4371	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
4372		/* Ignore this event because mac80211 takes care of Rx
4373		 * aggregation reordering.
4374		 */
4375		break;
4376	}
4377	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
4378		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
4379		return false;
4380	}
4381	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
4382		struct ath10k_htt *htt = &ar->htt;
4383		struct ath10k_htc *htc = &ar->htc;
4384		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4385		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
4386		int htt_credit_delta;
4387
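		/* The credit delta is carried as an absolute value with a
		 * separate sign bit.
		 */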
4388		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
4389		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
4390			htt_credit_delta = -htt_credit_delta;
4391
4392		ath10k_dbg(ar, ATH10K_DBG_HTT,
4393			   "htt credit update delta %d\n",
4394			   htt_credit_delta);
4395
4396		if (htt->disable_tx_comp) {
4397			spin_lock_bh(&htc->tx_lock);
4398			ep->tx_credits += htt_credit_delta;
4399			spin_unlock_bh(&htc->tx_lock);
4400			ath10k_dbg(ar, ATH10K_DBG_HTT,
4401				   "htt credit total %d\n",
4402				   ep->tx_credits);
4403			ep->ep_ops.ep_tx_credits(htc->ar);
4404		}
4405		break;
4406	}
4407	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
4408		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
4409		u32 freq = __le32_to_cpu(resp->chan_change.freq);
4410
4411		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
4412		ath10k_dbg(ar, ATH10K_DBG_HTT,
4413			   "htt chan change freq %u phymode %s\n",
4414			   freq, ath10k_wmi_phymode_str(phymode));
4415		break;
4416	}
4417	case HTT_T2H_MSG_TYPE_AGGR_CONF:
4418		break;
4419	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
4420		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
4421
4422		if (!tx_fetch_ind) {
4423			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
4424			break;
4425		}
4426		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
4427		break;
4428	}
4429	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
4430		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
4431		break;
4432	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
4433		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
4434		break;
4435	case HTT_T2H_MSG_TYPE_PEER_STATS:
4436		ath10k_htt_fetch_peer_stats(ar, skb);
4437		break;
4438	case HTT_T2H_MSG_TYPE_EN_STATS:
4439	default:
4440		ath10k_warn(ar, "htt event (%d) not handled\n",
4441			    resp->hdr.msg_type);
4442		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
4443				skb->data, skb->len);
4444		break;
4445	}
4446	return true;
4447}
4448EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
4449
4450void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
4451					     struct sk_buff *skb)
4452{
4453	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
4454	dev_kfree_skb_any(skb);
4455}
4456EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
4457
4458static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
4459{
4460	struct sk_buff *skb;
4461
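	/* Hand queued rx MSDUs to mac80211, stopping once the NAPI budget is
	 * consumed.
	 */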
4462	while (quota < budget) {
4463		if (skb_queue_empty(&ar->htt.rx_msdus_q))
4464			break;
4465
4466		skb = skb_dequeue(&ar->htt.rx_msdus_q);
4467		if (!skb)
4468			break;
4469		ath10k_process_rx(ar, skb);
4470		quota++;
4471	}
4472
4473	return quota;
4474}
4475
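/* Process rx indications queued on rx_indication_head by the HL t2h handler,
 * bounded by the NAPI budget.
 */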
4476int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
4477{
4478	struct htt_resp *resp;
4479	struct ath10k_htt *htt = &ar->htt;
4480	struct sk_buff *skb;
4481	bool release;
4482	int quota;
4483
4484	for (quota = 0; quota < budget; quota++) {
4485		skb = skb_dequeue(&htt->rx_indication_head);
4486		if (!skb)
4487			break;
4488
4489		resp = (struct htt_resp *)skb->data;
4490
4491		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
4492						       &resp->rx_ind_hl,
4493						       skb,
4494						       HTT_RX_PN_CHECK,
4495						       HTT_RX_NON_TKIP_MIC);
4496
4497		if (release)
4498			dev_kfree_skb_any(skb);
4499
4500		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
4501			   skb_queue_len(&htt->rx_indication_head));
4502	}
4503	return quota;
4504}
4505EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
4506
4507int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
4508{
4509	struct ath10k_htt *htt = &ar->htt;
4510	struct htt_tx_done tx_done = {};
4511	struct sk_buff_head tx_ind_q;
4512	struct sk_buff *skb;
4513	unsigned long flags;
4514	int quota = 0, done, ret;
4515	bool resched_napi = false;
4516
4517	__skb_queue_head_init(&tx_ind_q);
4518
4519	/* Process pending frames before dequeuing more data
4520	 * from hardware.
4521	 */
4522	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
4523	if (quota == budget) {
4524		resched_napi = true;
4525		goto exit;
4526	}
4527
4528	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
4529		spin_lock_bh(&htt->rx_ring.lock);
4530		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
4531		spin_unlock_bh(&htt->rx_ring.lock);
4532
4533		dev_kfree_skb_any(skb);
4534		if (ret == -EIO) {
4535			resched_napi = true;
4536			goto exit;
4537		}
4538	}
4539
4540	while (atomic_read(&htt->num_mpdus_ready)) {
4541		ret = ath10k_htt_rx_handle_amsdu(htt);
4542		if (ret == -EIO) {
4543			resched_napi = true;
4544			goto exit;
4545		}
4546		atomic_dec(&htt->num_mpdus_ready);
4547	}
4548
4549	/* Deliver received data after processing data from hardware */
4550	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
4551
4552	/* From NAPI documentation:
4553	 *  The napi poll() function may also process TX completions, in which
4554	 *  case if it processes the entire TX ring then it should count that
4555	 *  work as the rest of the budget.
4556	 */
4557	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
4558		quota = budget;
4559
	/* kfifo_get: called only from this NAPI poll context so it's neatly
	 * serialized. From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macros.
4564	 */
4565	while (kfifo_get(&htt->txdone_fifo, &tx_done))
4566		ath10k_txrx_tx_unref(htt, &tx_done);
4567
4568	ath10k_mac_tx_push_pending(ar);
4569
4570	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
4571	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
4572	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
4573
4574	while ((skb = __skb_dequeue(&tx_ind_q))) {
4575		ath10k_htt_rx_tx_fetch_ind(ar, skb);
4576		dev_kfree_skb_any(skb);
4577	}
4578
4579exit:
4580	ath10k_htt_rx_msdu_buff_replenish(htt);
4581	/* In case of rx failure or more data to read, report budget
4582	 * to reschedule NAPI poll
4583	 */
4584	done = resched_napi ? budget : quota;
4585
4586	return done;
4587}
4588EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
4589
4590static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
4591	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
4592	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
4593	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
4594	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
4595	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
4596};
4597
4598static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
4599	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
4600	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
4601	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
4602	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
4603	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
4604};
4605
4606static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
4607	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
4608};
4609
4610void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
4611{
4612	struct ath10k *ar = htt->ar;
4613
4614	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
4615		htt->rx_ops = &htt_rx_ops_hl;
4616	else if (ar->hw_params.target_64bit)
4617		htt->rx_ops = &htt_rx_ops_64;
4618	else
4619		htt->rx_ops = &htt_rx_ops_32;
4620}
4621