1// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/ieee80211.h>
7#include <linux/kernel.h>
8#include <linux/skbuff.h>
9#include <crypto/hash.h>
10#include "core.h"
11#include "debug.h"
12#include "debugfs_htt_stats.h"
13#include "debugfs_sta.h"
14#include "hal_desc.h"
15#include "hw.h"
16#include "dp_rx.h"
17#include "hal_rx.h"
18#include "dp_tx.h"
19#include "peer.h"
20
21#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
22
23static inline
24u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
25{
26	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
27}
28
29static inline
30enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
31							struct hal_rx_desc *desc)
32{
33	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
34		return HAL_ENCRYPT_TYPE_OPEN;
35
36	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
37}
38
39static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
40						      struct hal_rx_desc *desc)
41{
42	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
43}
44
45static inline
46bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
47					    struct hal_rx_desc *desc)
48{
49	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
50}
51
52static inline
53u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
54					      struct hal_rx_desc *desc)
55{
56	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
57}
58
59static inline
60bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
61					      struct hal_rx_desc *desc)
62{
63	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
64}
65
66static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
67						      struct hal_rx_desc *desc)
68{
69	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
70}
71
72static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
73							struct sk_buff *skb)
74{
75	struct ieee80211_hdr *hdr;
76
77	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
78	return ieee80211_has_morefrags(hdr->frame_control);
79}
80
81static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
82						    struct sk_buff *skb)
83{
84	struct ieee80211_hdr *hdr;
85
86	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
87	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
88}
89
90static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
91						   struct hal_rx_desc *desc)
92{
93	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
94}
95
96static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
97					       struct hal_rx_desc *desc)
98{
99	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
100}
101
102static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
103{
104	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
105			   __le32_to_cpu(attn->info2));
106}
107
108static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
109{
110	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
111			   __le32_to_cpu(attn->info1));
112}
113
114static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
115{
116	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
117			   __le32_to_cpu(attn->info1));
118}
119
120static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
121{
122	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
123			  __le32_to_cpu(attn->info2)) ==
124		RX_DESC_DECRYPT_STATUS_CODE_OK);
125}
126
127static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
128{
129	u32 info = __le32_to_cpu(attn->info1);
130	u32 errmap = 0;
131
132	if (info & RX_ATTENTION_INFO1_FCS_ERR)
133		errmap |= DP_RX_MPDU_ERR_FCS;
134
135	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
136		errmap |= DP_RX_MPDU_ERR_DECRYPT;
137
138	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
139		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
140
141	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
142		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
143
144	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
145		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
146
147	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
148		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
149
150	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
151		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
152
153	return errmap;
154}
155
156static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
157					     struct hal_rx_desc *desc)
158{
159	struct rx_attention *rx_attention;
160	u32 errmap;
161
162	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
163	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
164
165	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
166}
167
168static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
169						     struct hal_rx_desc *desc)
170{
171	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
172}
173
174static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
175					       struct hal_rx_desc *desc)
176{
177	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
178}
179
180static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
181						    struct hal_rx_desc *desc)
182{
183	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
184}
185
186static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
187						 struct hal_rx_desc *desc)
188{
189	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
190}
191
192static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
193						 struct hal_rx_desc *desc)
194{
195	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
196}
197
198static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
199						    struct hal_rx_desc *desc)
200{
201	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
202}
203
204static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
205					       struct hal_rx_desc *desc)
206{
207	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
208}
209
210static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
211					       struct hal_rx_desc *desc)
212{
213	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
214}
215
216static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
217						    struct hal_rx_desc *desc)
218{
219	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
220}
221
222static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
223					       struct hal_rx_desc *desc)
224{
225	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
226}
227
228static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
229						      struct hal_rx_desc *desc)
230{
231	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
232}
233
234static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
235					      struct hal_rx_desc *desc)
236{
237	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
238}
239
240static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
241					   struct hal_rx_desc *fdesc,
242					   struct hal_rx_desc *ldesc)
243{
244	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
245}
246
247static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
248{
249	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
250			 __le32_to_cpu(attn->info1));
251}
252
253static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
254						struct hal_rx_desc *rx_desc)
255{
256	u8 *rx_pkt_hdr;
257
258	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
259
260	return rx_pkt_hdr;
261}
262
263static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
264					       struct hal_rx_desc *rx_desc)
265{
266	u32 tlv_tag;
267
268	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
269
270	return tlv_tag == HAL_RX_MPDU_START;
271}
272
273static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
274					      struct hal_rx_desc *rx_desc)
275{
276	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
277}
278
279static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
280						 struct hal_rx_desc *desc,
281						 u16 len)
282{
283	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
284}
285
286static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
287					struct hal_rx_desc *desc)
288{
289	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
290
291	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
292		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
293		 __le32_to_cpu(attn->info1)));
294}
295
296static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
297					     struct hal_rx_desc *desc)
298{
299	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
300}
301
302static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
303					     struct hal_rx_desc *desc)
304{
305	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
306}
307
308static void ath11k_dp_service_mon_ring(struct timer_list *t)
309{
310	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
311	int i;
312
313	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
314		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
315
316	mod_timer(&ab->mon_reap_timer, jiffies +
317		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
318}
319
320static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
321{
322	int i, reaped = 0;
323	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
324
325	do {
326		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
327			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
328								 NULL,
329								 DP_MON_SERVICE_BUDGET);
330
331		/* nothing more to reap */
332		if (reaped < DP_MON_SERVICE_BUDGET)
333			return 0;
334
335	} while (time_before(jiffies, timeout));
336
337	ath11k_warn(ab, "dp mon ring purge timeout");
338
339	return -ETIMEDOUT;
340}
341
342/* Returns number of Rx buffers replenished */
343int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
344			       struct dp_rxdma_ring *rx_ring,
345			       int req_entries,
346			       enum hal_rx_buf_return_buf_manager mgr)
347{
348	struct hal_srng *srng;
349	u32 *desc;
350	struct sk_buff *skb;
351	int num_free;
352	int num_remain;
353	int buf_id;
354	u32 cookie;
355	dma_addr_t paddr;
356
357	req_entries = min(req_entries, rx_ring->bufs_max);
358
359	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
360
361	spin_lock_bh(&srng->lock);
362
363	ath11k_hal_srng_access_begin(ab, srng);
364
365	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
366	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
367		req_entries = num_free;
368
369	req_entries = min(num_free, req_entries);
370	num_remain = req_entries;
371
372	while (num_remain > 0) {
373		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
374				    DP_RX_BUFFER_ALIGN_SIZE);
375		if (!skb)
376			break;
377
378		if (!IS_ALIGNED((unsigned long)skb->data,
379				DP_RX_BUFFER_ALIGN_SIZE)) {
380			skb_pull(skb,
381				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
382				 skb->data);
383		}
384
385		paddr = dma_map_single(ab->dev, skb->data,
386				       skb->len + skb_tailroom(skb),
387				       DMA_FROM_DEVICE);
388		if (dma_mapping_error(ab->dev, paddr))
389			goto fail_free_skb;
390
391		spin_lock_bh(&rx_ring->idr_lock);
392		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
393				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
394		spin_unlock_bh(&rx_ring->idr_lock);
395		if (buf_id <= 0)
396			goto fail_dma_unmap;
397
398		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
399		if (!desc)
400			goto fail_idr_remove;
401
402		ATH11K_SKB_RXCB(skb)->paddr = paddr;
403
404		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
405			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
406
407		num_remain--;
408
409		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
410	}
411
412	ath11k_hal_srng_access_end(ab, srng);
413
414	spin_unlock_bh(&srng->lock);
415
416	return req_entries - num_remain;
417
418fail_idr_remove:
419	spin_lock_bh(&rx_ring->idr_lock);
420	idr_remove(&rx_ring->bufs_idr, buf_id);
421	spin_unlock_bh(&rx_ring->idr_lock);
422fail_dma_unmap:
423	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
424			 DMA_FROM_DEVICE);
425fail_free_skb:
426	dev_kfree_skb_any(skb);
427
428	ath11k_hal_srng_access_end(ab, srng);
429
430	spin_unlock_bh(&srng->lock);
431
432	return req_entries - num_remain;
433}
434
435static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
436					 struct dp_rxdma_ring *rx_ring)
437{
438	struct sk_buff *skb;
439	int buf_id;
440
441	spin_lock_bh(&rx_ring->idr_lock);
442	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
443		idr_remove(&rx_ring->bufs_idr, buf_id);
444		/* TODO: Understand where internal driver does this dma_unmap
445		 * of rxdma_buffer.
446		 */
447		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
448				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
449		dev_kfree_skb_any(skb);
450	}
451
452	idr_destroy(&rx_ring->bufs_idr);
453	spin_unlock_bh(&rx_ring->idr_lock);
454
455	return 0;
456}
457
458static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
459{
460	struct ath11k_pdev_dp *dp = &ar->dp;
461	struct ath11k_base *ab = ar->ab;
462	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
463	int i;
464
465	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
466
467	rx_ring = &dp->rxdma_mon_buf_ring;
468	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
469
470	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
471		rx_ring = &dp->rx_mon_status_refill_ring[i];
472		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
473	}
474
475	return 0;
476}
477
478static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
479					  struct dp_rxdma_ring *rx_ring,
480					  u32 ringtype)
481{
482	struct ath11k_pdev_dp *dp = &ar->dp;
483	int num_entries;
484
485	num_entries = rx_ring->refill_buf_ring.size /
486		ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
487
488	rx_ring->bufs_max = num_entries;
489	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
490				   ar->ab->hw_params.hal_params->rx_buf_rbm);
491	return 0;
492}
493
494static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
495{
496	struct ath11k_pdev_dp *dp = &ar->dp;
497	struct ath11k_base *ab = ar->ab;
498	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
499	int i;
500
501	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
502
503	if (ar->ab->hw_params.rxdma1_enable) {
504		rx_ring = &dp->rxdma_mon_buf_ring;
505		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
506	}
507
508	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
509		rx_ring = &dp->rx_mon_status_refill_ring[i];
510		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
511	}
512
513	return 0;
514}
515
516static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
517{
518	struct ath11k_pdev_dp *dp = &ar->dp;
519	struct ath11k_base *ab = ar->ab;
520	int i;
521
522	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
523
524	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
525		if (ab->hw_params.rx_mac_buf_ring)
526			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
527
528		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
529		ath11k_dp_srng_cleanup(ab,
530				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
531	}
532
533	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
534}
535
536void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
537{
538	struct ath11k_dp *dp = &ab->dp;
539	int i;
540
541	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
542		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
543}
544
545int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
546{
547	struct ath11k_dp *dp = &ab->dp;
548	int ret;
549	int i;
550
551	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
552		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
553					   HAL_REO_DST, i, 0,
554					   DP_REO_DST_RING_SIZE);
555		if (ret) {
556			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
557			goto err_reo_cleanup;
558		}
559	}
560
561	return 0;
562
563err_reo_cleanup:
564	ath11k_dp_pdev_reo_cleanup(ab);
565
566	return ret;
567}
568
569static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
570{
571	struct ath11k_pdev_dp *dp = &ar->dp;
572	struct ath11k_base *ab = ar->ab;
573	struct dp_srng *srng = NULL;
574	int i;
575	int ret;
576
577	ret = ath11k_dp_srng_setup(ar->ab,
578				   &dp->rx_refill_buf_ring.refill_buf_ring,
579				   HAL_RXDMA_BUF, 0,
580				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
581	if (ret) {
582		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
583		return ret;
584	}
585
586	if (ar->ab->hw_params.rx_mac_buf_ring) {
587		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
588			ret = ath11k_dp_srng_setup(ar->ab,
589						   &dp->rx_mac_buf_ring[i],
590						   HAL_RXDMA_BUF, 1,
591						   dp->mac_id + i, 1024);
592			if (ret) {
593				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
594					    i);
595				return ret;
596			}
597		}
598	}
599
600	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
601		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
602					   HAL_RXDMA_DST, 0, dp->mac_id + i,
603					   DP_RXDMA_ERR_DST_RING_SIZE);
604		if (ret) {
605			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
606			return ret;
607		}
608	}
609
610	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
611		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
612		ret = ath11k_dp_srng_setup(ar->ab,
613					   srng,
614					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
615					   DP_RXDMA_MON_STATUS_RING_SIZE);
616		if (ret) {
617			ath11k_warn(ar->ab,
618				    "failed to setup rx_mon_status_refill_ring %d\n", i);
619			return ret;
620		}
621	}
622
623	/* if rxdma1_enable is false, then it doesn't need
624	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
625	 * and rxdma_mon_desc_ring.
626	 * init reap timer for QCA6390.
627	 */
628	if (!ar->ab->hw_params.rxdma1_enable) {
629		//init mon status buffer reap timer
630		timer_setup(&ar->ab->mon_reap_timer,
631			    ath11k_dp_service_mon_ring, 0);
632		return 0;
633	}
634
635	ret = ath11k_dp_srng_setup(ar->ab,
636				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
637				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
638				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
639	if (ret) {
640		ath11k_warn(ar->ab,
641			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
642		return ret;
643	}
644
645	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
646				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
647				   DP_RXDMA_MONITOR_DST_RING_SIZE);
648	if (ret) {
649		ath11k_warn(ar->ab,
650			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
651		return ret;
652	}
653
654	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
655				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
656				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
657	if (ret) {
658		ath11k_warn(ar->ab,
659			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
660		return ret;
661	}
662
663	return 0;
664}
665
666void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
667{
668	struct ath11k_dp *dp = &ab->dp;
669	struct dp_reo_cmd *cmd, *tmp;
670	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
671	struct dp_rx_tid *rx_tid;
672
673	spin_lock_bh(&dp->reo_cmd_lock);
674	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
675		list_del(&cmd->list);
676		rx_tid = &cmd->data;
677		if (rx_tid->vaddr) {
678			dma_unmap_single(ab->dev, rx_tid->paddr,
679					 rx_tid->size, DMA_BIDIRECTIONAL);
680			kfree(rx_tid->vaddr);
681			rx_tid->vaddr = NULL;
682		}
683		kfree(cmd);
684	}
685
686	list_for_each_entry_safe(cmd_cache, tmp_cache,
687				 &dp->reo_cmd_cache_flush_list, list) {
688		list_del(&cmd_cache->list);
689		dp->reo_cmd_cache_flush_count--;
690		rx_tid = &cmd_cache->data;
691		if (rx_tid->vaddr) {
692			dma_unmap_single(ab->dev, rx_tid->paddr,
693					 rx_tid->size, DMA_BIDIRECTIONAL);
694			kfree(rx_tid->vaddr);
695			rx_tid->vaddr = NULL;
696		}
697		kfree(cmd_cache);
698	}
699	spin_unlock_bh(&dp->reo_cmd_lock);
700}
701
702static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
703				   enum hal_reo_cmd_status status)
704{
705	struct dp_rx_tid *rx_tid = ctx;
706
707	if (status != HAL_REO_CMD_SUCCESS)
708		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
709			    rx_tid->tid, status);
710	if (rx_tid->vaddr) {
711		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
712				 DMA_BIDIRECTIONAL);
713		kfree(rx_tid->vaddr);
714		rx_tid->vaddr = NULL;
715	}
716}
717
718static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
719				      struct dp_rx_tid *rx_tid)
720{
721	struct ath11k_hal_reo_cmd cmd = {0};
722	unsigned long tot_desc_sz, desc_sz;
723	int ret;
724
725	tot_desc_sz = rx_tid->size;
726	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
727
728	while (tot_desc_sz > desc_sz) {
729		tot_desc_sz -= desc_sz;
730		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
731		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
732		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
733						HAL_REO_CMD_FLUSH_CACHE, &cmd,
734						NULL);
735		if (ret)
736			ath11k_warn(ab,
737				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
738				    rx_tid->tid, ret);
739	}
740
741	memset(&cmd, 0, sizeof(cmd));
742	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
743	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
744	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
745	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
746					HAL_REO_CMD_FLUSH_CACHE,
747					&cmd, ath11k_dp_reo_cmd_free);
748	if (ret) {
749		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
750			   rx_tid->tid, ret);
751		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
752				 DMA_BIDIRECTIONAL);
753		kfree(rx_tid->vaddr);
754		rx_tid->vaddr = NULL;
755	}
756}
757
758static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
759				      enum hal_reo_cmd_status status)
760{
761	struct ath11k_base *ab = dp->ab;
762	struct dp_rx_tid *rx_tid = ctx;
763	struct dp_reo_cache_flush_elem *elem, *tmp;
764
765	if (status == HAL_REO_CMD_DRAIN) {
766		goto free_desc;
767	} else if (status != HAL_REO_CMD_SUCCESS) {
768		/* Shouldn't happen! Cleanup in case of other failure? */
769		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
770			    rx_tid->tid, status);
771		return;
772	}
773
774	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
775	if (!elem)
776		goto free_desc;
777
778	elem->ts = jiffies;
779	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
780
781	spin_lock_bh(&dp->reo_cmd_lock);
782	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
783	dp->reo_cmd_cache_flush_count++;
784
785	/* Flush and invalidate aged REO desc from HW cache */
786	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
787				 list) {
788		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
789		    time_after(jiffies, elem->ts +
790			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
791			list_del(&elem->list);
792			dp->reo_cmd_cache_flush_count--;
793			spin_unlock_bh(&dp->reo_cmd_lock);
794
795			ath11k_dp_reo_cache_flush(ab, &elem->data);
796			kfree(elem);
797			spin_lock_bh(&dp->reo_cmd_lock);
798		}
799	}
800	spin_unlock_bh(&dp->reo_cmd_lock);
801
802	return;
803free_desc:
804	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
805			 DMA_BIDIRECTIONAL);
806	kfree(rx_tid->vaddr);
807	rx_tid->vaddr = NULL;
808}
809
810void ath11k_peer_rx_tid_delete(struct ath11k *ar,
811			       struct ath11k_peer *peer, u8 tid)
812{
813	struct ath11k_hal_reo_cmd cmd = {0};
814	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
815	int ret;
816
817	if (!rx_tid->active)
818		return;
819
820	rx_tid->active = false;
821
822	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
823	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
824	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
825	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
826	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
827					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
828					ath11k_dp_rx_tid_del_func);
829	if (ret) {
830		if (ret != -ESHUTDOWN)
831			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
832				   tid, ret);
833		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
834				 DMA_BIDIRECTIONAL);
835		kfree(rx_tid->vaddr);
836		rx_tid->vaddr = NULL;
837	}
838
839	rx_tid->paddr = 0;
840	rx_tid->size = 0;
841}
842
843static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
844					 u32 *link_desc,
845					 enum hal_wbm_rel_bm_act action)
846{
847	struct ath11k_dp *dp = &ab->dp;
848	struct hal_srng *srng;
849	u32 *desc;
850	int ret = 0;
851
852	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
853
854	spin_lock_bh(&srng->lock);
855
856	ath11k_hal_srng_access_begin(ab, srng);
857
858	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
859	if (!desc) {
860		ret = -ENOBUFS;
861		goto exit;
862	}
863
864	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
865					 action);
866
867exit:
868	ath11k_hal_srng_access_end(ab, srng);
869
870	spin_unlock_bh(&srng->lock);
871
872	return ret;
873}
874
875static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
876{
877	struct ath11k_base *ab = rx_tid->ab;
878
879	lockdep_assert_held(&ab->base_lock);
880
881	if (rx_tid->dst_ring_desc) {
882		if (rel_link_desc)
883			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
884						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
885		kfree(rx_tid->dst_ring_desc);
886		rx_tid->dst_ring_desc = NULL;
887	}
888
889	rx_tid->cur_sn = 0;
890	rx_tid->last_frag_no = 0;
891	rx_tid->rx_frag_bitmap = 0;
892	__skb_queue_purge(&rx_tid->rx_frags);
893}
894
895void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
896{
897	struct dp_rx_tid *rx_tid;
898	int i;
899
900	lockdep_assert_held(&ar->ab->base_lock);
901
902	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
903		rx_tid = &peer->rx_tid[i];
904
905		spin_unlock_bh(&ar->ab->base_lock);
906		del_timer_sync(&rx_tid->frag_timer);
907		spin_lock_bh(&ar->ab->base_lock);
908
909		ath11k_dp_rx_frags_cleanup(rx_tid, true);
910	}
911}
912
913void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
914{
915	struct dp_rx_tid *rx_tid;
916	int i;
917
918	lockdep_assert_held(&ar->ab->base_lock);
919
920	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
921		rx_tid = &peer->rx_tid[i];
922
923		ath11k_peer_rx_tid_delete(ar, peer, i);
924		ath11k_dp_rx_frags_cleanup(rx_tid, true);
925
926		spin_unlock_bh(&ar->ab->base_lock);
927		del_timer_sync(&rx_tid->frag_timer);
928		spin_lock_bh(&ar->ab->base_lock);
929	}
930}
931
932static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
933					 struct ath11k_peer *peer,
934					 struct dp_rx_tid *rx_tid,
935					 u32 ba_win_sz, u16 ssn,
936					 bool update_ssn)
937{
938	struct ath11k_hal_reo_cmd cmd = {0};
939	int ret;
940
941	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
942	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
943	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
944	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
945	cmd.ba_window_size = ba_win_sz;
946
947	if (update_ssn) {
948		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
949		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
950	}
951
952	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
953					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
954					NULL);
955	if (ret) {
956		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
957			    rx_tid->tid, ret);
958		return ret;
959	}
960
961	rx_tid->ba_win_sz = ba_win_sz;
962
963	return 0;
964}
965
966static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
967				      const u8 *peer_mac, int vdev_id, u8 tid)
968{
969	struct ath11k_peer *peer;
970	struct dp_rx_tid *rx_tid;
971
972	spin_lock_bh(&ab->base_lock);
973
974	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
975	if (!peer) {
976		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
977		goto unlock_exit;
978	}
979
980	rx_tid = &peer->rx_tid[tid];
981	if (!rx_tid->active)
982		goto unlock_exit;
983
984	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
985			 DMA_BIDIRECTIONAL);
986	kfree(rx_tid->vaddr);
987	rx_tid->vaddr = NULL;
988
989	rx_tid->active = false;
990
991unlock_exit:
992	spin_unlock_bh(&ab->base_lock);
993}
994
995int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
996			     u8 tid, u32 ba_win_sz, u16 ssn,
997			     enum hal_pn_type pn_type)
998{
999	struct ath11k_base *ab = ar->ab;
1000	struct ath11k_peer *peer;
1001	struct dp_rx_tid *rx_tid;
1002	u32 hw_desc_sz;
1003	u32 *addr_aligned;
1004	void *vaddr;
1005	dma_addr_t paddr;
1006	int ret;
1007
1008	spin_lock_bh(&ab->base_lock);
1009
1010	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
1011	if (!peer) {
1012		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
1013			    peer_mac);
1014		spin_unlock_bh(&ab->base_lock);
1015		return -ENOENT;
1016	}
1017
1018	rx_tid = &peer->rx_tid[tid];
1019	/* Update the tid queue if it is already setup */
1020	if (rx_tid->active) {
1021		paddr = rx_tid->paddr;
1022		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
1023						    ba_win_sz, ssn, true);
1024		spin_unlock_bh(&ab->base_lock);
1025		if (ret) {
1026			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
1027				    peer_mac, tid, ret);
1028			return ret;
1029		}
1030
1031		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1032							     peer_mac, paddr,
1033							     tid, 1, ba_win_sz);
1034		if (ret)
1035			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
1036				    peer_mac, tid, ret);
1037		return ret;
1038	}
1039
1040	rx_tid->tid = tid;
1041
1042	rx_tid->ba_win_sz = ba_win_sz;
1043
1044	/* TODO: Optimize the memory allocation for qos tid based on
1045	 * the actual BA window size in REO tid update path.
1046	 */
1047	if (tid == HAL_DESC_REO_NON_QOS_TID)
1048		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
1049	else
1050		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
1051
1052	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
1053	if (!vaddr) {
1054		spin_unlock_bh(&ab->base_lock);
1055		return -ENOMEM;
1056	}
1057
1058	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
1059
1060	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
1061				   ssn, pn_type);
1062
1063	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1064			       DMA_BIDIRECTIONAL);
1065
1066	ret = dma_mapping_error(ab->dev, paddr);
1067	if (ret) {
1068		spin_unlock_bh(&ab->base_lock);
1069		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
1070			    peer_mac, tid, ret);
1071		goto err_mem_free;
1072	}
1073
1074	rx_tid->vaddr = vaddr;
1075	rx_tid->paddr = paddr;
1076	rx_tid->size = hw_desc_sz;
1077	rx_tid->active = true;
1078
1079	spin_unlock_bh(&ab->base_lock);
1080
1081	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1082						     paddr, tid, 1, ba_win_sz);
1083	if (ret) {
1084		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
1085			    peer_mac, tid, ret);
1086		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
1087	}
1088
1089	return ret;
1090
1091err_mem_free:
1092	kfree(rx_tid->vaddr);
1093	rx_tid->vaddr = NULL;
1094
1095	return ret;
1096}
1097
1098int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1099			     struct ieee80211_ampdu_params *params)
1100{
1101	struct ath11k_base *ab = ar->ab;
1102	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1103	int vdev_id = arsta->arvif->vdev_id;
1104	int ret;
1105
1106	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1107				       params->tid, params->buf_size,
1108				       params->ssn, arsta->pn_type);
1109	if (ret)
1110		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
1111
1112	return ret;
1113}
1114
1115int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1116			    struct ieee80211_ampdu_params *params)
1117{
1118	struct ath11k_base *ab = ar->ab;
1119	struct ath11k_peer *peer;
1120	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1121	int vdev_id = arsta->arvif->vdev_id;
1122	dma_addr_t paddr;
1123	bool active;
1124	int ret;
1125
1126	spin_lock_bh(&ab->base_lock);
1127
1128	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1129	if (!peer) {
1130		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1131		spin_unlock_bh(&ab->base_lock);
1132		return -ENOENT;
1133	}
1134
1135	paddr = peer->rx_tid[params->tid].paddr;
1136	active = peer->rx_tid[params->tid].active;
1137
1138	if (!active) {
1139		spin_unlock_bh(&ab->base_lock);
1140		return 0;
1141	}
1142
1143	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1144	spin_unlock_bh(&ab->base_lock);
1145	if (ret) {
1146		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1147			    params->tid, ret);
1148		return ret;
1149	}
1150
1151	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1152						     params->sta->addr, paddr,
1153						     params->tid, 1, 1);
1154	if (ret)
1155		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
1156			    ret);
1157
1158	return ret;
1159}
1160
1161int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1162				       const u8 *peer_addr,
1163				       enum set_key_cmd key_cmd,
1164				       struct ieee80211_key_conf *key)
1165{
1166	struct ath11k *ar = arvif->ar;
1167	struct ath11k_base *ab = ar->ab;
1168	struct ath11k_hal_reo_cmd cmd = {0};
1169	struct ath11k_peer *peer;
1170	struct dp_rx_tid *rx_tid;
1171	u8 tid;
1172	int ret = 0;
1173
1174	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1175	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1176	 * for now.
1177	 */
1178	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1179		return 0;
1180
1181	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1182	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1183		    HAL_REO_CMD_UPD0_PN_SIZE |
1184		    HAL_REO_CMD_UPD0_PN_VALID |
1185		    HAL_REO_CMD_UPD0_PN_CHECK |
1186		    HAL_REO_CMD_UPD0_SVLD;
1187
1188	switch (key->cipher) {
1189	case WLAN_CIPHER_SUITE_TKIP:
1190	case WLAN_CIPHER_SUITE_CCMP:
1191	case WLAN_CIPHER_SUITE_CCMP_256:
1192	case WLAN_CIPHER_SUITE_GCMP:
1193	case WLAN_CIPHER_SUITE_GCMP_256:
1194		if (key_cmd == SET_KEY) {
1195			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1196			cmd.pn_size = 48;
1197		}
1198		break;
1199	default:
1200		break;
1201	}
1202
1203	spin_lock_bh(&ab->base_lock);
1204
1205	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1206	if (!peer) {
1207		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1208		spin_unlock_bh(&ab->base_lock);
1209		return -ENOENT;
1210	}
1211
1212	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1213		rx_tid = &peer->rx_tid[tid];
1214		if (!rx_tid->active)
1215			continue;
1216		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1217		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1218		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1219						HAL_REO_CMD_UPDATE_RX_QUEUE,
1220						&cmd, NULL);
1221		if (ret) {
1222			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1223				    tid, ret);
1224			break;
1225		}
1226	}
1227
1228	spin_unlock_bh(&ab->base_lock);
1229
1230	return ret;
1231}
1232
1233static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1234					     u16 peer_id)
1235{
1236	int i;
1237
1238	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1239		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1240			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1241				return i;
1242		} else {
1243			return i;
1244		}
1245	}
1246
1247	return -EINVAL;
1248}
1249
1250static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1251					   u16 tag, u16 len, const void *ptr,
1252					   void *data)
1253{
1254	struct htt_ppdu_stats_info *ppdu_info;
1255	struct htt_ppdu_user_stats *user_stats;
1256	int cur_user;
1257	u16 peer_id;
1258
1259	ppdu_info = (struct htt_ppdu_stats_info *)data;
1260
1261	switch (tag) {
1262	case HTT_PPDU_STATS_TAG_COMMON:
1263		if (len < sizeof(struct htt_ppdu_stats_common)) {
1264			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1265				    len, tag);
1266			return -EINVAL;
1267		}
1268		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1269		       sizeof(struct htt_ppdu_stats_common));
1270		break;
1271	case HTT_PPDU_STATS_TAG_USR_RATE:
1272		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1273			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1274				    len, tag);
1275			return -EINVAL;
1276		}
1277
1278#if defined(__linux__)
1279		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1280#elif defined(__FreeBSD__)
1281		peer_id = ((const struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1282#endif
1283		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1284						      peer_id);
1285		if (cur_user < 0)
1286			return -EINVAL;
1287		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1288		user_stats->peer_id = peer_id;
1289		user_stats->is_valid_peer_id = true;
1290		memcpy((void *)&user_stats->rate, ptr,
1291		       sizeof(struct htt_ppdu_stats_user_rate));
1292		user_stats->tlv_flags |= BIT(tag);
1293		break;
1294	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1295		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1296			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1297				    len, tag);
1298			return -EINVAL;
1299		}
1300
1301#if defined(__linux__)
1302		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1303#elif defined(__FreeBSD__)
1304		peer_id = ((const struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1305#endif
1306		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1307						      peer_id);
1308		if (cur_user < 0)
1309			return -EINVAL;
1310		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1311		user_stats->peer_id = peer_id;
1312		user_stats->is_valid_peer_id = true;
1313		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1314		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1315		user_stats->tlv_flags |= BIT(tag);
1316		break;
1317	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1318		if (len <
1319		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1320			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1321				    len, tag);
1322			return -EINVAL;
1323		}
1324
1325		peer_id =
1326#if defined(__linux__)
1327		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1328#elif defined(__FreeBSD__)
1329		((const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1330#endif
1331		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1332						      peer_id);
1333		if (cur_user < 0)
1334			return -EINVAL;
1335		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1336		user_stats->peer_id = peer_id;
1337		user_stats->is_valid_peer_id = true;
1338		memcpy((void *)&user_stats->ack_ba, ptr,
1339		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1340		user_stats->tlv_flags |= BIT(tag);
1341		break;
1342	}
1343	return 0;
1344}
1345
1346#if defined(__linux__)
1347int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1348#elif defined(__FreeBSD__)
1349int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const u8 *ptr, size_t len,
1350#endif
1351			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1352				       const void *ptr, void *data),
1353			   void *data)
1354{
1355	const struct htt_tlv *tlv;
1356#if defined(__linux__)
1357	const void *begin = ptr;
1358#elif defined(__FreeBSD__)
1359	const u8 *begin = ptr;
1360#endif
1361	u16 tlv_tag, tlv_len;
1362	int ret = -EINVAL;
1363
1364	while (len > 0) {
1365		if (len < sizeof(*tlv)) {
1366			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1367				   ptr - begin, len, sizeof(*tlv));
1368			return -EINVAL;
1369		}
1370#if defined(__linux__)
1371		tlv = (struct htt_tlv *)ptr;
1372#elif defined(__FreeBSD__)
1373		tlv = (const struct htt_tlv *)(const void *)ptr;
1374#endif
1375		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1376		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1377		ptr += sizeof(*tlv);
1378		len -= sizeof(*tlv);
1379
1380		if (tlv_len > len) {
1381			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1382				   tlv_tag, ptr - begin, len, tlv_len);
1383			return -EINVAL;
1384		}
1385		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1386		if (ret == -ENOMEM)
1387			return ret;
1388
1389		ptr += tlv_len;
1390		len -= tlv_len;
1391	}
1392	return 0;
1393}
1394
1395static void
1396ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1397				struct htt_ppdu_stats *ppdu_stats, u8 user)
1398{
1399	struct ath11k_base *ab = ar->ab;
1400	struct ath11k_peer *peer;
1401	struct ieee80211_sta *sta;
1402	struct ath11k_sta *arsta;
1403	struct htt_ppdu_stats_user_rate *user_rate;
1404	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1405	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1406	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1407	int ret;
1408	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1409	u32 succ_bytes = 0;
1410	u16 rate = 0, succ_pkts = 0;
1411	u32 tx_duration = 0;
1412	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1413	bool is_ampdu = false;
1414
1415	if (!usr_stats)
1416		return;
1417
1418	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1419		return;
1420
1421	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1422		is_ampdu =
1423			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1424
1425	if (usr_stats->tlv_flags &
1426	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1427		succ_bytes = usr_stats->ack_ba.success_bytes;
1428		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1429				      usr_stats->ack_ba.info);
1430		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1431				usr_stats->ack_ba.info);
1432	}
1433
1434	if (common->fes_duration_us)
1435		tx_duration = common->fes_duration_us;
1436
1437	user_rate = &usr_stats->rate;
1438	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1439	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1440	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1441	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1442	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1443	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1444
1445	/* Note: If host configured fixed rates and in some other special
1446	 * cases, the broadcast/management frames are sent in different rates.
1447	 * Firmware rate's control to be skipped for this?
1448	 */
1449
1450	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1451		ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1452		return;
1453	}
1454
1455	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1456		ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1457		return;
1458	}
1459
1460	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1461		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1462			    mcs, nss);
1463		return;
1464	}
1465
1466	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1467		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1468							    flags,
1469							    &rate_idx,
1470							    &rate);
1471		if (ret < 0)
1472			return;
1473	}
1474
1475	rcu_read_lock();
1476	spin_lock_bh(&ab->base_lock);
1477	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1478
1479	if (!peer || !peer->sta) {
1480		spin_unlock_bh(&ab->base_lock);
1481		rcu_read_unlock();
1482		return;
1483	}
1484
1485	sta = peer->sta;
1486	arsta = (struct ath11k_sta *)sta->drv_priv;
1487
1488	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1489
1490	switch (flags) {
1491	case WMI_RATE_PREAMBLE_OFDM:
1492		arsta->txrate.legacy = rate;
1493		break;
1494	case WMI_RATE_PREAMBLE_CCK:
1495		arsta->txrate.legacy = rate;
1496		break;
1497	case WMI_RATE_PREAMBLE_HT:
1498		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1499		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1500		if (sgi)
1501			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1502		break;
1503	case WMI_RATE_PREAMBLE_VHT:
1504		arsta->txrate.mcs = mcs;
1505		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1506		if (sgi)
1507			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1508		break;
1509	case WMI_RATE_PREAMBLE_HE:
1510		arsta->txrate.mcs = mcs;
1511		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1512		arsta->txrate.he_dcm = dcm;
1513		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
1514		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
1515						((user_rate->ru_end -
1516						 user_rate->ru_start) + 1);
1517		break;
1518	}
1519
1520	arsta->txrate.nss = nss;
1521
1522	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1523	arsta->tx_duration += tx_duration;
1524	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1525
1526	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
1527	 * So skip peer stats update for mgmt packets.
1528	 */
1529	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1530		memset(peer_stats, 0, sizeof(*peer_stats));
1531		peer_stats->succ_pkts = succ_pkts;
1532		peer_stats->succ_bytes = succ_bytes;
1533		peer_stats->is_ampdu = is_ampdu;
1534		peer_stats->duration = tx_duration;
1535		peer_stats->ba_fails =
1536			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1537			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1538
1539		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1540			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1541	}
1542
1543	spin_unlock_bh(&ab->base_lock);
1544	rcu_read_unlock();
1545}
1546
1547static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1548					 struct htt_ppdu_stats *ppdu_stats)
1549{
1550	u8 user;
1551
1552	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1553		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1554}
1555
1556static
1557struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1558							u32 ppdu_id)
1559{
1560	struct htt_ppdu_stats_info *ppdu_info;
1561
1562	lockdep_assert_held(&ar->data_lock);
1563
1564	if (!list_empty(&ar->ppdu_stats_info)) {
1565		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1566			if (ppdu_info->ppdu_id == ppdu_id)
1567				return ppdu_info;
1568		}
1569
1570		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1571			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1572						     typeof(*ppdu_info), list);
1573			list_del(&ppdu_info->list);
1574			ar->ppdu_stat_list_depth--;
1575			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1576			kfree(ppdu_info);
1577		}
1578	}
1579
1580	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1581	if (!ppdu_info)
1582		return NULL;
1583
1584	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1585	ar->ppdu_stat_list_depth++;
1586
1587	return ppdu_info;
1588}
1589
1590static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1591				      struct sk_buff *skb)
1592{
1593	struct ath11k_htt_ppdu_stats_msg *msg;
1594	struct htt_ppdu_stats_info *ppdu_info;
1595	struct ath11k *ar;
1596	int ret;
1597	u8 pdev_id;
1598	u32 ppdu_id, len;
1599
1600	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1601	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1602	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1603	ppdu_id = msg->ppdu_id;
1604
1605	rcu_read_lock();
1606	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1607	if (!ar) {
1608		ret = -EINVAL;
1609		goto out;
1610	}
1611
1612	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1613		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1614
1615	spin_lock_bh(&ar->data_lock);
1616	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1617	if (!ppdu_info) {
1618		ret = -EINVAL;
1619		goto out_unlock_data;
1620	}
1621
1622	ppdu_info->ppdu_id = ppdu_id;
1623	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1624				     ath11k_htt_tlv_ppdu_stats_parse,
1625				     (void *)ppdu_info);
1626	if (ret) {
1627		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1628		goto out_unlock_data;
1629	}
1630
1631out_unlock_data:
1632	spin_unlock_bh(&ar->data_lock);
1633
1634out:
1635	rcu_read_unlock();
1636
1637	return ret;
1638}
1639
1640static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1641{
1642	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1643	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1644	struct ath11k *ar;
1645	u8 pdev_id;
1646
1647	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1648	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1649	if (!ar) {
1650		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1651		return;
1652	}
1653
1654	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1655				ar->ab->pktlog_defs_checksum);
1656}
1657
1658static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1659						  struct sk_buff *skb)
1660{
1661	u32 *data = (u32 *)skb->data;
1662	u8 pdev_id, ring_type, ring_id, pdev_idx;
1663	u16 hp, tp;
1664	u32 backpressure_time;
1665	struct ath11k_bp_stats *bp_stats;
1666
1667	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1668	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1669	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1670	++data;
1671
1672	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1673	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1674	++data;
1675
1676	backpressure_time = *data;
1677
1678	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
1679		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1680
1681	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1682		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1683			return;
1684
1685		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1686	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1687		pdev_idx = DP_HW2SW_MACID(pdev_id);
1688
1689		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1690			return;
1691
1692		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1693	} else {
1694		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1695			    ring_type);
1696		return;
1697	}
1698
1699	spin_lock_bh(&ab->base_lock);
1700	bp_stats->hp = hp;
1701	bp_stats->tp = tp;
1702	bp_stats->count++;
1703	bp_stats->jiffies = jiffies;
1704	spin_unlock_bh(&ab->base_lock);
1705}
1706
1707void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1708				       struct sk_buff *skb)
1709{
1710	struct ath11k_dp *dp = &ab->dp;
1711	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1712	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1713	u16 peer_id;
1714	u8 vdev_id;
1715	u8 mac_addr[ETH_ALEN];
1716	u16 peer_mac_h16;
1717	u16 ast_hash;
1718	u16 hw_peer_id;
1719
1720	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1721
1722	switch (type) {
1723	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1724		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1725						  resp->version_msg.version);
1726		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1727						  resp->version_msg.version);
1728		complete(&dp->htt_tgt_version_received);
1729		break;
1730	case HTT_T2H_MSG_TYPE_PEER_MAP:
1731		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1732				    resp->peer_map_ev.info);
1733		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1734				    resp->peer_map_ev.info);
1735		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1736					 resp->peer_map_ev.info1);
1737		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1738				       peer_mac_h16, mac_addr);
1739		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1740		break;
1741	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1742		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1743				    resp->peer_map_ev.info);
1744		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1745				    resp->peer_map_ev.info);
1746		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1747					 resp->peer_map_ev.info1);
1748		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1749				       peer_mac_h16, mac_addr);
1750		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1751				     resp->peer_map_ev.info2);
1752		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1753				       resp->peer_map_ev.info1);
1754		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1755				      hw_peer_id);
1756		break;
1757	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1758	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1759		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1760				    resp->peer_unmap_ev.info);
1761		ath11k_peer_unmap_event(ab, peer_id);
1762		break;
1763	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1764		ath11k_htt_pull_ppdu_stats(ab, skb);
1765		break;
1766	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1767		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1768		break;
1769	case HTT_T2H_MSG_TYPE_PKTLOG:
1770		ath11k_htt_pktlog(ab, skb);
1771		break;
1772	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1773		ath11k_htt_backpressure_event_handler(ab, skb);
1774		break;
1775	default:
1776		ath11k_warn(ab, "htt event %d not handled\n", type);
1777		break;
1778	}
1779
1780	dev_kfree_skb_any(skb);
1781}
1782
1783static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1784				      struct sk_buff_head *msdu_list,
1785				      struct sk_buff *first, struct sk_buff *last,
1786				      u8 l3pad_bytes, int msdu_len)
1787{
1788	struct ath11k_base *ab = ar->ab;
1789	struct sk_buff *skb;
1790	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1791	int buf_first_hdr_len, buf_first_len;
1792	struct hal_rx_desc *ldesc;
1793	int space_extra, rem_len, buf_len;
1794	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
1795
1796	/* As the msdu is spread across multiple rx buffers,
1797	 * find the offset to the start of msdu for computing
1798	 * the length of the msdu in the first buffer.
1799	 */
1800	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1801	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1802
1803	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1804		skb_put(first, buf_first_hdr_len + msdu_len);
1805		skb_pull(first, buf_first_hdr_len);
1806		return 0;
1807	}
1808
1809	ldesc = (struct hal_rx_desc *)last->data;
1810	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
1811	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
1812
1813	/* MSDU spans over multiple buffers because the length of the MSDU
1814	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1815	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1816	 */
1817	skb_put(first, DP_RX_BUFFER_SIZE);
1818	skb_pull(first, buf_first_hdr_len);
1819
1820	/* When an MSDU spread over multiple buffers attention, MSDU_END and
1821	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
1822	 */
1823	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1824
1825	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1826	if (space_extra > 0 &&
1827	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1828		/* Free up all buffers of the MSDU */
1829		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1830			rxcb = ATH11K_SKB_RXCB(skb);
1831			if (!rxcb->is_continuation) {
1832				dev_kfree_skb_any(skb);
1833				break;
1834			}
1835			dev_kfree_skb_any(skb);
1836		}
1837		return -ENOMEM;
1838	}
1839
1840	rem_len = msdu_len - buf_first_len;
1841	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1842		rxcb = ATH11K_SKB_RXCB(skb);
1843		if (rxcb->is_continuation)
1844			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1845		else
1846			buf_len = rem_len;
1847
1848		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1849			WARN_ON_ONCE(1);
1850			dev_kfree_skb_any(skb);
1851			return -EINVAL;
1852		}
1853
1854		skb_put(skb, buf_len + hal_rx_desc_sz);
1855		skb_pull(skb, hal_rx_desc_sz);
1856		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1857					  buf_len);
1858		dev_kfree_skb_any(skb);
1859
1860		rem_len -= buf_len;
1861		if (!rxcb->is_continuation)
1862			break;
1863	}
1864
1865	return 0;
1866}
1867
1868static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1869						      struct sk_buff *first)
1870{
1871	struct sk_buff *skb;
1872	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1873
1874	if (!rxcb->is_continuation)
1875		return first;
1876
1877	skb_queue_walk(msdu_list, skb) {
1878		rxcb = ATH11K_SKB_RXCB(skb);
1879		if (!rxcb->is_continuation)
1880			return skb;
1881	}
1882
1883	return NULL;
1884}
1885
1886static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1887{
1888	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1889	struct rx_attention *rx_attention;
1890	bool ip_csum_fail, l4_csum_fail;
1891
1892	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1893	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1894	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1895
1896	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1897			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1898}
1899
1900static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1901				       enum hal_encrypt_type enctype)
1902{
1903	switch (enctype) {
1904	case HAL_ENCRYPT_TYPE_OPEN:
1905	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1906	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1907		return 0;
1908	case HAL_ENCRYPT_TYPE_CCMP_128:
1909		return IEEE80211_CCMP_MIC_LEN;
1910	case HAL_ENCRYPT_TYPE_CCMP_256:
1911		return IEEE80211_CCMP_256_MIC_LEN;
1912	case HAL_ENCRYPT_TYPE_GCMP_128:
1913	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1914		return IEEE80211_GCMP_MIC_LEN;
1915	case HAL_ENCRYPT_TYPE_WEP_40:
1916	case HAL_ENCRYPT_TYPE_WEP_104:
1917	case HAL_ENCRYPT_TYPE_WEP_128:
1918	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1919	case HAL_ENCRYPT_TYPE_WAPI:
1920		break;
1921	}
1922
1923	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1924	return 0;
1925}
1926
1927static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1928					 enum hal_encrypt_type enctype)
1929{
1930	switch (enctype) {
1931	case HAL_ENCRYPT_TYPE_OPEN:
1932		return 0;
1933	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1934	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1935		return IEEE80211_TKIP_IV_LEN;
1936	case HAL_ENCRYPT_TYPE_CCMP_128:
1937		return IEEE80211_CCMP_HDR_LEN;
1938	case HAL_ENCRYPT_TYPE_CCMP_256:
1939		return IEEE80211_CCMP_256_HDR_LEN;
1940	case HAL_ENCRYPT_TYPE_GCMP_128:
1941	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1942		return IEEE80211_GCMP_HDR_LEN;
1943	case HAL_ENCRYPT_TYPE_WEP_40:
1944	case HAL_ENCRYPT_TYPE_WEP_104:
1945	case HAL_ENCRYPT_TYPE_WEP_128:
1946	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1947	case HAL_ENCRYPT_TYPE_WAPI:
1948		break;
1949	}
1950
1951	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1952	return 0;
1953}
1954
1955static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1956				       enum hal_encrypt_type enctype)
1957{
1958	switch (enctype) {
1959	case HAL_ENCRYPT_TYPE_OPEN:
1960	case HAL_ENCRYPT_TYPE_CCMP_128:
1961	case HAL_ENCRYPT_TYPE_CCMP_256:
1962	case HAL_ENCRYPT_TYPE_GCMP_128:
1963	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1964		return 0;
1965	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1966	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1967		return IEEE80211_TKIP_ICV_LEN;
1968	case HAL_ENCRYPT_TYPE_WEP_40:
1969	case HAL_ENCRYPT_TYPE_WEP_104:
1970	case HAL_ENCRYPT_TYPE_WEP_128:
1971	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1972	case HAL_ENCRYPT_TYPE_WAPI:
1973		break;
1974	}
1975
1976	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1977	return 0;
1978}
1979
1980static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1981					 struct sk_buff *msdu,
1982					 u8 *first_hdr,
1983					 enum hal_encrypt_type enctype,
1984					 struct ieee80211_rx_status *status)
1985{
1986	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1987	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1988	struct ieee80211_hdr *hdr;
1989	size_t hdr_len;
1990	u8 da[ETH_ALEN];
1991	u8 sa[ETH_ALEN];
1992	u16 qos_ctl = 0;
1993	u8 *qos;
1994
1995	/* copy SA & DA and pull decapped header */
1996	hdr = (struct ieee80211_hdr *)msdu->data;
1997	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1998	ether_addr_copy(da, ieee80211_get_DA(hdr));
1999	ether_addr_copy(sa, ieee80211_get_SA(hdr));
2000	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
2001
2002	if (rxcb->is_first_msdu) {
2003		/* original 802.11 header is valid for the first msdu
2004		 * hence we can reuse the same header
2005		 */
2006		hdr = (struct ieee80211_hdr *)first_hdr;
2007		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2008
2009		/* Each A-MSDU subframe will be reported as a separate MSDU,
2010		 * so strip the A-MSDU bit from QoS Ctl.
2011		 */
2012		if (ieee80211_is_data_qos(hdr->frame_control)) {
2013			qos = ieee80211_get_qos_ctl(hdr);
2014			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
2015		}
2016	} else {
2017		/*  Rebuild qos header if this is a middle/last msdu */
2018		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2019
2020		/* Reset the order bit as the HT_Control header is stripped */
2021		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2022
2023		qos_ctl = rxcb->tid;
2024
2025		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2026			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2027
2028		/* TODO Add other QoS ctl fields when required */
2029
2030		/* copy decap header before overwriting for reuse below */
2031		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
2032	}
2033
2034	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2035		memcpy(skb_push(msdu,
2036				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2037#if defined(__linux__)
2038		       (void *)hdr + hdr_len,
2039#elif defined(__FreeBSD__)
2040		       (u8 *)hdr + hdr_len,
2041#endif
2042		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2043	}
2044
2045	if (!rxcb->is_first_msdu) {
2046		memcpy(skb_push(msdu,
2047				IEEE80211_QOS_CTL_LEN), &qos_ctl,
2048				IEEE80211_QOS_CTL_LEN);
2049		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2050		return;
2051	}
2052
2053	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2054
2055	/* original 802.11 header has a different DA and in
2056	 * case of 4addr it may also have different SA
2057	 */
2058	hdr = (struct ieee80211_hdr *)msdu->data;
2059	ether_addr_copy(ieee80211_get_DA(hdr), da);
2060	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2061}
2062
2063static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2064				       enum hal_encrypt_type enctype,
2065				       struct ieee80211_rx_status *status,
2066				       bool decrypted)
2067{
2068	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2069	struct ieee80211_hdr *hdr;
2070	size_t hdr_len;
2071	size_t crypto_len;
2072
2073	if (!rxcb->is_first_msdu ||
2074	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2075		WARN_ON_ONCE(1);
2076		return;
2077	}
2078
2079	skb_trim(msdu, msdu->len - FCS_LEN);
2080
2081	if (!decrypted)
2082		return;
2083
2084	hdr = (void *)msdu->data;
2085
2086	/* Tail */
2087	if (status->flag & RX_FLAG_IV_STRIPPED) {
2088		skb_trim(msdu, msdu->len -
2089			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2090
2091		skb_trim(msdu, msdu->len -
2092			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2093	} else {
2094		/* MIC */
2095		if (status->flag & RX_FLAG_MIC_STRIPPED)
2096			skb_trim(msdu, msdu->len -
2097				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2098
2099		/* ICV */
2100		if (status->flag & RX_FLAG_ICV_STRIPPED)
2101			skb_trim(msdu, msdu->len -
2102				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2103	}
2104
2105	/* MMIC */
2106	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2107	    !ieee80211_has_morefrags(hdr->frame_control) &&
2108	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2109		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2110
2111	/* Head */
2112	if (status->flag & RX_FLAG_IV_STRIPPED) {
2113		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2114		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2115
2116#if defined(__linux__)
2117		memmove((void *)msdu->data + crypto_len,
2118			(void *)msdu->data, hdr_len);
2119#elif defined(__FreeBSD__)
2120		memmove((u8 *)msdu->data + crypto_len,
2121			(u8 *)msdu->data, hdr_len);
2122#endif
2123		skb_pull(msdu, crypto_len);
2124	}
2125}
2126
2127static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2128					 struct sk_buff *msdu,
2129					 enum hal_encrypt_type enctype)
2130{
2131	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2132	struct ieee80211_hdr *hdr;
2133	size_t hdr_len, crypto_len;
2134#if defined(__linux__)
2135	void *rfc1042;
2136#elif defined(__FreeBSD__)
2137	u8 *rfc1042;
2138#endif
2139	bool is_amsdu;
2140
2141	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2142	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2143#if defined(__linux__)
2144	rfc1042 = hdr;
2145#elif defined(__FreeBSD__)
2146	rfc1042 = (void *)hdr;
2147#endif
2148
2149	if (rxcb->is_first_msdu) {
2150		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2151		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2152
2153		rfc1042 += hdr_len + crypto_len;
2154	}
2155
2156	if (is_amsdu)
2157		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2158
2159	return rfc1042;
2160}
2161
2162static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2163				       struct sk_buff *msdu,
2164				       u8 *first_hdr,
2165				       enum hal_encrypt_type enctype,
2166				       struct ieee80211_rx_status *status)
2167{
2168	struct ieee80211_hdr *hdr;
2169	struct ethhdr *eth;
2170	size_t hdr_len;
2171	u8 da[ETH_ALEN];
2172	u8 sa[ETH_ALEN];
2173	void *rfc1042;
2174
2175	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2176	if (WARN_ON_ONCE(!rfc1042))
2177		return;
2178
2179	/* pull decapped header and copy SA & DA */
2180	eth = (struct ethhdr *)msdu->data;
2181	ether_addr_copy(da, eth->h_dest);
2182	ether_addr_copy(sa, eth->h_source);
2183	skb_pull(msdu, sizeof(struct ethhdr));
2184
2185	/* push rfc1042/llc/snap */
2186	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2187	       sizeof(struct ath11k_dp_rfc1042_hdr));
2188
2189	/* push original 802.11 header */
2190	hdr = (struct ieee80211_hdr *)first_hdr;
2191	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2192
2193	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2194		memcpy(skb_push(msdu,
2195				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2196#if defined(__linux__)
2197		       (void *)hdr + hdr_len,
2198#elif defined(__FreeBSD__)
2199		       (u8 *)hdr + hdr_len,
2200#endif
2201		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2202	}
2203
2204	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2205
2206	/* original 802.11 header has a different DA and in
2207	 * case of 4addr it may also have different SA
2208	 */
2209	hdr = (struct ieee80211_hdr *)msdu->data;
2210	ether_addr_copy(ieee80211_get_DA(hdr), da);
2211	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2212}
2213
2214static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2215				   struct hal_rx_desc *rx_desc,
2216				   enum hal_encrypt_type enctype,
2217				   struct ieee80211_rx_status *status,
2218				   bool decrypted)
2219{
2220	u8 *first_hdr;
2221	u8 decap;
2222	struct ethhdr *ehdr;
2223
2224	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2225	decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2226
2227	switch (decap) {
2228	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2229		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2230					     enctype, status);
2231		break;
2232	case DP_RX_DECAP_TYPE_RAW:
2233		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2234					   decrypted);
2235		break;
2236	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2237		ehdr = (struct ethhdr *)msdu->data;
2238
2239		/* mac80211 allows fast path only for authorized STA */
2240		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2241			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2242			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2243						   enctype, status);
2244			break;
2245		}
2246
2247		/* PN for mcast packets will be validated in mac80211;
2248		 * remove eth header and add 802.11 header.
2249		 */
2250		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2251			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2252						   enctype, status);
2253		break;
2254	case DP_RX_DECAP_TYPE_8023:
2255		/* TODO: Handle undecap for these formats */
2256		break;
2257	}
2258}
2259
2260static struct ath11k_peer *
2261ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2262{
2263	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2264	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2265	struct ath11k_peer *peer = NULL;
2266
2267	lockdep_assert_held(&ab->base_lock);
2268
2269	if (rxcb->peer_id)
2270		peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2271
2272	if (peer)
2273		return peer;
2274
2275	if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2276		return NULL;
2277
2278	peer = ath11k_peer_find_by_addr(ab,
2279					ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2280	return peer;
2281}
2282
2283static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2284				struct sk_buff *msdu,
2285				struct hal_rx_desc *rx_desc,
2286				struct ieee80211_rx_status *rx_status)
2287{
2288	bool  fill_crypto_hdr;
2289	enum hal_encrypt_type enctype;
2290	bool is_decrypted = false;
2291	struct ath11k_skb_rxcb *rxcb;
2292	struct ieee80211_hdr *hdr;
2293	struct ath11k_peer *peer;
2294	struct rx_attention *rx_attention;
2295	u32 err_bitmap;
2296
2297	/* PN for multicast packets will be checked in mac80211 */
2298	rxcb = ATH11K_SKB_RXCB(msdu);
2299	fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2300	rxcb->is_mcbc = fill_crypto_hdr;
2301
2302	if (rxcb->is_mcbc) {
2303		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2304		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2305	}
2306
2307	spin_lock_bh(&ar->ab->base_lock);
2308	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2309	if (peer) {
2310		if (rxcb->is_mcbc)
2311			enctype = peer->sec_type_grp;
2312		else
2313			enctype = peer->sec_type;
2314	} else {
2315		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2316	}
2317	spin_unlock_bh(&ar->ab->base_lock);
2318
2319	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2320	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2321	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2322		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2323
2324	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2325	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2326			     RX_FLAG_MMIC_ERROR |
2327			     RX_FLAG_DECRYPTED |
2328			     RX_FLAG_IV_STRIPPED |
2329			     RX_FLAG_MMIC_STRIPPED);
2330
2331	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2332		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2333	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2334		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2335
2336	if (is_decrypted) {
2337		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2338
2339		if (fill_crypto_hdr)
2340			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2341					RX_FLAG_ICV_STRIPPED;
2342		else
2343			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2344					   RX_FLAG_PN_VALIDATED;
2345	}
2346
2347	ath11k_dp_rx_h_csum_offload(ar, msdu);
2348	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2349			       enctype, rx_status, is_decrypted);
2350
2351	if (!is_decrypted || fill_crypto_hdr)
2352		return;
2353
2354	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2355	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2356		hdr = (void *)msdu->data;
2357		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2358	}
2359}
2360
2361static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2362				struct ieee80211_rx_status *rx_status)
2363{
2364	struct ieee80211_supported_band *sband;
2365	enum rx_msdu_start_pkt_type pkt_type;
2366	u8 bw;
2367	u8 rate_mcs, nss;
2368	u8 sgi;
2369	bool is_cck, is_ldpc;
2370
2371	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2372	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2373	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2374	nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2375	sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2376
2377	switch (pkt_type) {
2378	case RX_MSDU_START_PKT_TYPE_11A:
2379	case RX_MSDU_START_PKT_TYPE_11B:
2380		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2381		sband = &ar->mac.sbands[rx_status->band];
2382		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2383								is_cck);
2384		break;
2385	case RX_MSDU_START_PKT_TYPE_11N:
2386		rx_status->encoding = RX_ENC_HT;
2387		if (rate_mcs > ATH11K_HT_MCS_MAX) {
2388			ath11k_warn(ar->ab,
2389				    "Received with invalid mcs in HT mode %d\n",
2390				     rate_mcs);
2391			break;
2392		}
2393		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2394		if (sgi)
2395			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2396		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2397		break;
2398	case RX_MSDU_START_PKT_TYPE_11AC:
2399		rx_status->encoding = RX_ENC_VHT;
2400		rx_status->rate_idx = rate_mcs;
2401		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2402			ath11k_warn(ar->ab,
2403				    "Received with invalid mcs in VHT mode %d\n",
2404				     rate_mcs);
2405			break;
2406		}
2407		rx_status->nss = nss;
2408		if (sgi)
2409			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2410		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2411		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2412		if (is_ldpc)
2413			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2414		break;
2415	case RX_MSDU_START_PKT_TYPE_11AX:
2416		rx_status->rate_idx = rate_mcs;
2417		if (rate_mcs > ATH11K_HE_MCS_MAX) {
2418			ath11k_warn(ar->ab,
2419				    "Received with invalid mcs in HE mode %d\n",
2420				    rate_mcs);
2421			break;
2422		}
2423		rx_status->encoding = RX_ENC_HE;
2424		rx_status->nss = nss;
2425		rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2426		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2427		break;
2428	}
2429}
2430
2431static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2432				struct ieee80211_rx_status *rx_status)
2433{
2434	u8 channel_num;
2435	u32 center_freq, meta_data;
2436	struct ieee80211_channel *channel;
2437
2438	rx_status->freq = 0;
2439	rx_status->rate_idx = 0;
2440	rx_status->nss = 0;
2441	rx_status->encoding = RX_ENC_LEGACY;
2442	rx_status->bw = RATE_INFO_BW_20;
2443
2444	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2445
2446	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2447	channel_num = meta_data;
2448	center_freq = meta_data >> 16;
2449
2450	if (center_freq >= ATH11K_MIN_6G_FREQ &&
2451	    center_freq <= ATH11K_MAX_6G_FREQ) {
2452		rx_status->band = NL80211_BAND_6GHZ;
2453		rx_status->freq = center_freq;
2454	} else if (channel_num >= 1 && channel_num <= 14) {
2455		rx_status->band = NL80211_BAND_2GHZ;
2456	} else if (channel_num >= 36 && channel_num <= 177) {
2457		rx_status->band = NL80211_BAND_5GHZ;
2458	} else {
2459		spin_lock_bh(&ar->data_lock);
2460		channel = ar->rx_channel;
2461		if (channel) {
2462			rx_status->band = channel->band;
2463			channel_num =
2464				ieee80211_frequency_to_channel(channel->center_freq);
2465		}
2466		spin_unlock_bh(&ar->data_lock);
2467		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2468				rx_desc, sizeof(struct hal_rx_desc));
2469	}
2470
2471	if (rx_status->band != NL80211_BAND_6GHZ)
2472		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2473								 rx_status->band);
2474
2475	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2476}
2477
2478static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2479				      struct sk_buff *msdu,
2480				      struct ieee80211_rx_status *status)
2481{
2482	static const struct ieee80211_radiotap_he known = {
2483		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2484				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2485		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2486	};
2487	struct ieee80211_rx_status *rx_status;
2488	struct ieee80211_radiotap_he *he = NULL;
2489	struct ieee80211_sta *pubsta = NULL;
2490	struct ath11k_peer *peer;
2491	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2492	u8 decap = DP_RX_DECAP_TYPE_RAW;
2493	bool is_mcbc = rxcb->is_mcbc;
2494	bool is_eapol = rxcb->is_eapol;
2495
2496	if (status->encoding == RX_ENC_HE &&
2497	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2498	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2499		he = skb_push(msdu, sizeof(known));
2500		memcpy(he, &known, sizeof(known));
2501		status->flag |= RX_FLAG_RADIOTAP_HE;
2502	}
2503
2504	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2505		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2506
2507	spin_lock_bh(&ar->ab->base_lock);
2508	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2509	if (peer && peer->sta)
2510		pubsta = peer->sta;
2511	spin_unlock_bh(&ar->ab->base_lock);
2512
2513	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2514		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2515		   msdu,
2516		   msdu->len,
2517		   peer ? peer->addr : NULL,
2518		   rxcb->tid,
2519		   is_mcbc ? "mcast" : "ucast",
2520		   rxcb->seq_no,
2521		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2522		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2523		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2524		   (status->encoding == RX_ENC_HE) ? "he" : "",
2525		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2526		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2527		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2528		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2529		   status->rate_idx,
2530		   status->nss,
2531		   status->freq,
2532		   status->band, status->flag,
2533		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2534		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2535		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2536
2537	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2538			msdu->data, msdu->len);
2539
2540	rx_status = IEEE80211_SKB_RXCB(msdu);
2541	*rx_status = *status;
2542
2543	/* TODO: trace rx packet */
2544
2545	/* PN for multicast packets are not validate in HW,
2546	 * so skip 802.3 rx path
2547	 * Also, fast_rx expects the STA to be authorized, hence
2548	 * eapol packets are sent in slow path.
2549	 */
2550	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2551	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2552		rx_status->flag |= RX_FLAG_8023;
2553
2554	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2555}
2556
2557static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2558				     struct sk_buff *msdu,
2559				     struct sk_buff_head *msdu_list,
2560				     struct ieee80211_rx_status *rx_status)
2561{
2562	struct ath11k_base *ab = ar->ab;
2563	struct hal_rx_desc *rx_desc, *lrx_desc;
2564	struct rx_attention *rx_attention;
2565	struct ath11k_skb_rxcb *rxcb;
2566	struct sk_buff *last_buf;
2567	u8 l3_pad_bytes;
2568	u8 *hdr_status;
2569	u16 msdu_len;
2570	int ret;
2571	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2572
2573	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2574	if (!last_buf) {
2575		ath11k_warn(ab,
2576			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2577		ret = -EIO;
2578		goto free_out;
2579	}
2580
2581	rx_desc = (struct hal_rx_desc *)msdu->data;
2582	if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2583		ath11k_warn(ar->ab, "msdu len not valid\n");
2584		ret = -EIO;
2585		goto free_out;
2586	}
2587
2588	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2589	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2590	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2591		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2592		ret = -EIO;
2593		goto free_out;
2594	}
2595
2596	rxcb = ATH11K_SKB_RXCB(msdu);
2597	rxcb->rx_desc = rx_desc;
2598	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2599	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2600
2601	if (rxcb->is_frag) {
2602		skb_pull(msdu, hal_rx_desc_sz);
2603	} else if (!rxcb->is_continuation) {
2604		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2605			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2606			ret = -EINVAL;
2607			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2608			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2609					sizeof(struct ieee80211_hdr));
2610			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2611					sizeof(struct hal_rx_desc));
2612			goto free_out;
2613		}
2614		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2615		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2616	} else {
2617		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2618						 msdu, last_buf,
2619						 l3_pad_bytes, msdu_len);
2620		if (ret) {
2621			ath11k_warn(ab,
2622				    "failed to coalesce msdu rx buffer%d\n", ret);
2623			goto free_out;
2624		}
2625	}
2626
2627	ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2628	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2629
2630	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2631
2632	return 0;
2633
2634free_out:
2635	return ret;
2636}
2637
2638static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2639						  struct napi_struct *napi,
2640						  struct sk_buff_head *msdu_list,
2641						  int mac_id)
2642{
2643	struct sk_buff *msdu;
2644	struct ath11k *ar;
2645	struct ieee80211_rx_status rx_status = {0};
2646	int ret;
2647
2648	if (skb_queue_empty(msdu_list))
2649		return;
2650
2651	if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2652		__skb_queue_purge(msdu_list);
2653		return;
2654	}
2655
2656	ar = ab->pdevs[mac_id].ar;
2657	if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2658		__skb_queue_purge(msdu_list);
2659		return;
2660	}
2661
2662	while ((msdu = __skb_dequeue(msdu_list))) {
2663		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2664		if (unlikely(ret)) {
2665			ath11k_dbg(ab, ATH11K_DBG_DATA,
2666				   "Unable to process msdu %d", ret);
2667			dev_kfree_skb_any(msdu);
2668			continue;
2669		}
2670
2671		ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2672	}
2673}
2674
2675int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2676			 struct napi_struct *napi, int budget)
2677{
2678	struct ath11k_dp *dp = &ab->dp;
2679	struct dp_rxdma_ring *rx_ring;
2680	int num_buffs_reaped[MAX_RADIOS] = {0};
2681	struct sk_buff_head msdu_list[MAX_RADIOS];
2682	struct ath11k_skb_rxcb *rxcb;
2683	int total_msdu_reaped = 0;
2684	struct hal_srng *srng;
2685	struct sk_buff *msdu;
2686	bool done = false;
2687	int buf_id, mac_id;
2688	struct ath11k *ar;
2689	struct hal_reo_dest_ring *desc;
2690	enum hal_reo_dest_ring_push_reason push_reason;
2691	u32 cookie;
2692	int i;
2693
2694	for (i = 0; i < MAX_RADIOS; i++)
2695		__skb_queue_head_init(&msdu_list[i]);
2696
2697	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2698
2699	spin_lock_bh(&srng->lock);
2700
2701try_again:
2702	ath11k_hal_srng_access_begin(ab, srng);
2703
2704	while (likely(desc =
2705	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2706									     srng))) {
2707		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2708				   desc->buf_addr_info.info1);
2709		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2710				   cookie);
2711		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2712
2713		if (unlikely(buf_id == 0))
2714			continue;
2715
2716		ar = ab->pdevs[mac_id].ar;
2717		rx_ring = &ar->dp.rx_refill_buf_ring;
2718		spin_lock_bh(&rx_ring->idr_lock);
2719		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2720		if (unlikely(!msdu)) {
2721			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2722				    buf_id);
2723			spin_unlock_bh(&rx_ring->idr_lock);
2724			continue;
2725		}
2726
2727		idr_remove(&rx_ring->bufs_idr, buf_id);
2728		spin_unlock_bh(&rx_ring->idr_lock);
2729
2730		rxcb = ATH11K_SKB_RXCB(msdu);
2731		dma_unmap_single(ab->dev, rxcb->paddr,
2732				 msdu->len + skb_tailroom(msdu),
2733				 DMA_FROM_DEVICE);
2734
2735		num_buffs_reaped[mac_id]++;
2736
2737		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2738					desc->info0);
2739		if (unlikely(push_reason !=
2740			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2741			dev_kfree_skb_any(msdu);
2742			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2743			continue;
2744		}
2745
2746		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2747					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2748		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2749					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2750		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2751					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2752		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2753					  desc->rx_mpdu_info.meta_data);
2754		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2755					 desc->rx_mpdu_info.info0);
2756		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2757				      desc->info0);
2758
2759		rxcb->mac_id = mac_id;
2760		__skb_queue_tail(&msdu_list[mac_id], msdu);
2761
2762		if (rxcb->is_continuation) {
2763			done = false;
2764		} else {
2765			total_msdu_reaped++;
2766			done = true;
2767		}
2768
2769		if (total_msdu_reaped >= budget)
2770			break;
2771	}
2772
2773	/* Hw might have updated the head pointer after we cached it.
2774	 * In this case, even though there are entries in the ring we'll
2775	 * get rx_desc NULL. Give the read another try with updated cached
2776	 * head pointer so that we can reap complete MPDU in the current
2777	 * rx processing.
2778	 */
2779	if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2780		ath11k_hal_srng_access_end(ab, srng);
2781		goto try_again;
2782	}
2783
2784	ath11k_hal_srng_access_end(ab, srng);
2785
2786	spin_unlock_bh(&srng->lock);
2787
2788	if (unlikely(!total_msdu_reaped))
2789		goto exit;
2790
2791	for (i = 0; i < ab->num_radios; i++) {
2792		if (!num_buffs_reaped[i])
2793			continue;
2794
2795		ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2796
2797		ar = ab->pdevs[i].ar;
2798		rx_ring = &ar->dp.rx_refill_buf_ring;
2799
2800		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2801					   ab->hw_params.hal_params->rx_buf_rbm);
2802	}
2803exit:
2804	return total_msdu_reaped;
2805}
2806
2807static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2808					   struct hal_rx_mon_ppdu_info *ppdu_info)
2809{
2810	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2811	u32 num_msdu;
2812	int i;
2813
2814	if (!rx_stats)
2815		return;
2816
2817	arsta->rssi_comb = ppdu_info->rssi_comb;
2818	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2819
2820	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2821		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2822
2823	rx_stats->num_msdu += num_msdu;
2824	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2825				    ppdu_info->tcp_ack_msdu_count;
2826	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2827	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2828
2829	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2830	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2831		ppdu_info->nss = 1;
2832		ppdu_info->mcs = HAL_RX_MAX_MCS;
2833		ppdu_info->tid = IEEE80211_NUM_TIDS;
2834	}
2835
2836	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2837		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2838
2839	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2840		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2841
2842	if (ppdu_info->gi < HAL_RX_GI_MAX)
2843		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2844
2845	if (ppdu_info->bw < HAL_RX_BW_MAX)
2846		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2847
2848	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2849		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2850
2851	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2852		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2853
2854	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2855		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2856
2857	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2858		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2859
2860	if (ppdu_info->is_stbc)
2861		rx_stats->stbc_count += num_msdu;
2862
2863	if (ppdu_info->beamformed)
2864		rx_stats->beamformed_count += num_msdu;
2865
2866	if (ppdu_info->num_mpdu_fcs_ok > 1)
2867		rx_stats->ampdu_msdu_count += num_msdu;
2868	else
2869		rx_stats->non_ampdu_msdu_count += num_msdu;
2870
2871	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2872	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2873	rx_stats->dcm_count += ppdu_info->dcm;
2874	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2875
2876	arsta->rssi_comb = ppdu_info->rssi_comb;
2877
2878	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2879			     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2880
2881	for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2882		arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2883
2884	rx_stats->rx_duration += ppdu_info->rx_duration;
2885	arsta->rx_duration = rx_stats->rx_duration;
2886}
2887
2888static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2889							 struct dp_rxdma_ring *rx_ring,
2890							 int *buf_id)
2891{
2892	struct sk_buff *skb;
2893	dma_addr_t paddr;
2894
2895	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2896			    DP_RX_BUFFER_ALIGN_SIZE);
2897
2898	if (!skb)
2899		goto fail_alloc_skb;
2900
2901	if (!IS_ALIGNED((unsigned long)skb->data,
2902			DP_RX_BUFFER_ALIGN_SIZE)) {
2903		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2904			 skb->data);
2905	}
2906
2907	paddr = dma_map_single(ab->dev, skb->data,
2908			       skb->len + skb_tailroom(skb),
2909			       DMA_FROM_DEVICE);
2910	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2911		goto fail_free_skb;
2912
2913	spin_lock_bh(&rx_ring->idr_lock);
2914	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2915			    rx_ring->bufs_max, GFP_ATOMIC);
2916	spin_unlock_bh(&rx_ring->idr_lock);
2917	if (*buf_id < 0)
2918		goto fail_dma_unmap;
2919
2920	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2921	return skb;
2922
2923fail_dma_unmap:
2924	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2925			 DMA_FROM_DEVICE);
2926fail_free_skb:
2927	dev_kfree_skb_any(skb);
2928fail_alloc_skb:
2929	return NULL;
2930}
2931
2932int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2933					   struct dp_rxdma_ring *rx_ring,
2934					   int req_entries,
2935					   enum hal_rx_buf_return_buf_manager mgr)
2936{
2937	struct hal_srng *srng;
2938	u32 *desc;
2939	struct sk_buff *skb;
2940	int num_free;
2941	int num_remain;
2942	int buf_id;
2943	u32 cookie;
2944	dma_addr_t paddr;
2945
2946	req_entries = min(req_entries, rx_ring->bufs_max);
2947
2948	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2949
2950	spin_lock_bh(&srng->lock);
2951
2952	ath11k_hal_srng_access_begin(ab, srng);
2953
2954	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2955
2956	req_entries = min(num_free, req_entries);
2957	num_remain = req_entries;
2958
2959	while (num_remain > 0) {
2960		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2961							&buf_id);
2962		if (!skb)
2963			break;
2964		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2965
2966		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2967		if (!desc)
2968			goto fail_desc_get;
2969
2970		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2971			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2972
2973		num_remain--;
2974
2975		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2976	}
2977
2978	ath11k_hal_srng_access_end(ab, srng);
2979
2980	spin_unlock_bh(&srng->lock);
2981
2982	return req_entries - num_remain;
2983
2984fail_desc_get:
2985	spin_lock_bh(&rx_ring->idr_lock);
2986	idr_remove(&rx_ring->bufs_idr, buf_id);
2987	spin_unlock_bh(&rx_ring->idr_lock);
2988	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2989			 DMA_FROM_DEVICE);
2990	dev_kfree_skb_any(skb);
2991	ath11k_hal_srng_access_end(ab, srng);
2992	spin_unlock_bh(&srng->lock);
2993
2994	return req_entries - num_remain;
2995}
2996
2997#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2998
2999static void
3000ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
3001					 struct hal_tlv_hdr *tlv)
3002{
3003	struct hal_rx_ppdu_start *ppdu_start;
3004	u16 ppdu_id_diff, ppdu_id, tlv_len;
3005	u8 *ptr;
3006
3007	/* PPDU id is part of second tlv, move ptr to second tlv */
3008	tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
3009	ptr = (u8 *)tlv;
3010	ptr += sizeof(*tlv) + tlv_len;
3011	tlv = (struct hal_tlv_hdr *)ptr;
3012
3013	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
3014		return;
3015
3016	ptr += sizeof(*tlv);
3017	ppdu_start = (struct hal_rx_ppdu_start *)ptr;
3018	ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
3019			    __le32_to_cpu(ppdu_start->info0));
3020
3021	if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
3022		pmon->buf_state = DP_MON_STATUS_LEAD;
3023		ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
3024		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
3025			pmon->buf_state = DP_MON_STATUS_LAG;
3026	} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
3027		pmon->buf_state = DP_MON_STATUS_LAG;
3028		ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
3029		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
3030			pmon->buf_state = DP_MON_STATUS_LEAD;
3031	}
3032}
3033
3034static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
3035					     int *budget, struct sk_buff_head *skb_list)
3036{
3037	struct ath11k *ar;
3038	const struct ath11k_hw_hal_params *hal_params;
3039	struct ath11k_pdev_dp *dp;
3040	struct dp_rxdma_ring *rx_ring;
3041	struct ath11k_mon_data *pmon;
3042	struct hal_srng *srng;
3043	void *rx_mon_status_desc;
3044	struct sk_buff *skb;
3045	struct ath11k_skb_rxcb *rxcb;
3046	struct hal_tlv_hdr *tlv;
3047	u32 cookie;
3048	int buf_id, srng_id;
3049	dma_addr_t paddr;
3050	u8 rbm;
3051	int num_buffs_reaped = 0;
3052
3053	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3054	dp = &ar->dp;
3055	pmon = &dp->mon_data;
3056	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3057	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3058
3059	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3060
3061	spin_lock_bh(&srng->lock);
3062
3063	ath11k_hal_srng_access_begin(ab, srng);
3064	while (*budget) {
3065		*budget -= 1;
3066		rx_mon_status_desc =
3067			ath11k_hal_srng_src_peek(ab, srng);
3068		if (!rx_mon_status_desc) {
3069			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3070			break;
3071		}
3072
3073		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3074						&cookie, &rbm);
3075		if (paddr) {
3076			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3077
3078			spin_lock_bh(&rx_ring->idr_lock);
3079			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3080			spin_unlock_bh(&rx_ring->idr_lock);
3081
3082			if (!skb) {
3083				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3084					    buf_id);
3085				pmon->buf_state = DP_MON_STATUS_REPLINISH;
3086				goto move_next;
3087			}
3088
3089			rxcb = ATH11K_SKB_RXCB(skb);
3090
3091			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3092						skb->len + skb_tailroom(skb),
3093						DMA_FROM_DEVICE);
3094
3095			tlv = (struct hal_tlv_hdr *)skb->data;
3096			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3097					HAL_RX_STATUS_BUFFER_DONE) {
3098				ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3099					    FIELD_GET(HAL_TLV_HDR_TAG,
3100						      tlv->tl), buf_id);
3101				/* If done status is missing, hold onto status
3102				 * ring until status is done for this status
3103				 * ring buffer.
3104				 * Keep HP in mon_status_ring unchanged,
3105				 * and break from here.
3106				 * Check status for same buffer for next time
3107				 */
3108				pmon->buf_state = DP_MON_STATUS_NO_DMA;
3109				break;
3110			}
3111
3112			spin_lock_bh(&rx_ring->idr_lock);
3113			idr_remove(&rx_ring->bufs_idr, buf_id);
3114			spin_unlock_bh(&rx_ring->idr_lock);
3115			if (ab->hw_params.full_monitor_mode) {
3116				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3117				if (paddr == pmon->mon_status_paddr)
3118					pmon->buf_state = DP_MON_STATUS_MATCH;
3119			}
3120
3121			dma_unmap_single(ab->dev, rxcb->paddr,
3122					 skb->len + skb_tailroom(skb),
3123					 DMA_FROM_DEVICE);
3124
3125			__skb_queue_tail(skb_list, skb);
3126		} else {
3127			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3128		}
3129move_next:
3130		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3131							&buf_id);
3132
3133		if (!skb) {
3134			hal_params = ab->hw_params.hal_params;
3135			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3136							hal_params->rx_buf_rbm);
3137			num_buffs_reaped++;
3138			break;
3139		}
3140		rxcb = ATH11K_SKB_RXCB(skb);
3141
3142		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3143			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3144
3145		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3146						cookie,
3147						ab->hw_params.hal_params->rx_buf_rbm);
3148		ath11k_hal_srng_src_get_next_entry(ab, srng);
3149		num_buffs_reaped++;
3150	}
3151	ath11k_hal_srng_access_end(ab, srng);
3152	spin_unlock_bh(&srng->lock);
3153
3154	return num_buffs_reaped;
3155}
3156
3157static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3158{
3159	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3160
3161	spin_lock_bh(&rx_tid->ab->base_lock);
3162	if (rx_tid->last_frag_no &&
3163	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3164		spin_unlock_bh(&rx_tid->ab->base_lock);
3165		return;
3166	}
3167	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3168	spin_unlock_bh(&rx_tid->ab->base_lock);
3169}
3170
3171int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3172{
3173	struct ath11k_base *ab = ar->ab;
3174	struct crypto_shash *tfm;
3175	struct ath11k_peer *peer;
3176	struct dp_rx_tid *rx_tid;
3177	int i;
3178
3179	tfm = crypto_alloc_shash("michael_mic", 0, 0);
3180	if (IS_ERR(tfm)) {
3181		ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3182			    PTR_ERR(tfm));
3183		return PTR_ERR(tfm);
3184	}
3185
3186	spin_lock_bh(&ab->base_lock);
3187
3188	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3189	if (!peer) {
3190		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3191		spin_unlock_bh(&ab->base_lock);
3192		crypto_free_shash(tfm);
3193		return -ENOENT;
3194	}
3195
3196	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3197		rx_tid = &peer->rx_tid[i];
3198		rx_tid->ab = ab;
3199		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3200		skb_queue_head_init(&rx_tid->rx_frags);
3201	}
3202
3203	peer->tfm_mmic = tfm;
3204	peer->dp_setup_done = true;
3205	spin_unlock_bh(&ab->base_lock);
3206
3207	return 0;
3208}
3209
3210static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3211				      struct ieee80211_hdr *hdr, u8 *data,
3212				      size_t data_len, u8 *mic)
3213{
3214	SHASH_DESC_ON_STACK(desc, tfm);
3215	u8 mic_hdr[16] = {0};
3216	u8 tid = 0;
3217	int ret;
3218
3219	if (!tfm)
3220		return -EINVAL;
3221
3222	desc->tfm = tfm;
3223
3224	ret = crypto_shash_setkey(tfm, key, 8);
3225	if (ret)
3226		goto out;
3227
3228	ret = crypto_shash_init(desc);
3229	if (ret)
3230		goto out;
3231
3232	/* TKIP MIC header */
3233	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3234	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3235	if (ieee80211_is_data_qos(hdr->frame_control))
3236		tid = ieee80211_get_tid(hdr);
3237	mic_hdr[12] = tid;
3238
3239	ret = crypto_shash_update(desc, mic_hdr, 16);
3240	if (ret)
3241		goto out;
3242	ret = crypto_shash_update(desc, data, data_len);
3243	if (ret)
3244		goto out;
3245	ret = crypto_shash_final(desc, mic);
3246out:
3247	shash_desc_zero(desc);
3248	return ret;
3249}
3250
3251static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3252					  struct sk_buff *msdu)
3253{
3254	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3255	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3256	struct ieee80211_key_conf *key_conf;
3257	struct ieee80211_hdr *hdr;
3258	u8 mic[IEEE80211_CCMP_MIC_LEN];
3259	int head_len, tail_len, ret;
3260	size_t data_len;
3261	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3262	u8 *key, *data;
3263	u8 key_idx;
3264
3265	if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3266	    HAL_ENCRYPT_TYPE_TKIP_MIC)
3267		return 0;
3268
3269	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3270	hdr_len = ieee80211_hdrlen(hdr->frame_control);
3271	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3272	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3273
3274	if (!is_multicast_ether_addr(hdr->addr1))
3275		key_idx = peer->ucast_keyidx;
3276	else
3277		key_idx = peer->mcast_keyidx;
3278
3279	key_conf = peer->keys[key_idx];
3280
3281	data = msdu->data + head_len;
3282	data_len = msdu->len - head_len - tail_len;
3283	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3284
3285	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3286	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3287		goto mic_fail;
3288
3289	return 0;
3290
3291mic_fail:
3292	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3293	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3294
3295	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3296		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3297	skb_pull(msdu, hal_rx_desc_sz);
3298
3299	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3300	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3301			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3302	ieee80211_rx(ar->hw, msdu);
3303	return -EINVAL;
3304}
3305
3306static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3307					enum hal_encrypt_type enctype, u32 flags)
3308{
3309	struct ieee80211_hdr *hdr;
3310	size_t hdr_len;
3311	size_t crypto_len;
3312	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3313
3314	if (!flags)
3315		return;
3316
3317	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3318
3319	if (flags & RX_FLAG_MIC_STRIPPED)
3320		skb_trim(msdu, msdu->len -
3321			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3322
3323	if (flags & RX_FLAG_ICV_STRIPPED)
3324		skb_trim(msdu, msdu->len -
3325			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3326
3327	if (flags & RX_FLAG_IV_STRIPPED) {
3328		hdr_len = ieee80211_hdrlen(hdr->frame_control);
3329		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3330
3331#if defined(__linux__)
3332		memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3333			(void *)msdu->data + hal_rx_desc_sz, hdr_len);
3334#elif defined(__FreeBSD__)
3335		memmove((u8 *)msdu->data + hal_rx_desc_sz + crypto_len,
3336			(u8 *)msdu->data + hal_rx_desc_sz, hdr_len);
3337#endif
3338		skb_pull(msdu, crypto_len);
3339	}
3340}
3341
3342static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3343				 struct ath11k_peer *peer,
3344				 struct dp_rx_tid *rx_tid,
3345				 struct sk_buff **defrag_skb)
3346{
3347	struct hal_rx_desc *rx_desc;
3348	struct sk_buff *skb, *first_frag, *last_frag;
3349	struct ieee80211_hdr *hdr;
3350	struct rx_attention *rx_attention;
3351	enum hal_encrypt_type enctype;
3352	bool is_decrypted = false;
3353	int msdu_len = 0;
3354	int extra_space;
3355	u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3356
3357	first_frag = skb_peek(&rx_tid->rx_frags);
3358	last_frag = skb_peek_tail(&rx_tid->rx_frags);
3359
3360	skb_queue_walk(&rx_tid->rx_frags, skb) {
3361		flags = 0;
3362		rx_desc = (struct hal_rx_desc *)skb->data;
3363		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3364
3365		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3366		if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3367			rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3368			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3369		}
3370
3371		if (is_decrypted) {
3372			if (skb != first_frag)
3373				flags |=  RX_FLAG_IV_STRIPPED;
3374			if (skb != last_frag)
3375				flags |= RX_FLAG_ICV_STRIPPED |
3376					 RX_FLAG_MIC_STRIPPED;
3377		}
3378
3379		/* RX fragments are always raw packets */
3380		if (skb != last_frag)
3381			skb_trim(skb, skb->len - FCS_LEN);
3382		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3383
3384		if (skb != first_frag)
3385			skb_pull(skb, hal_rx_desc_sz +
3386				      ieee80211_hdrlen(hdr->frame_control));
3387		msdu_len += skb->len;
3388	}
3389
3390	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3391	if (extra_space > 0 &&
3392	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3393		return -ENOMEM;
3394
3395	__skb_unlink(first_frag, &rx_tid->rx_frags);
3396	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3397		skb_put_data(first_frag, skb->data, skb->len);
3398		dev_kfree_skb_any(skb);
3399	}
3400
3401	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3402	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3403	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3404
3405	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3406		first_frag = NULL;
3407
3408	*defrag_skb = first_frag;
3409	return 0;
3410}
3411
3412static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3413					      struct sk_buff *defrag_skb)
3414{
3415	struct ath11k_base *ab = ar->ab;
3416	struct ath11k_pdev_dp *dp = &ar->dp;
3417	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3418	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3419	struct hal_reo_entrance_ring *reo_ent_ring;
3420	struct hal_reo_dest_ring *reo_dest_ring;
3421	struct dp_link_desc_bank *link_desc_banks;
3422	struct hal_rx_msdu_link *msdu_link;
3423	struct hal_rx_msdu_details *msdu0;
3424	struct hal_srng *srng;
3425	dma_addr_t paddr;
3426	u32 desc_bank, msdu_info, mpdu_info;
3427	u32 dst_idx, cookie, hal_rx_desc_sz;
3428	int ret, buf_id;
3429
3430	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3431	link_desc_banks = ab->dp.link_desc_banks;
3432	reo_dest_ring = rx_tid->dst_ring_desc;
3433
3434	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3435#if defined(__linux__)
3436	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3437#elif defined(__FreeBSD__)
3438	msdu_link = (struct hal_rx_msdu_link *)((u8 *)link_desc_banks[desc_bank].vaddr +
3439#endif
3440			(paddr - link_desc_banks[desc_bank].paddr));
3441	msdu0 = &msdu_link->msdu_link[0];
3442	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3443	memset(msdu0, 0, sizeof(*msdu0));
3444
3445	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3446		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3447		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3448		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3449			       defrag_skb->len - hal_rx_desc_sz) |
3450		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3451		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3452		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3453	msdu0->rx_msdu_info.info0 = msdu_info;
3454
3455	/* change msdu len in hal rx desc */
3456	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3457
3458	paddr = dma_map_single(ab->dev, defrag_skb->data,
3459			       defrag_skb->len + skb_tailroom(defrag_skb),
3460			       DMA_TO_DEVICE);
3461	if (dma_mapping_error(ab->dev, paddr))
3462		return -ENOMEM;
3463
3464	spin_lock_bh(&rx_refill_ring->idr_lock);
3465	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3466			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3467	spin_unlock_bh(&rx_refill_ring->idr_lock);
3468	if (buf_id < 0) {
3469		ret = -ENOMEM;
3470		goto err_unmap_dma;
3471	}
3472
3473	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3474	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3475		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3476
3477	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3478					ab->hw_params.hal_params->rx_buf_rbm);
3479
3480	/* Fill mpdu details into reo entrace ring */
3481	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3482
3483	spin_lock_bh(&srng->lock);
3484	ath11k_hal_srng_access_begin(ab, srng);
3485
3486	reo_ent_ring = (struct hal_reo_entrance_ring *)
3487			ath11k_hal_srng_src_get_next_entry(ab, srng);
3488	if (!reo_ent_ring) {
3489		ath11k_hal_srng_access_end(ab, srng);
3490		spin_unlock_bh(&srng->lock);
3491		ret = -ENOSPC;
3492		goto err_free_idr;
3493	}
3494	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3495
3496	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3497	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3498					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3499
3500	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3501		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3502		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3503		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3504		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3505		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3506		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3507
3508	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3509	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3510	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3511	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3512					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3513						   reo_dest_ring->info0)) |
3514			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3515	ath11k_hal_srng_access_end(ab, srng);
3516	spin_unlock_bh(&srng->lock);
3517
3518	return 0;
3519
3520err_free_idr:
3521	spin_lock_bh(&rx_refill_ring->idr_lock);
3522	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3523	spin_unlock_bh(&rx_refill_ring->idr_lock);
3524err_unmap_dma:
3525	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3526			 DMA_TO_DEVICE);
3527	return ret;
3528}
3529
3530static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3531				    struct sk_buff *a, struct sk_buff *b)
3532{
3533	int frag1, frag2;
3534
3535	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3536	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3537
3538	return frag1 - frag2;
3539}
3540
3541static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3542				      struct sk_buff_head *frag_list,
3543				      struct sk_buff *cur_frag)
3544{
3545	struct sk_buff *skb;
3546	int cmp;
3547
3548	skb_queue_walk(frag_list, skb) {
3549		cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3550		if (cmp < 0)
3551			continue;
3552		__skb_queue_before(frag_list, skb, cur_frag);
3553		return;
3554	}
3555	__skb_queue_tail(frag_list, cur_frag);
3556}
3557
3558static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3559{
3560	struct ieee80211_hdr *hdr;
3561	u64 pn = 0;
3562	u8 *ehdr;
3563	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3564
3565	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3566	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3567
3568	pn = ehdr[0];
3569	pn |= (u64)ehdr[1] << 8;
3570	pn |= (u64)ehdr[4] << 16;
3571	pn |= (u64)ehdr[5] << 24;
3572	pn |= (u64)ehdr[6] << 32;
3573	pn |= (u64)ehdr[7] << 40;
3574
3575	return pn;
3576}
3577
3578static bool
3579ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3580{
3581	enum hal_encrypt_type encrypt_type;
3582	struct sk_buff *first_frag, *skb;
3583	struct hal_rx_desc *desc;
3584	u64 last_pn;
3585	u64 cur_pn;
3586
3587	first_frag = skb_peek(&rx_tid->rx_frags);
3588	desc = (struct hal_rx_desc *)first_frag->data;
3589
3590	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3591	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3592	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3593	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3594	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3595		return true;
3596
3597	last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3598	skb_queue_walk(&rx_tid->rx_frags, skb) {
3599		if (skb == first_frag)
3600			continue;
3601
3602		cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3603		if (cur_pn != last_pn + 1)
3604			return false;
3605		last_pn = cur_pn;
3606	}
3607	return true;
3608}
3609
3610static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3611				    struct sk_buff *msdu,
3612				    u32 *ring_desc)
3613{
3614	struct ath11k_base *ab = ar->ab;
3615	struct hal_rx_desc *rx_desc;
3616	struct ath11k_peer *peer;
3617	struct dp_rx_tid *rx_tid;
3618	struct sk_buff *defrag_skb = NULL;
3619	u32 peer_id;
3620	u16 seqno, frag_no;
3621	u8 tid;
3622	int ret = 0;
3623	bool more_frags;
3624	bool is_mcbc;
3625
3626	rx_desc = (struct hal_rx_desc *)msdu->data;
3627	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3628	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3629	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3630	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3631	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3632	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3633
3634	/* Multicast/Broadcast fragments are not expected */
3635	if (is_mcbc)
3636		return -EINVAL;
3637
3638	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3639	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3640	    tid > IEEE80211_NUM_TIDS)
3641		return -EINVAL;
3642
3643	/* received unfragmented packet in reo
3644	 * exception ring, this shouldn't happen
3645	 * as these packets typically come from
3646	 * reo2sw srngs.
3647	 */
3648	if (WARN_ON_ONCE(!frag_no && !more_frags))
3649		return -EINVAL;
3650
3651	spin_lock_bh(&ab->base_lock);
3652	peer = ath11k_peer_find_by_id(ab, peer_id);
3653	if (!peer) {
3654		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3655			    peer_id);
3656		ret = -ENOENT;
3657		goto out_unlock;
3658	}
3659	if (!peer->dp_setup_done) {
3660		ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3661			    peer->addr, peer_id);
3662		ret = -ENOENT;
3663		goto out_unlock;
3664	}
3665
3666	rx_tid = &peer->rx_tid[tid];
3667
3668	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3669	    skb_queue_empty(&rx_tid->rx_frags)) {
3670		/* Flush stored fragments and start a new sequence */
3671		ath11k_dp_rx_frags_cleanup(rx_tid, true);
3672		rx_tid->cur_sn = seqno;
3673	}
3674
3675	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3676		/* Fragment already present */
3677		ret = -EINVAL;
3678		goto out_unlock;
3679	}
3680
3681	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
3682		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3683	else
3684		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3685
3686	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3687	if (!more_frags)
3688		rx_tid->last_frag_no = frag_no;
3689
3690	if (frag_no == 0) {
3691		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3692						sizeof(*rx_tid->dst_ring_desc),
3693						GFP_ATOMIC);
3694		if (!rx_tid->dst_ring_desc) {
3695			ret = -ENOMEM;
3696			goto out_unlock;
3697		}
3698	} else {
3699		ath11k_dp_rx_link_desc_return(ab, ring_desc,
3700					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3701	}
3702
3703	if (!rx_tid->last_frag_no ||
3704	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3705		mod_timer(&rx_tid->frag_timer, jiffies +
3706					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3707		goto out_unlock;
3708	}
3709
3710	spin_unlock_bh(&ab->base_lock);
3711	del_timer_sync(&rx_tid->frag_timer);
3712	spin_lock_bh(&ab->base_lock);
3713
3714	peer = ath11k_peer_find_by_id(ab, peer_id);
3715	if (!peer)
3716		goto err_frags_cleanup;
3717
3718	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3719		goto err_frags_cleanup;
3720
3721	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3722		goto err_frags_cleanup;
3723
3724	if (!defrag_skb)
3725		goto err_frags_cleanup;
3726
3727	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3728		goto err_frags_cleanup;
3729
3730	ath11k_dp_rx_frags_cleanup(rx_tid, false);
3731	goto out_unlock;
3732
3733err_frags_cleanup:
3734	dev_kfree_skb_any(defrag_skb);
3735	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3736out_unlock:
3737	spin_unlock_bh(&ab->base_lock);
3738	return ret;
3739}
3740
3741static int
3742ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3743{
3744	struct ath11k_pdev_dp *dp = &ar->dp;
3745	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3746	struct sk_buff *msdu;
3747	struct ath11k_skb_rxcb *rxcb;
3748	struct hal_rx_desc *rx_desc;
3749	u8 *hdr_status;
3750	u16 msdu_len;
3751	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3752
3753	spin_lock_bh(&rx_ring->idr_lock);
3754	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3755	if (!msdu) {
3756		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3757			    buf_id);
3758		spin_unlock_bh(&rx_ring->idr_lock);
3759		return -EINVAL;
3760	}
3761
3762	idr_remove(&rx_ring->bufs_idr, buf_id);
3763	spin_unlock_bh(&rx_ring->idr_lock);
3764
3765	rxcb = ATH11K_SKB_RXCB(msdu);
3766	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3767			 msdu->len + skb_tailroom(msdu),
3768			 DMA_FROM_DEVICE);
3769
3770	if (drop) {
3771		dev_kfree_skb_any(msdu);
3772		return 0;
3773	}
3774
3775	rcu_read_lock();
3776	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3777		dev_kfree_skb_any(msdu);
3778		goto exit;
3779	}
3780
3781	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3782		dev_kfree_skb_any(msdu);
3783		goto exit;
3784	}
3785
3786	rx_desc = (struct hal_rx_desc *)msdu->data;
3787	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3788	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3789		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3790		ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
3791		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3792				sizeof(struct ieee80211_hdr));
3793		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3794				sizeof(struct hal_rx_desc));
3795		dev_kfree_skb_any(msdu);
3796		goto exit;
3797	}
3798
3799	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3800
3801	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3802		dev_kfree_skb_any(msdu);
3803		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3804					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3805	}
3806exit:
3807	rcu_read_unlock();
3808	return 0;
3809}
3810
3811int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3812			     int budget)
3813{
3814	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3815	struct dp_link_desc_bank *link_desc_banks;
3816	enum hal_rx_buf_return_buf_manager rbm;
3817	int tot_n_bufs_reaped, quota, ret, i;
3818	int n_bufs_reaped[MAX_RADIOS] = {0};
3819	struct dp_rxdma_ring *rx_ring;
3820	struct dp_srng *reo_except;
3821	u32 desc_bank, num_msdus;
3822	struct hal_srng *srng;
3823	struct ath11k_dp *dp;
3824	void *link_desc_va;
3825	int buf_id, mac_id;
3826	struct ath11k *ar;
3827	dma_addr_t paddr;
3828	u32 *desc;
3829	bool is_frag;
3830	u8 drop = 0;
3831
3832	tot_n_bufs_reaped = 0;
3833	quota = budget;
3834
3835	dp = &ab->dp;
3836	reo_except = &dp->reo_except_ring;
3837	link_desc_banks = dp->link_desc_banks;
3838
3839	srng = &ab->hal.srng_list[reo_except->ring_id];
3840
3841	spin_lock_bh(&srng->lock);
3842
3843	ath11k_hal_srng_access_begin(ab, srng);
3844
3845	while (budget &&
3846	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3847		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3848
3849		ab->soc_stats.err_ring_pkts++;
3850		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3851						    &desc_bank);
3852		if (ret) {
3853			ath11k_warn(ab, "failed to parse error reo desc %d\n",
3854				    ret);
3855			continue;
3856		}
3857#if defined(__linux__)
3858		link_desc_va = link_desc_banks[desc_bank].vaddr +
3859#elif defined(__FreeBSD__)
3860		link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +
3861#endif
3862			       (paddr - link_desc_banks[desc_bank].paddr);
3863		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3864						 &rbm);
3865		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3866		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
3867			ab->soc_stats.invalid_rbm++;
3868			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3869			ath11k_dp_rx_link_desc_return(ab, desc,
3870						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3871			continue;
3872		}
3873
3874		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3875
3876		/* Process only rx fragments with one msdu per link desc below, and drop
3877		 * msdu's indicated due to error reasons.
3878		 */
3879		if (!is_frag || num_msdus > 1) {
3880			drop = 1;
3881			/* Return the link desc back to wbm idle list */
3882			ath11k_dp_rx_link_desc_return(ab, desc,
3883						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3884		}
3885
3886		for (i = 0; i < num_msdus; i++) {
3887			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3888					   msdu_cookies[i]);
3889
3890			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3891					   msdu_cookies[i]);
3892
3893			ar = ab->pdevs[mac_id].ar;
3894
3895			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3896				n_bufs_reaped[mac_id]++;
3897				tot_n_bufs_reaped++;
3898			}
3899		}
3900
3901		if (tot_n_bufs_reaped >= quota) {
3902			tot_n_bufs_reaped = quota;
3903			goto exit;
3904		}
3905
3906		budget = quota - tot_n_bufs_reaped;
3907	}
3908
3909exit:
3910	ath11k_hal_srng_access_end(ab, srng);
3911
3912	spin_unlock_bh(&srng->lock);
3913
3914	for (i = 0; i <  ab->num_radios; i++) {
3915		if (!n_bufs_reaped[i])
3916			continue;
3917
3918		ar = ab->pdevs[i].ar;
3919		rx_ring = &ar->dp.rx_refill_buf_ring;
3920
3921		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3922					   ab->hw_params.hal_params->rx_buf_rbm);
3923	}
3924
3925	return tot_n_bufs_reaped;
3926}
3927
3928static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3929					     int msdu_len,
3930					     struct sk_buff_head *msdu_list)
3931{
3932	struct sk_buff *skb, *tmp;
3933	struct ath11k_skb_rxcb *rxcb;
3934	int n_buffs;
3935
3936	n_buffs = DIV_ROUND_UP(msdu_len,
3937			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3938
3939	skb_queue_walk_safe(msdu_list, skb, tmp) {
3940		rxcb = ATH11K_SKB_RXCB(skb);
3941		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3942		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3943			if (!n_buffs)
3944				break;
3945			__skb_unlink(skb, msdu_list);
3946			dev_kfree_skb_any(skb);
3947			n_buffs--;
3948		}
3949	}
3950}
3951
3952static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3953				      struct ieee80211_rx_status *status,
3954				      struct sk_buff_head *msdu_list)
3955{
3956	u16 msdu_len;
3957	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3958	struct rx_attention *rx_attention;
3959	u8 l3pad_bytes;
3960	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3961	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3962
3963	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3964
3965	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3966		/* First buffer will be freed by the caller, so deduct it's length */
3967		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3968		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3969		return -EINVAL;
3970	}
3971
3972	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3973	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3974		ath11k_warn(ar->ab,
3975			    "msdu_done bit not set in null_q_des processing\n");
3976		__skb_queue_purge(msdu_list);
3977		return -EIO;
3978	}
3979
3980	/* Handle NULL queue descriptor violations arising out a missing
3981	 * REO queue for a given peer or a given TID. This typically
3982	 * may happen if a packet is received on a QOS enabled TID before the
3983	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
3984	 * it may also happen for MC/BC frames if they are not routed to the
3985	 * non-QOS TID queue, in the absence of any other default TID queue.
3986	 * This error can show up both in a REO destination or WBM release ring.
3987	 */
3988
3989	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3990	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3991
3992	if (rxcb->is_frag) {
3993		skb_pull(msdu, hal_rx_desc_sz);
3994	} else {
3995		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3996
3997		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3998			return -EINVAL;
3999
4000		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4001		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4002	}
4003	ath11k_dp_rx_h_ppdu(ar, desc, status);
4004
4005	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
4006
4007	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
4008
4009	/* Please note that caller will having the access to msdu and completing
4010	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
4011	 */
4012
4013	return 0;
4014}
4015
4016static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
4017				   struct ieee80211_rx_status *status,
4018				   struct sk_buff_head *msdu_list)
4019{
4020	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4021	bool drop = false;
4022
4023	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
4024
4025	switch (rxcb->err_code) {
4026	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
4027		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
4028			drop = true;
4029		break;
4030	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
4031		/* TODO: Do not drop PN failed packets in the driver;
4032		 * instead, it is good to drop such packets in mac80211
4033		 * after incrementing the replay counters.
4034		 */
4035		fallthrough;
4036	default:
4037		/* TODO: Review other errors and process them to mac80211
4038		 * as appropriate.
4039		 */
4040		drop = true;
4041		break;
4042	}
4043
4044	return drop;
4045}
4046
4047static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
4048					struct ieee80211_rx_status *status)
4049{
4050	u16 msdu_len;
4051	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
4052	u8 l3pad_bytes;
4053	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4054	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4055
4056	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4057	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4058
4059	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4060	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4061	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4062	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4063
4064	ath11k_dp_rx_h_ppdu(ar, desc, status);
4065
4066	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4067			 RX_FLAG_DECRYPTED);
4068
4069	ath11k_dp_rx_h_undecap(ar, msdu, desc,
4070			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4071}
4072
4073static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar,  struct sk_buff *msdu,
4074				     struct ieee80211_rx_status *status)
4075{
4076	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4077	bool drop = false;
4078
4079	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4080
4081	switch (rxcb->err_code) {
4082	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4083		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4084		break;
4085	default:
4086		/* TODO: Review other rxdma error code to check if anything is
4087		 * worth reporting to mac80211
4088		 */
4089		drop = true;
4090		break;
4091	}
4092
4093	return drop;
4094}
4095
4096static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4097				 struct napi_struct *napi,
4098				 struct sk_buff *msdu,
4099				 struct sk_buff_head *msdu_list)
4100{
4101	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4102	struct ieee80211_rx_status rxs = {0};
4103	bool drop = true;
4104
4105	switch (rxcb->err_rel_src) {
4106	case HAL_WBM_REL_SRC_MODULE_REO:
4107		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4108		break;
4109	case HAL_WBM_REL_SRC_MODULE_RXDMA:
4110		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4111		break;
4112	default:
4113		/* msdu will get freed */
4114		break;
4115	}
4116
4117	if (drop) {
4118		dev_kfree_skb_any(msdu);
4119		return;
4120	}
4121
4122	ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4123}
4124
4125int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4126				 struct napi_struct *napi, int budget)
4127{
4128	struct ath11k *ar;
4129	struct ath11k_dp *dp = &ab->dp;
4130	struct dp_rxdma_ring *rx_ring;
4131	struct hal_rx_wbm_rel_info err_info;
4132	struct hal_srng *srng;
4133	struct sk_buff *msdu;
4134	struct sk_buff_head msdu_list[MAX_RADIOS];
4135	struct ath11k_skb_rxcb *rxcb;
4136	u32 *rx_desc;
4137	int buf_id, mac_id;
4138	int num_buffs_reaped[MAX_RADIOS] = {0};
4139	int total_num_buffs_reaped = 0;
4140	int ret, i;
4141
4142	for (i = 0; i < ab->num_radios; i++)
4143		__skb_queue_head_init(&msdu_list[i]);
4144
4145	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4146
4147	spin_lock_bh(&srng->lock);
4148
4149	ath11k_hal_srng_access_begin(ab, srng);
4150
4151	while (budget) {
4152		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4153		if (!rx_desc)
4154			break;
4155
4156		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4157		if (ret) {
4158			ath11k_warn(ab,
4159				    "failed to parse rx error in wbm_rel ring desc %d\n",
4160				    ret);
4161			continue;
4162		}
4163
4164		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4165		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4166
4167		ar = ab->pdevs[mac_id].ar;
4168		rx_ring = &ar->dp.rx_refill_buf_ring;
4169
4170		spin_lock_bh(&rx_ring->idr_lock);
4171		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4172		if (!msdu) {
4173			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4174				    buf_id, mac_id);
4175			spin_unlock_bh(&rx_ring->idr_lock);
4176			continue;
4177		}
4178
4179		idr_remove(&rx_ring->bufs_idr, buf_id);
4180		spin_unlock_bh(&rx_ring->idr_lock);
4181
4182		rxcb = ATH11K_SKB_RXCB(msdu);
4183		dma_unmap_single(ab->dev, rxcb->paddr,
4184				 msdu->len + skb_tailroom(msdu),
4185				 DMA_FROM_DEVICE);
4186
4187		num_buffs_reaped[mac_id]++;
4188		total_num_buffs_reaped++;
4189		budget--;
4190
4191		if (err_info.push_reason !=
4192		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4193			dev_kfree_skb_any(msdu);
4194			continue;
4195		}
4196
4197		rxcb->err_rel_src = err_info.err_rel_src;
4198		rxcb->err_code = err_info.err_code;
4199		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4200		__skb_queue_tail(&msdu_list[mac_id], msdu);
4201	}
4202
4203	ath11k_hal_srng_access_end(ab, srng);
4204
4205	spin_unlock_bh(&srng->lock);
4206
4207	if (!total_num_buffs_reaped)
4208		goto done;
4209
4210	for (i = 0; i <  ab->num_radios; i++) {
4211		if (!num_buffs_reaped[i])
4212			continue;
4213
4214		ar = ab->pdevs[i].ar;
4215		rx_ring = &ar->dp.rx_refill_buf_ring;
4216
4217		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4218					   ab->hw_params.hal_params->rx_buf_rbm);
4219	}
4220
4221	rcu_read_lock();
4222	for (i = 0; i <  ab->num_radios; i++) {
4223		if (!rcu_dereference(ab->pdevs_active[i])) {
4224			__skb_queue_purge(&msdu_list[i]);
4225			continue;
4226		}
4227
4228		ar = ab->pdevs[i].ar;
4229
4230		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4231			__skb_queue_purge(&msdu_list[i]);
4232			continue;
4233		}
4234
4235		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4236			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4237	}
4238	rcu_read_unlock();
4239done:
4240	return total_num_buffs_reaped;
4241}
4242
4243int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4244{
4245	struct ath11k *ar;
4246	struct dp_srng *err_ring;
4247	struct dp_rxdma_ring *rx_ring;
4248	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4249	struct hal_srng *srng;
4250	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4251	enum hal_rx_buf_return_buf_manager rbm;
4252	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4253	struct ath11k_skb_rxcb *rxcb;
4254	struct sk_buff *skb;
4255	struct hal_reo_entrance_ring *entr_ring;
4256	void *desc;
4257	int num_buf_freed = 0;
4258	int quota = budget;
4259	dma_addr_t paddr;
4260	u32 desc_bank;
4261	void *link_desc_va;
4262	int num_msdus;
4263	int i;
4264	int buf_id;
4265
4266	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4267	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4268									  mac_id)];
4269	rx_ring = &ar->dp.rx_refill_buf_ring;
4270
4271	srng = &ab->hal.srng_list[err_ring->ring_id];
4272
4273	spin_lock_bh(&srng->lock);
4274
4275	ath11k_hal_srng_access_begin(ab, srng);
4276
4277	while (quota-- &&
4278	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4279		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4280
4281		entr_ring = (struct hal_reo_entrance_ring *)desc;
4282		rxdma_err_code =
4283			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4284				  entr_ring->info1);
4285		ab->soc_stats.rxdma_error[rxdma_err_code]++;
4286
4287#if defined(__linux__)
4288		link_desc_va = link_desc_banks[desc_bank].vaddr +
4289#elif defined(__FreeBSD__)
4290		link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +
4291#endif
4292			       (paddr - link_desc_banks[desc_bank].paddr);
4293		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4294						 msdu_cookies, &rbm);
4295
4296		for (i = 0; i < num_msdus; i++) {
4297			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4298					   msdu_cookies[i]);
4299
4300			spin_lock_bh(&rx_ring->idr_lock);
4301			skb = idr_find(&rx_ring->bufs_idr, buf_id);
4302			if (!skb) {
4303				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4304					    buf_id);
4305				spin_unlock_bh(&rx_ring->idr_lock);
4306				continue;
4307			}
4308
4309			idr_remove(&rx_ring->bufs_idr, buf_id);
4310			spin_unlock_bh(&rx_ring->idr_lock);
4311
4312			rxcb = ATH11K_SKB_RXCB(skb);
4313			dma_unmap_single(ab->dev, rxcb->paddr,
4314					 skb->len + skb_tailroom(skb),
4315					 DMA_FROM_DEVICE);
4316			dev_kfree_skb_any(skb);
4317
4318			num_buf_freed++;
4319		}
4320
4321		ath11k_dp_rx_link_desc_return(ab, desc,
4322					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4323	}
4324
4325	ath11k_hal_srng_access_end(ab, srng);
4326
4327	spin_unlock_bh(&srng->lock);
4328
4329	if (num_buf_freed)
4330		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4331					   ab->hw_params.hal_params->rx_buf_rbm);
4332
4333	return budget - quota;
4334}
4335
4336void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4337{
4338	struct ath11k_dp *dp = &ab->dp;
4339	struct hal_srng *srng;
4340	struct dp_reo_cmd *cmd, *tmp;
4341	bool found = false;
4342	u32 *reo_desc;
4343	u16 tag;
4344	struct hal_reo_status reo_status;
4345
4346	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4347
4348	memset(&reo_status, 0, sizeof(reo_status));
4349
4350	spin_lock_bh(&srng->lock);
4351
4352	ath11k_hal_srng_access_begin(ab, srng);
4353
4354	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4355		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4356
4357		switch (tag) {
4358		case HAL_REO_GET_QUEUE_STATS_STATUS:
4359			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4360							  &reo_status);
4361			break;
4362		case HAL_REO_FLUSH_QUEUE_STATUS:
4363			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4364							  &reo_status);
4365			break;
4366		case HAL_REO_FLUSH_CACHE_STATUS:
4367			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4368							  &reo_status);
4369			break;
4370		case HAL_REO_UNBLOCK_CACHE_STATUS:
4371			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4372							  &reo_status);
4373			break;
4374		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4375			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4376								 &reo_status);
4377			break;
4378		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4379			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4380								  &reo_status);
4381			break;
4382		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4383			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4384								  &reo_status);
4385			break;
4386		default:
4387			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4388			continue;
4389		}
4390
4391		spin_lock_bh(&dp->reo_cmd_lock);
4392		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4393			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4394				found = true;
4395				list_del(&cmd->list);
4396				break;
4397			}
4398		}
4399		spin_unlock_bh(&dp->reo_cmd_lock);
4400
4401		if (found) {
4402			cmd->handler(dp, (void *)&cmd->data,
4403				     reo_status.uniform_hdr.cmd_status);
4404			kfree(cmd);
4405		}
4406
4407		found = false;
4408	}
4409
4410	ath11k_hal_srng_access_end(ab, srng);
4411
4412	spin_unlock_bh(&srng->lock);
4413}
4414
4415void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4416{
4417	struct ath11k *ar = ab->pdevs[mac_id].ar;
4418
4419	ath11k_dp_rx_pdev_srng_free(ar);
4420	ath11k_dp_rxdma_pdev_buf_free(ar);
4421}
4422
4423int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4424{
4425	struct ath11k *ar = ab->pdevs[mac_id].ar;
4426	struct ath11k_pdev_dp *dp = &ar->dp;
4427	u32 ring_id;
4428	int i;
4429	int ret;
4430
4431	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4432	if (ret) {
4433		ath11k_warn(ab, "failed to setup rx srngs\n");
4434		return ret;
4435	}
4436
4437	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4438	if (ret) {
4439		ath11k_warn(ab, "failed to setup rxdma ring\n");
4440		return ret;
4441	}
4442
4443	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4444	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4445	if (ret) {
4446		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4447			    ret);
4448		return ret;
4449	}
4450
4451	if (ab->hw_params.rx_mac_buf_ring) {
4452		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4453			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4454			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4455							  mac_id + i, HAL_RXDMA_BUF);
4456			if (ret) {
4457				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4458					    i, ret);
4459				return ret;
4460			}
4461		}
4462	}
4463
4464	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4465		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4466		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4467						  mac_id + i, HAL_RXDMA_DST);
4468		if (ret) {
4469			ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4470				    i, ret);
4471			return ret;
4472		}
4473	}
4474
4475	if (!ab->hw_params.rxdma1_enable)
4476		goto config_refill_ring;
4477
4478	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4479	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4480					  mac_id, HAL_RXDMA_MONITOR_BUF);
4481	if (ret) {
4482		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4483			    ret);
4484		return ret;
4485	}
4486	ret = ath11k_dp_tx_htt_srng_setup(ab,
4487					  dp->rxdma_mon_dst_ring.ring_id,
4488					  mac_id, HAL_RXDMA_MONITOR_DST);
4489	if (ret) {
4490		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4491			    ret);
4492		return ret;
4493	}
4494	ret = ath11k_dp_tx_htt_srng_setup(ab,
4495					  dp->rxdma_mon_desc_ring.ring_id,
4496					  mac_id, HAL_RXDMA_MONITOR_DESC);
4497	if (ret) {
4498		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4499			    ret);
4500		return ret;
4501	}
4502
4503config_refill_ring:
4504	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4505		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4506		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4507						  HAL_RXDMA_MONITOR_STATUS);
4508		if (ret) {
4509			ath11k_warn(ab,
4510				    "failed to configure mon_status_refill_ring%d %d\n",
4511				    i, ret);
4512			return ret;
4513		}
4514	}
4515
4516	return 0;
4517}
4518
4519static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4520{
4521	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4522		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4523		*total_len -= *frag_len;
4524	} else {
4525		*frag_len = *total_len;
4526		*total_len = 0;
4527	}
4528}
4529
4530static
4531int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4532					  void *p_last_buf_addr_info,
4533					  u8 mac_id)
4534{
4535	struct ath11k_pdev_dp *dp = &ar->dp;
4536	struct dp_srng *dp_srng;
4537	void *hal_srng;
4538	void *src_srng_desc;
4539	int ret = 0;
4540
4541	if (ar->ab->hw_params.rxdma1_enable) {
4542		dp_srng = &dp->rxdma_mon_desc_ring;
4543		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4544	} else {
4545		dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4546		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4547	}
4548
4549	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4550
4551	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4552
4553	if (src_srng_desc) {
4554		struct ath11k_buffer_addr *src_desc =
4555				(struct ath11k_buffer_addr *)src_srng_desc;
4556
4557		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4558	} else {
4559		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4560			   "Monitor Link Desc Ring %d Full", mac_id);
4561		ret = -ENOMEM;
4562	}
4563
4564	ath11k_hal_srng_access_end(ar->ab, hal_srng);
4565	return ret;
4566}
4567
4568static
4569void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4570					 dma_addr_t *paddr, u32 *sw_cookie,
4571					 u8 *rbm,
4572					 void **pp_buf_addr_info)
4573{
4574	struct hal_rx_msdu_link *msdu_link =
4575			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
4576	struct ath11k_buffer_addr *buf_addr_info;
4577
4578	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4579
4580	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4581
4582	*pp_buf_addr_info = (void *)buf_addr_info;
4583}
4584
4585static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4586{
4587	if (skb->len > len) {
4588		skb_trim(skb, len);
4589	} else {
4590		if (skb_tailroom(skb) < len - skb->len) {
4591			if ((pskb_expand_head(skb, 0,
4592					      len - skb->len - skb_tailroom(skb),
4593					      GFP_ATOMIC))) {
4594				dev_kfree_skb_any(skb);
4595				return -ENOMEM;
4596			}
4597		}
4598		skb_put(skb, (len - skb->len));
4599	}
4600	return 0;
4601}
4602
4603static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4604					void *msdu_link_desc,
4605					struct hal_rx_msdu_list *msdu_list,
4606					u16 *num_msdus)
4607{
4608	struct hal_rx_msdu_details *msdu_details = NULL;
4609	struct rx_msdu_desc *msdu_desc_info = NULL;
4610	struct hal_rx_msdu_link *msdu_link = NULL;
4611	int i;
4612	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4613	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4614	u8  tmp  = 0;
4615
4616	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4617	msdu_details = &msdu_link->msdu_link[0];
4618
4619	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4620		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4621			      msdu_details[i].buf_addr_info.info0) == 0) {
4622			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4623			msdu_desc_info->info0 |= last;
4624			;
4625			break;
4626		}
4627		msdu_desc_info = &msdu_details[i].rx_msdu_info;
4628
4629		if (!i)
4630			msdu_desc_info->info0 |= first;
4631		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4632			msdu_desc_info->info0 |= last;
4633		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4634		msdu_list->msdu_info[i].msdu_len =
4635			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4636		msdu_list->sw_cookie[i] =
4637			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4638				  msdu_details[i].buf_addr_info.info1);
4639		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4640				msdu_details[i].buf_addr_info.info1);
4641		msdu_list->rbm[i] = tmp;
4642	}
4643	*num_msdus = i;
4644}
4645
4646static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4647					u32 *rx_bufs_used)
4648{
4649	u32 ret = 0;
4650
4651	if ((*ppdu_id < msdu_ppdu_id) &&
4652	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4653		*ppdu_id = msdu_ppdu_id;
4654		ret = msdu_ppdu_id;
4655	} else if ((*ppdu_id > msdu_ppdu_id) &&
4656		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4657		/* mon_dst is behind than mon_status
4658		 * skip dst_ring and free it
4659		 */
4660		*rx_bufs_used += 1;
4661		*ppdu_id = msdu_ppdu_id;
4662		ret = msdu_ppdu_id;
4663	}
4664	return ret;
4665}
4666
4667static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4668				      bool *is_frag, u32 *total_len,
4669				      u32 *frag_len, u32 *msdu_cnt)
4670{
4671	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4672		if (!*is_frag) {
4673			*total_len = info->msdu_len;
4674			*is_frag = true;
4675		}
4676		ath11k_dp_mon_set_frag_len(total_len,
4677					   frag_len);
4678	} else {
4679		if (*is_frag) {
4680			ath11k_dp_mon_set_frag_len(total_len,
4681						   frag_len);
4682		} else {
4683			*frag_len = info->msdu_len;
4684		}
4685		*is_frag = false;
4686		*msdu_cnt -= 1;
4687	}
4688}
4689
4690static u32
4691ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4692			  void *ring_entry, struct sk_buff **head_msdu,
4693			  struct sk_buff **tail_msdu, u32 *npackets,
4694			  u32 *ppdu_id)
4695{
4696	struct ath11k_pdev_dp *dp = &ar->dp;
4697	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4698	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4699	struct sk_buff *msdu = NULL, *last = NULL;
4700	struct hal_rx_msdu_list msdu_list;
4701	void *p_buf_addr_info, *p_last_buf_addr_info;
4702	struct hal_rx_desc *rx_desc;
4703	void *rx_msdu_link_desc;
4704	dma_addr_t paddr;
4705	u16 num_msdus = 0;
4706	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4707	u32 rx_bufs_used = 0, i = 0;
4708	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4709	u32 total_len = 0, frag_len = 0;
4710	bool is_frag, is_first_msdu;
4711	bool drop_mpdu = false;
4712	struct ath11k_skb_rxcb *rxcb;
4713	struct hal_reo_entrance_ring *ent_desc =
4714			(struct hal_reo_entrance_ring *)ring_entry;
4715	int buf_id;
4716	u32 rx_link_buf_info[2];
4717	u8 rbm;
4718
4719	if (!ar->ab->hw_params.rxdma1_enable)
4720		rx_ring = &dp->rx_refill_buf_ring;
4721
4722	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4723					    &sw_cookie,
4724					    &p_last_buf_addr_info, &rbm,
4725					    &msdu_cnt);
4726
4727	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4728		      ent_desc->info1) ==
4729		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4730		u8 rxdma_err =
4731			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4732				  ent_desc->info1);
4733		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4734		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4735		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4736			drop_mpdu = true;
4737			pmon->rx_mon_stats.dest_mpdu_drop++;
4738		}
4739	}
4740
4741	is_frag = false;
4742	is_first_msdu = true;
4743
4744	do {
4745		if (pmon->mon_last_linkdesc_paddr == paddr) {
4746			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4747			return rx_bufs_used;
4748		}
4749
4750		if (ar->ab->hw_params.rxdma1_enable)
4751			rx_msdu_link_desc =
4752#if defined(__linux__)
4753				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
4754#elif defined(__FreeBSD__)
4755				(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
4756#endif
4757				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
4758		else
4759			rx_msdu_link_desc =
4760#if defined(__linux__)
4761				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4762#elif defined(__FreeBSD__)
4763				(u8 *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4764#endif
4765				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4766
4767		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4768					    &num_msdus);
4769
4770		for (i = 0; i < num_msdus; i++) {
4771			u32 l2_hdr_offset;
4772
4773			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4774				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4775					   "i %d last_cookie %d is same\n",
4776					   i, pmon->mon_last_buf_cookie);
4777				drop_mpdu = true;
4778				pmon->rx_mon_stats.dup_mon_buf_cnt++;
4779				continue;
4780			}
4781			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4782					   msdu_list.sw_cookie[i]);
4783
4784			spin_lock_bh(&rx_ring->idr_lock);
4785			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4786			spin_unlock_bh(&rx_ring->idr_lock);
4787			if (!msdu) {
4788				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4789					   "msdu_pop: invalid buf_id %d\n", buf_id);
4790				break;
4791			}
4792			rxcb = ATH11K_SKB_RXCB(msdu);
4793			if (!rxcb->unmapped) {
4794				dma_unmap_single(ar->ab->dev, rxcb->paddr,
4795						 msdu->len +
4796						 skb_tailroom(msdu),
4797						 DMA_FROM_DEVICE);
4798				rxcb->unmapped = 1;
4799			}
4800			if (drop_mpdu) {
4801				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4802					   "i %d drop msdu %p *ppdu_id %x\n",
4803					   i, msdu, *ppdu_id);
4804				dev_kfree_skb_any(msdu);
4805				msdu = NULL;
4806				goto next_msdu;
4807			}
4808
4809			rx_desc = (struct hal_rx_desc *)msdu->data;
4810
4811			rx_pkt_offset = sizeof(struct hal_rx_desc);
4812			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4813
4814			if (is_first_msdu) {
4815				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4816					drop_mpdu = true;
4817					dev_kfree_skb_any(msdu);
4818					msdu = NULL;
4819					pmon->mon_last_linkdesc_paddr = paddr;
4820					goto next_msdu;
4821				}
4822
4823				msdu_ppdu_id =
4824					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4825
4826				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4827								 ppdu_id,
4828								 &rx_bufs_used)) {
4829					if (rx_bufs_used) {
4830						drop_mpdu = true;
4831						dev_kfree_skb_any(msdu);
4832						msdu = NULL;
4833						goto next_msdu;
4834					}
4835					return rx_bufs_used;
4836				}
4837				pmon->mon_last_linkdesc_paddr = paddr;
4838				is_first_msdu = false;
4839			}
4840			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4841						  &is_frag, &total_len,
4842						  &frag_len, &msdu_cnt);
4843			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4844
4845			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4846
4847			if (!(*head_msdu))
4848				*head_msdu = msdu;
4849			else if (last)
4850				last->next = msdu;
4851
4852			last = msdu;
4853next_msdu:
4854			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4855			rx_bufs_used++;
4856			spin_lock_bh(&rx_ring->idr_lock);
4857			idr_remove(&rx_ring->bufs_idr, buf_id);
4858			spin_unlock_bh(&rx_ring->idr_lock);
4859		}
4860
4861		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4862
4863		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4864						    &sw_cookie, &rbm,
4865						    &p_buf_addr_info);
4866
4867		if (ar->ab->hw_params.rxdma1_enable) {
4868			if (ath11k_dp_rx_monitor_link_desc_return(ar,
4869								  p_last_buf_addr_info,
4870								  dp->mac_id))
4871				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4872					   "dp_rx_monitor_link_desc_return failed");
4873		} else {
4874			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4875						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4876		}
4877
4878		p_last_buf_addr_info = p_buf_addr_info;
4879
4880	} while (paddr && msdu_cnt);
4881
4882	if (last)
4883		last->next = NULL;
4884
4885	*tail_msdu = msdu;
4886
4887	if (msdu_cnt == 0)
4888		*npackets = 1;
4889
4890	return rx_bufs_used;
4891}
4892
4893static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4894{
4895	u32 rx_pkt_offset, l2_hdr_offset;
4896
4897	rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4898	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4899						      (struct hal_rx_desc *)msdu->data);
4900	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4901}
4902
4903static struct sk_buff *
4904ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4905			    u32 mac_id, struct sk_buff *head_msdu,
4906			    struct sk_buff *last_msdu,
4907			    struct ieee80211_rx_status *rxs, bool *fcs_err)
4908{
4909	struct ath11k_base *ab = ar->ab;
4910	struct sk_buff *msdu, *prev_buf;
4911	struct hal_rx_desc *rx_desc;
4912	char *hdr_desc;
4913	u8 *dest, decap_format;
4914	struct ieee80211_hdr_3addr *wh;
4915	struct rx_attention *rx_attention;
4916	u32 err_bitmap;
4917
4918	if (!head_msdu)
4919		goto err_merge_fail;
4920
4921	rx_desc = (struct hal_rx_desc *)head_msdu->data;
4922	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4923	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4924
4925	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4926		*fcs_err = true;
4927
4928	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4929		return NULL;
4930
4931	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4932
4933	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4934
4935	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4936		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4937
4938		prev_buf = head_msdu;
4939		msdu = head_msdu->next;
4940
4941		while (msdu) {
4942			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4943
4944			prev_buf = msdu;
4945			msdu = msdu->next;
4946		}
4947
4948		prev_buf->next = NULL;
4949
4950		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4951	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4952		u8 qos_pkt = 0;
4953
4954		rx_desc = (struct hal_rx_desc *)head_msdu->data;
4955		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4956
4957		/* Base size */
4958		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4959
4960		if (ieee80211_is_data_qos(wh->frame_control))
4961			qos_pkt = 1;
4962
4963		msdu = head_msdu;
4964
4965		while (msdu) {
4966			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4967			if (qos_pkt) {
4968				dest = skb_push(msdu, sizeof(__le16));
4969				if (!dest)
4970					goto err_merge_fail;
4971				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4972			}
4973			prev_buf = msdu;
4974			msdu = msdu->next;
4975		}
4976		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4977		if (!dest)
4978			goto err_merge_fail;
4979
4980		ath11k_dbg(ab, ATH11K_DBG_DATA,
4981			   "mpdu_buf %p mpdu_buf->len %u",
4982			   prev_buf, prev_buf->len);
4983	} else {
4984		ath11k_dbg(ab, ATH11K_DBG_DATA,
4985			   "decap format %d is not supported!\n",
4986			   decap_format);
4987		goto err_merge_fail;
4988	}
4989
4990	return head_msdu;
4991
4992err_merge_fail:
4993	return NULL;
4994}
4995
4996static void
4997ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4998				u8 *rtap_buf)
4999{
5000	u32 rtap_len = 0;
5001
5002	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5003	rtap_len += 2;
5004
5005	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5006	rtap_len += 2;
5007
5008	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5009	rtap_len += 2;
5010
5011	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5012	rtap_len += 2;
5013
5014	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5015	rtap_len += 2;
5016
5017	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5018}
5019
5020static void
5021ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
5022				   u8 *rtap_buf)
5023{
5024	u32 rtap_len = 0;
5025
5026	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5027	rtap_len += 2;
5028
5029	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5030	rtap_len += 2;
5031
5032	rtap_buf[rtap_len] = rx_status->he_RU[0];
5033	rtap_len += 1;
5034
5035	rtap_buf[rtap_len] = rx_status->he_RU[1];
5036	rtap_len += 1;
5037
5038	rtap_buf[rtap_len] = rx_status->he_RU[2];
5039	rtap_len += 1;
5040
5041	rtap_buf[rtap_len] = rx_status->he_RU[3];
5042}
5043
5044static void ath11k_update_radiotap(struct ath11k *ar,
5045				   struct hal_rx_mon_ppdu_info *ppduinfo,
5046				   struct sk_buff *mon_skb,
5047				   struct ieee80211_rx_status *rxs)
5048{
5049	struct ieee80211_supported_band *sband;
5050	u8 *ptr = NULL;
5051
5052	rxs->flag |= RX_FLAG_MACTIME_START;
5053	rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
5054
5055	if (ppduinfo->nss)
5056		rxs->nss = ppduinfo->nss;
5057
5058	if (ppduinfo->he_mu_flags) {
5059		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
5060		rxs->encoding = RX_ENC_HE;
5061		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
5062		ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
5063	} else if (ppduinfo->he_flags) {
5064		rxs->flag |= RX_FLAG_RADIOTAP_HE;
5065		rxs->encoding = RX_ENC_HE;
5066		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
5067		ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
5068		rxs->rate_idx = ppduinfo->rate;
5069	} else if (ppduinfo->vht_flags) {
5070		rxs->encoding = RX_ENC_VHT;
5071		rxs->rate_idx = ppduinfo->rate;
5072	} else if (ppduinfo->ht_flags) {
5073		rxs->encoding = RX_ENC_HT;
5074		rxs->rate_idx = ppduinfo->rate;
5075	} else {
5076		rxs->encoding = RX_ENC_LEGACY;
5077		sband = &ar->mac.sbands[rxs->band];
5078		rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5079							  ppduinfo->cck_flag);
5080	}
5081
5082	rxs->mactime = ppduinfo->tsft;
5083}
5084
5085static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
5086				    struct sk_buff *head_msdu,
5087				    struct hal_rx_mon_ppdu_info *ppduinfo,
5088				    struct sk_buff *tail_msdu,
5089				    struct napi_struct *napi)
5090{
5091	struct ath11k_pdev_dp *dp = &ar->dp;
5092	struct sk_buff *mon_skb, *skb_next, *header;
5093	struct ieee80211_rx_status *rxs = &dp->rx_status;
5094	bool fcs_err = false;
5095
5096	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
5097					      tail_msdu, rxs, &fcs_err);
5098
5099	if (!mon_skb)
5100		goto mon_deliver_fail;
5101
5102	header = mon_skb;
5103
5104	rxs->flag = 0;
5105
5106	if (fcs_err)
5107		rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5108
5109	do {
5110		skb_next = mon_skb->next;
5111		if (!skb_next)
5112			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5113		else
5114			rxs->flag |= RX_FLAG_AMSDU_MORE;
5115
5116		if (mon_skb == header) {
5117			header = NULL;
5118			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5119		} else {
5120			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5121		}
5122		rxs->flag |= RX_FLAG_ONLY_MONITOR;
5123		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5124
5125		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5126		mon_skb = skb_next;
5127	} while (mon_skb);
5128	rxs->flag = 0;
5129
5130	return 0;
5131
5132mon_deliver_fail:
5133	mon_skb = head_msdu;
5134	while (mon_skb) {
5135		skb_next = mon_skb->next;
5136		dev_kfree_skb_any(mon_skb);
5137		mon_skb = skb_next;
5138	}
5139	return -EINVAL;
5140}
5141
5142/* The destination ring processing is stuck if the destination is not
5143 * moving while status ring moves 16 PPDU. The destination ring processing
5144 * skips this destination ring PPDU as a workaround.
5145 */
5146#define MON_DEST_RING_STUCK_MAX_CNT 16
5147
5148static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5149					  u32 quota, struct napi_struct *napi)
5150{
5151	struct ath11k_pdev_dp *dp = &ar->dp;
5152	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5153	const struct ath11k_hw_hal_params *hal_params;
5154	void *ring_entry;
5155	void *mon_dst_srng;
5156	u32 ppdu_id;
5157	u32 rx_bufs_used;
5158	u32 ring_id;
5159	struct ath11k_pdev_mon_stats *rx_mon_stats;
5160	u32	 npackets = 0;
5161	u32 mpdu_rx_bufs_used;
5162
5163	if (ar->ab->hw_params.rxdma1_enable)
5164		ring_id = dp->rxdma_mon_dst_ring.ring_id;
5165	else
5166		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5167
5168	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5169
5170	if (!mon_dst_srng) {
5171		ath11k_warn(ar->ab,
5172			    "HAL Monitor Destination Ring Init Failed -- %p",
5173			    mon_dst_srng);
5174		return;
5175	}
5176
5177	spin_lock_bh(&pmon->mon_lock);
5178
5179	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5180
5181	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5182	rx_bufs_used = 0;
5183	rx_mon_stats = &pmon->rx_mon_stats;
5184
5185	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5186		struct sk_buff *head_msdu, *tail_msdu;
5187
5188		head_msdu = NULL;
5189		tail_msdu = NULL;
5190
5191		mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5192							      &head_msdu,
5193							      &tail_msdu,
5194							      &npackets, &ppdu_id);
5195
5196		rx_bufs_used += mpdu_rx_bufs_used;
5197
5198		if (mpdu_rx_bufs_used) {
5199			dp->mon_dest_ring_stuck_cnt = 0;
5200		} else {
5201			dp->mon_dest_ring_stuck_cnt++;
5202			rx_mon_stats->dest_mon_not_reaped++;
5203		}
5204
5205		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5206			rx_mon_stats->dest_mon_stuck++;
5207			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5208				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5209				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5210				   dp->mon_dest_ring_stuck_cnt,
5211				   rx_mon_stats->dest_mon_not_reaped,
5212				   rx_mon_stats->dest_mon_stuck);
5213			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5214			continue;
5215		}
5216
5217		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5218			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5219			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5220				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5221				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5222				   rx_mon_stats->dest_mon_not_reaped,
5223				   rx_mon_stats->dest_mon_stuck);
5224			break;
5225		}
5226		if (head_msdu && tail_msdu) {
5227			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5228						 &pmon->mon_ppdu_info,
5229						 tail_msdu, napi);
5230			rx_mon_stats->dest_mpdu_done++;
5231		}
5232
5233		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5234								mon_dst_srng);
5235	}
5236	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5237
5238	spin_unlock_bh(&pmon->mon_lock);
5239
5240	if (rx_bufs_used) {
5241		rx_mon_stats->dest_ppdu_done++;
5242		hal_params = ar->ab->hw_params.hal_params;
5243
5244		if (ar->ab->hw_params.rxdma1_enable)
5245			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5246						   &dp->rxdma_mon_buf_ring,
5247						   rx_bufs_used,
5248						   hal_params->rx_buf_rbm);
5249		else
5250			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5251						   &dp->rx_refill_buf_ring,
5252						   rx_bufs_used,
5253						   hal_params->rx_buf_rbm);
5254	}
5255}
5256
5257int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5258				    struct napi_struct *napi, int budget)
5259{
5260	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5261	enum hal_rx_mon_status hal_status;
5262	struct sk_buff *skb;
5263	struct sk_buff_head skb_list;
5264	struct ath11k_peer *peer;
5265	struct ath11k_sta *arsta;
5266	int num_buffs_reaped = 0;
5267	u32 rx_buf_sz;
5268	u16 log_type;
5269	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5270	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5271	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5272
5273	__skb_queue_head_init(&skb_list);
5274
5275	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5276							     &skb_list);
5277	if (!num_buffs_reaped)
5278		goto exit;
5279
5280	memset(ppdu_info, 0, sizeof(*ppdu_info));
5281	ppdu_info->peer_id = HAL_INVALID_PEERID;
5282
5283	while ((skb = __skb_dequeue(&skb_list))) {
5284		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5285			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5286			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5287		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5288			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5289			rx_buf_sz = DP_RX_BUFFER_SIZE;
5290		} else {
5291			log_type = ATH11K_PKTLOG_TYPE_INVALID;
5292			rx_buf_sz = 0;
5293		}
5294
5295		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5296			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5297
5298		memset(ppdu_info, 0, sizeof(*ppdu_info));
5299		ppdu_info->peer_id = HAL_INVALID_PEERID;
5300		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5301
5302		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5303		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5304		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5305			rx_mon_stats->status_ppdu_done++;
5306			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5307			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5308			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5309		}
5310
5311		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5312		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5313			dev_kfree_skb_any(skb);
5314			continue;
5315		}
5316
5317		rcu_read_lock();
5318		spin_lock_bh(&ab->base_lock);
5319		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5320
5321		if (!peer || !peer->sta) {
5322			ath11k_dbg(ab, ATH11K_DBG_DATA,
5323				   "failed to find the peer with peer_id %d\n",
5324				   ppdu_info->peer_id);
5325			goto next_skb;
5326		}
5327
5328		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
5329		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5330
5331		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5332			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5333
5334next_skb:
5335		spin_unlock_bh(&ab->base_lock);
5336		rcu_read_unlock();
5337
5338		dev_kfree_skb_any(skb);
5339		memset(ppdu_info, 0, sizeof(*ppdu_info));
5340		ppdu_info->peer_id = HAL_INVALID_PEERID;
5341	}
5342exit:
5343	return num_buffs_reaped;
5344}
5345
5346static u32
5347ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5348			       void *ring_entry, struct sk_buff **head_msdu,
5349			       struct sk_buff **tail_msdu,
5350			       struct hal_sw_mon_ring_entries *sw_mon_entries)
5351{
5352	struct ath11k_pdev_dp *dp = &ar->dp;
5353	struct ath11k_mon_data *pmon = &dp->mon_data;
5354	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5355	struct sk_buff *msdu = NULL, *last = NULL;
5356	struct hal_sw_monitor_ring *sw_desc = ring_entry;
5357	struct hal_rx_msdu_list msdu_list;
5358	struct hal_rx_desc *rx_desc;
5359	struct ath11k_skb_rxcb *rxcb;
5360	void *rx_msdu_link_desc;
5361	void *p_buf_addr_info, *p_last_buf_addr_info;
5362	int buf_id, i = 0;
5363	u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5364	u32 rx_bufs_used = 0, msdu_cnt = 0;
5365	u32 total_len = 0, frag_len = 0, sw_cookie;
5366	u16 num_msdus = 0;
5367	u8 rxdma_err, rbm;
5368	bool is_frag, is_first_msdu;
5369	bool drop_mpdu = false;
5370
5371	ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5372
5373	sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5374	sw_mon_entries->end_of_ppdu = false;
5375	sw_mon_entries->drop_ppdu = false;
5376	p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5377	msdu_cnt = sw_mon_entries->msdu_cnt;
5378
5379	sw_mon_entries->end_of_ppdu =
5380		FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5381	if (sw_mon_entries->end_of_ppdu)
5382		return rx_bufs_used;
5383
5384	if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5385		      sw_desc->info0) ==
5386		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5387		rxdma_err =
5388			FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5389				  sw_desc->info0);
5390		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5391		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5392		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5393			pmon->rx_mon_stats.dest_mpdu_drop++;
5394			drop_mpdu = true;
5395		}
5396	}
5397
5398	is_frag = false;
5399	is_first_msdu = true;
5400
5401	do {
5402		rx_msdu_link_desc =
5403			(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5404			(sw_mon_entries->mon_dst_paddr -
5405			 pmon->link_desc_banks[sw_cookie].paddr);
5406
5407		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5408					    &num_msdus);
5409
5410		for (i = 0; i < num_msdus; i++) {
5411			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5412					   msdu_list.sw_cookie[i]);
5413
5414			spin_lock_bh(&rx_ring->idr_lock);
5415			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5416			if (!msdu) {
5417				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5418					   "full mon msdu_pop: invalid buf_id %d\n",
5419					    buf_id);
5420				spin_unlock_bh(&rx_ring->idr_lock);
5421				break;
5422			}
5423			idr_remove(&rx_ring->bufs_idr, buf_id);
5424			spin_unlock_bh(&rx_ring->idr_lock);
5425
5426			rxcb = ATH11K_SKB_RXCB(msdu);
5427			if (!rxcb->unmapped) {
5428				dma_unmap_single(ar->ab->dev, rxcb->paddr,
5429						 msdu->len +
5430						 skb_tailroom(msdu),
5431						 DMA_FROM_DEVICE);
5432				rxcb->unmapped = 1;
5433			}
5434			if (drop_mpdu) {
5435				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5436					   "full mon: i %d drop msdu %p *ppdu_id %x\n",
5437					   i, msdu, sw_mon_entries->ppdu_id);
5438				dev_kfree_skb_any(msdu);
5439				msdu_cnt--;
5440				goto next_msdu;
5441			}
5442
5443			rx_desc = (struct hal_rx_desc *)msdu->data;
5444
5445			rx_pkt_offset = sizeof(struct hal_rx_desc);
5446			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5447
5448			if (is_first_msdu) {
5449				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5450					drop_mpdu = true;
5451					dev_kfree_skb_any(msdu);
5452					msdu = NULL;
5453					goto next_msdu;
5454				}
5455				is_first_msdu = false;
5456			}
5457
5458			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5459						  &is_frag, &total_len,
5460						  &frag_len, &msdu_cnt);
5461
5462			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5463
5464			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5465
5466			if (!(*head_msdu))
5467				*head_msdu = msdu;
5468			else if (last)
5469				last->next = msdu;
5470
5471			last = msdu;
5472next_msdu:
5473			rx_bufs_used++;
5474		}
5475
5476		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5477						    &sw_mon_entries->mon_dst_paddr,
5478						    &sw_mon_entries->mon_dst_sw_cookie,
5479						    &rbm,
5480						    &p_buf_addr_info);
5481
5482		if (ath11k_dp_rx_monitor_link_desc_return(ar,
5483							  p_last_buf_addr_info,
5484							  dp->mac_id))
5485			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5486				   "full mon: dp_rx_monitor_link_desc_return failed\n");
5487
5488		p_last_buf_addr_info = p_buf_addr_info;
5489
5490	} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5491
5492	if (last)
5493		last->next = NULL;
5494
5495	*tail_msdu = msdu;
5496
5497	return rx_bufs_used;
5498}
5499
5500static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5501					      struct dp_full_mon_mpdu *mon_mpdu,
5502					      struct sk_buff *head,
5503					      struct sk_buff *tail)
5504{
5505	mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5506	if (!mon_mpdu)
5507		return -ENOMEM;
5508
5509	list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5510	mon_mpdu->head = head;
5511	mon_mpdu->tail = tail;
5512
5513	return 0;
5514}
5515
5516static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5517					    struct dp_full_mon_mpdu *mon_mpdu)
5518{
5519	struct dp_full_mon_mpdu *tmp;
5520	struct sk_buff *tmp_msdu, *skb_next;
5521
5522	if (list_empty(&dp->dp_full_mon_mpdu_list))
5523		return;
5524
5525	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5526		list_del(&mon_mpdu->list);
5527
5528		tmp_msdu = mon_mpdu->head;
5529		while (tmp_msdu) {
5530			skb_next = tmp_msdu->next;
5531			dev_kfree_skb_any(tmp_msdu);
5532			tmp_msdu = skb_next;
5533		}
5534
5535		kfree(mon_mpdu);
5536	}
5537}
5538
5539static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5540					      int mac_id,
5541					      struct ath11k_mon_data *pmon,
5542					      struct napi_struct *napi)
5543{
5544	struct ath11k_pdev_mon_stats *rx_mon_stats;
5545	struct dp_full_mon_mpdu *tmp;
5546	struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5547	struct sk_buff *head_msdu, *tail_msdu;
5548	struct ath11k_base *ab = ar->ab;
5549	struct ath11k_dp *dp = &ab->dp;
5550	int ret;
5551
5552	rx_mon_stats = &pmon->rx_mon_stats;
5553
5554	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5555		list_del(&mon_mpdu->list);
5556		head_msdu = mon_mpdu->head;
5557		tail_msdu = mon_mpdu->tail;
5558		if (head_msdu && tail_msdu) {
5559			ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5560						       &pmon->mon_ppdu_info,
5561						       tail_msdu, napi);
5562			rx_mon_stats->dest_mpdu_done++;
5563			ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5564		}
5565		kfree(mon_mpdu);
5566	}
5567
5568	return ret;
5569}
5570
5571static int
5572ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5573					  struct napi_struct *napi, int budget)
5574{
5575	struct ath11k *ar = ab->pdevs[mac_id].ar;
5576	struct ath11k_pdev_dp *dp = &ar->dp;
5577	struct ath11k_mon_data *pmon = &dp->mon_data;
5578	struct hal_sw_mon_ring_entries *sw_mon_entries;
5579	int quota = 0, work = 0, count;
5580
5581	sw_mon_entries = &pmon->sw_mon_entries;
5582
5583	while (pmon->hold_mon_dst_ring) {
5584		quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5585							napi, 1);
5586		if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5587			count = sw_mon_entries->status_buf_count;
5588			if (count > 1) {
5589				quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5590									 napi, count);
5591			}
5592
5593			ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5594							   pmon, napi);
5595			pmon->hold_mon_dst_ring = false;
5596		} else if (!pmon->mon_status_paddr ||
5597			   pmon->buf_state == DP_MON_STATUS_LEAD) {
5598			sw_mon_entries->drop_ppdu = true;
5599			pmon->hold_mon_dst_ring = false;
5600		}
5601
5602		if (!quota)
5603			break;
5604
5605		work += quota;
5606	}
5607
5608	if (sw_mon_entries->drop_ppdu)
5609		ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
5610
5611	return work;
5612}
5613
5614static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5615					 struct napi_struct *napi, int budget)
5616{
5617	struct ath11k *ar = ab->pdevs[mac_id].ar;
5618	struct ath11k_pdev_dp *dp = &ar->dp;
5619	struct ath11k_mon_data *pmon = &dp->mon_data;
5620	struct hal_sw_mon_ring_entries *sw_mon_entries;
5621	struct ath11k_pdev_mon_stats *rx_mon_stats;
5622	struct sk_buff *head_msdu, *tail_msdu;
5623	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5624	void *ring_entry;
5625	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5626	int quota = 0, ret;
5627	bool break_dst_ring = false;
5628
5629	spin_lock_bh(&pmon->mon_lock);
5630
5631	sw_mon_entries = &pmon->sw_mon_entries;
5632	rx_mon_stats = &pmon->rx_mon_stats;
5633
5634	if (pmon->hold_mon_dst_ring) {
5635		spin_unlock_bh(&pmon->mon_lock);
5636		goto reap_status_ring;
5637	}
5638
5639	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5640	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5641		head_msdu = NULL;
5642		tail_msdu = NULL;
5643
5644		mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5645								   &head_msdu,
5646								   &tail_msdu,
5647								   sw_mon_entries);
5648		rx_bufs_used += mpdu_rx_bufs_used;
5649
5650		if (!sw_mon_entries->end_of_ppdu) {
5651			if (head_msdu) {
5652				ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5653									 pmon->mon_mpdu,
5654									 head_msdu,
5655									 tail_msdu);
5656				if (ret)
5657					break_dst_ring = true;
5658			}
5659
5660			goto next_entry;
5661		} else {
5662			if (!sw_mon_entries->ppdu_id &&
5663			    !sw_mon_entries->mon_status_paddr) {
5664				break_dst_ring = true;
5665				goto next_entry;
5666			}
5667		}
5668
5669		rx_mon_stats->dest_ppdu_done++;
5670		pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5671		pmon->buf_state = DP_MON_STATUS_LAG;
5672		pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5673		pmon->hold_mon_dst_ring = true;
5674next_entry:
5675		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5676								mon_dst_srng);
5677		if (break_dst_ring)
5678			break;
5679	}
5680
5681	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5682	spin_unlock_bh(&pmon->mon_lock);
5683
5684	if (rx_bufs_used) {
5685		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5686					   &dp->rxdma_mon_buf_ring,
5687					   rx_bufs_used,
5688					   HAL_RX_BUF_RBM_SW3_BM);
5689	}
5690
5691reap_status_ring:
5692	quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5693							  napi, budget);
5694
5695	return quota;
5696}
5697
5698int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5699				   struct napi_struct *napi, int budget)
5700{
5701	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5702	int ret = 0;
5703
5704	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5705	    ab->hw_params.full_monitor_mode)
5706		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5707	else
5708		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5709
5710	return ret;
5711}
5712
5713static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5714{
5715	struct ath11k_pdev_dp *dp = &ar->dp;
5716	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5717
5718	skb_queue_head_init(&pmon->rx_status_q);
5719
5720	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5721
5722	memset(&pmon->rx_mon_stats, 0,
5723	       sizeof(pmon->rx_mon_stats));
5724	return 0;
5725}
5726
5727int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5728{
5729	struct ath11k_pdev_dp *dp = &ar->dp;
5730	struct ath11k_mon_data *pmon = &dp->mon_data;
5731	struct hal_srng *mon_desc_srng = NULL;
5732	struct dp_srng *dp_srng;
5733	int ret = 0;
5734	u32 n_link_desc = 0;
5735
5736	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5737	if (ret) {
5738		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
5739		return ret;
5740	}
5741
5742	/* if rxdma1_enable is false, no need to setup
5743	 * rxdma_mon_desc_ring.
5744	 */
5745	if (!ar->ab->hw_params.rxdma1_enable)
5746		return 0;
5747
5748	dp_srng = &dp->rxdma_mon_desc_ring;
5749	n_link_desc = dp_srng->size /
5750		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5751	mon_desc_srng =
5752		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5753
5754	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5755					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5756					n_link_desc);
5757	if (ret) {
5758		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
5759		return ret;
5760	}
5761	pmon->mon_last_linkdesc_paddr = 0;
5762	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5763	spin_lock_init(&pmon->mon_lock);
5764
5765	return 0;
5766}
5767
5768static int ath11k_dp_mon_link_free(struct ath11k *ar)
5769{
5770	struct ath11k_pdev_dp *dp = &ar->dp;
5771	struct ath11k_mon_data *pmon = &dp->mon_data;
5772
5773	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5774				    HAL_RXDMA_MONITOR_DESC,
5775				    &dp->rxdma_mon_desc_ring);
5776	return 0;
5777}
5778
5779int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
5780{
5781	ath11k_dp_mon_link_free(ar);
5782	return 0;
5783}
5784
5785int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5786{
5787	/* start reap timer */
5788	mod_timer(&ab->mon_reap_timer,
5789		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5790
5791	return 0;
5792}
5793
5794int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5795{
5796	int ret;
5797
5798	if (stop_timer)
5799		del_timer_sync(&ab->mon_reap_timer);
5800
5801	/* reap all the monitor related rings */
5802	ret = ath11k_dp_purge_mon_ring(ab);
5803	if (ret) {
5804		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5805		return ret;
5806	}
5807
5808	return 0;
5809}
5810