1// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7#include <linux/ieee80211.h>
8#include <linux/kernel.h>
9#include <linux/skbuff.h>
10#include <crypto/hash.h>
11#include "core.h"
12#include "debug.h"
13#include "hal_desc.h"
14#include "hw.h"
15#include "dp_rx.h"
16#include "hal_rx.h"
17#include "dp_tx.h"
18#include "peer.h"
19#include "dp_mon.h"
20
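/* rx fragment reassembly timeout: despite the _MS suffix the value is
 * 2 * HZ, i.e. two seconds expressed in jiffies.
 */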
21#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
22
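/* Thin wrappers around hw_params->hal_ops so the rest of the rx data path
 * stays independent of the chip-specific rx descriptor layout.
 */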
23static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
24						    struct hal_rx_desc *desc)
25{
26	if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
27		return HAL_ENCRYPT_TYPE_OPEN;
28
29	return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
30}
31
32u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
33			     struct hal_rx_desc *desc)
34{
35	return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
36}
37
38static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
39					  struct hal_rx_desc *desc)
40{
41	return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
42}
43
44static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
45					  struct hal_rx_desc *desc)
46{
47	return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
48}
49
50static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
51				    struct hal_rx_desc *desc)
52{
53	return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
54}
55
56static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
57				      struct sk_buff *skb)
58{
59	struct ieee80211_hdr *hdr;
60
61	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
62	return ieee80211_has_morefrags(hdr->frame_control);
63}
64
65static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
66				  struct sk_buff *skb)
67{
68	struct ieee80211_hdr *hdr;
69
70	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
71	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
72}
73
74static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
75				 struct hal_rx_desc *desc)
76{
77	return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
78}
79
80static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
81				     struct hal_rx_desc *desc)
82{
83	return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
84}
85
86static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
87					 struct hal_rx_desc *desc)
88{
89	return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
90}
91
92static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
93					 struct hal_rx_desc *desc)
94{
95	return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
96}
97
98static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
99					struct hal_rx_desc *desc)
100{
101	return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
102}
103
104u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
105			    struct hal_rx_desc *desc)
106{
107	return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
108}
109
110static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
111				   struct hal_rx_desc *desc)
112{
113	return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
114}
115
116static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
117			     struct hal_rx_desc *desc)
118{
119	return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
120}
121
122static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
123				  struct hal_rx_desc *desc)
124{
125	return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
126}
127
128static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
129			       struct hal_rx_desc *desc)
130{
131	return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
132}
133
134static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
135			       struct hal_rx_desc *desc)
136{
137	return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
138}
139
140static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
141				  struct hal_rx_desc *desc)
142{
143	return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
144}
145
146static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
147			     struct hal_rx_desc *desc)
148{
149	return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
150}
151
152static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
153			     struct hal_rx_desc *desc)
154{
155	return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
156}
157
158static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
159				  struct hal_rx_desc *desc)
160{
161	return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
162}
163
164u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
165			struct hal_rx_desc *desc)
166{
167	return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
168}
169
170static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
171				      struct hal_rx_desc *desc)
172{
173	return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
174}
175
176static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
177				     struct hal_rx_desc *desc)
178{
179	return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
180}
181
182static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
183					   struct hal_rx_desc *fdesc,
184					   struct hal_rx_desc *ldesc)
185{
186	ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
187}
188
189static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
190					  struct hal_rx_desc *desc,
191					  u16 len)
192{
193	ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
194}
195
196static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
197				      struct hal_rx_desc *desc)
198{
199	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
200		ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
201}
202
203static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
204					     struct hal_rx_desc *desc)
205{
206	return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
207}
208
209static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
210						 struct hal_rx_desc *desc)
211{
212	return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
213}
214
215static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
216					    struct hal_rx_desc *desc,
217					    struct ieee80211_hdr *hdr)
218{
219	ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
220}
221
222static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
223						struct hal_rx_desc *desc,
224						u8 *crypto_hdr,
225						enum hal_encrypt_type enctype)
226{
227	ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
228}
229
230static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
231						struct hal_rx_desc *desc)
232{
233	return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
234}
235
236static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
237{
238	int i, reaped = 0;
239	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
240
241	do {
242		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
243			reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
244							     DP_MON_SERVICE_BUDGET,
245							     ATH12K_DP_RX_MONITOR_MODE);
246
247		/* nothing more to reap */
248		if (reaped < DP_MON_SERVICE_BUDGET)
249			return 0;
250
251	} while (time_before(jiffies, timeout));
252
253	ath12k_warn(ab, "dp mon ring purge timeout");
254
255	return -ETIMEDOUT;
256}
257
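/* When hw_cc (HW cookie conversion) is true, buffers are tracked via the
 * pre-allocated rx descriptor pool (dp->rx_desc_free_list/used_list) and the
 * descriptor cookie is used directly; otherwise a per-ring IDR provides the
 * buffer id that is encoded into the cookie.
 */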
258/* Returns number of Rx buffers replenished */
259int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
260				struct dp_rxdma_ring *rx_ring,
261				int req_entries,
262				enum hal_rx_buf_return_buf_manager mgr,
263				bool hw_cc)
264{
265	struct ath12k_buffer_addr *desc;
266	struct hal_srng *srng;
267	struct sk_buff *skb;
268	int num_free;
269	int num_remain;
270	int buf_id;
271	u32 cookie;
272	dma_addr_t paddr;
273	struct ath12k_dp *dp = &ab->dp;
274	struct ath12k_rx_desc_info *rx_desc;
275
276	req_entries = min(req_entries, rx_ring->bufs_max);
277
278	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
279
280	spin_lock_bh(&srng->lock);
281
282	ath12k_hal_srng_access_begin(ab, srng);
283
284	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
285	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
286		req_entries = num_free;
287
288	req_entries = min(num_free, req_entries);
289	num_remain = req_entries;
290
291	while (num_remain > 0) {
292		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
293				    DP_RX_BUFFER_ALIGN_SIZE);
294		if (!skb)
295			break;
296
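		/* make sure the buffer start is aligned to DP_RX_BUFFER_ALIGN_SIZE */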
297		if (!IS_ALIGNED((unsigned long)skb->data,
298				DP_RX_BUFFER_ALIGN_SIZE)) {
299			skb_pull(skb,
300				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
301				 skb->data);
302		}
303
304		paddr = dma_map_single(ab->dev, skb->data,
305				       skb->len + skb_tailroom(skb),
306				       DMA_FROM_DEVICE);
307		if (dma_mapping_error(ab->dev, paddr))
308			goto fail_free_skb;
309
310		if (hw_cc) {
311			spin_lock_bh(&dp->rx_desc_lock);
312
313			/* Get desc from free list and store in used list
314			 * for cleanup purposes
315			 *
316			 * TODO: pass the removed descs rather than
317			 * add/read to optimize
318			 */
319			rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
320							   struct ath12k_rx_desc_info,
321							   list);
322			if (!rx_desc) {
323				spin_unlock_bh(&dp->rx_desc_lock);
324				goto fail_dma_unmap;
325			}
326
327			rx_desc->skb = skb;
328			cookie = rx_desc->cookie;
329			list_del(&rx_desc->list);
330			list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
331
332			spin_unlock_bh(&dp->rx_desc_lock);
333		} else {
334			spin_lock_bh(&rx_ring->idr_lock);
335			buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
336					   rx_ring->bufs_max * 3, GFP_ATOMIC);
337			spin_unlock_bh(&rx_ring->idr_lock);
338			if (buf_id < 0)
339				goto fail_dma_unmap;
340			cookie = u32_encode_bits(mac_id,
341						 DP_RXDMA_BUF_COOKIE_PDEV_ID) |
342				 u32_encode_bits(buf_id,
343						 DP_RXDMA_BUF_COOKIE_BUF_ID);
344		}
345
346		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
347		if (!desc)
348			goto fail_buf_unassign;
349
350		ATH12K_SKB_RXCB(skb)->paddr = paddr;
351
352		num_remain--;
353
354		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
355	}
356
357	ath12k_hal_srng_access_end(ab, srng);
358
359	spin_unlock_bh(&srng->lock);
360
361	return req_entries - num_remain;
362
363fail_buf_unassign:
364	if (hw_cc) {
365		spin_lock_bh(&dp->rx_desc_lock);
366		list_del(&rx_desc->list);
367		list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
368		rx_desc->skb = NULL;
369		spin_unlock_bh(&dp->rx_desc_lock);
370	} else {
371		spin_lock_bh(&rx_ring->idr_lock);
372		idr_remove(&rx_ring->bufs_idr, buf_id);
373		spin_unlock_bh(&rx_ring->idr_lock);
374	}
375fail_dma_unmap:
376	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
377			 DMA_FROM_DEVICE);
378fail_free_skb:
379	dev_kfree_skb_any(skb);
380
381	ath12k_hal_srng_access_end(ab, srng);
382
383	spin_unlock_bh(&srng->lock);
384
385	return req_entries - num_remain;
386}
387
388static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
389					 struct dp_rxdma_ring *rx_ring)
390{
391	struct sk_buff *skb;
392	int buf_id;
393
394	spin_lock_bh(&rx_ring->idr_lock);
395	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
396		idr_remove(&rx_ring->bufs_idr, buf_id);
397		/* TODO: Understand where internal driver does this dma_unmap
398		 * of rxdma_buffer.
399		 */
400		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
401				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
402		dev_kfree_skb_any(skb);
403	}
404
405	idr_destroy(&rx_ring->bufs_idr);
406	spin_unlock_bh(&rx_ring->idr_lock);
407
408	return 0;
409}
410
411static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
412{
413	struct ath12k_dp *dp = &ab->dp;
414	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
415
416	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
417
418	rx_ring = &dp->rxdma_mon_buf_ring;
419	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
420
421	rx_ring = &dp->tx_mon_buf_ring;
422	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
423
424	return 0;
425}
426
427static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
428					  struct dp_rxdma_ring *rx_ring,
429					  u32 ringtype)
430{
431	int num_entries;
432
433	num_entries = rx_ring->refill_buf_ring.size /
434		ath12k_hal_srng_get_entrysize(ab, ringtype);
435
436	rx_ring->bufs_max = num_entries;
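	/* Monitor rings are filled by the monitor-specific helper; the normal
	 * RXDMA buf ring also enables HW cookie conversion (last argument).
	 */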
437	if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
438		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
439	else
440		ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
441					    ab->hw_params->hal_params->rx_buf_rbm,
442					    ringtype == HAL_RXDMA_BUF);
443	return 0;
444}
445
446static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
447{
448	struct ath12k_dp *dp = &ab->dp;
449	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
450	int ret;
451
452	ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
453					     HAL_RXDMA_BUF);
454	if (ret) {
455		ath12k_warn(ab,
456			    "failed to setup HAL_RXDMA_BUF\n");
457		return ret;
458	}
459
460	if (ab->hw_params->rxdma1_enable) {
461		rx_ring = &dp->rxdma_mon_buf_ring;
462		ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
463						     HAL_RXDMA_MONITOR_BUF);
464		if (ret) {
465			ath12k_warn(ab,
466				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
467			return ret;
468		}
469
470		rx_ring = &dp->tx_mon_buf_ring;
471		ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
472						     HAL_TX_MONITOR_BUF);
473		if (ret) {
474			ath12k_warn(ab,
475				    "failed to setup HAL_TX_MONITOR_BUF\n");
476			return ret;
477		}
478	}
479
480	return 0;
481}
482
483static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
484{
485	struct ath12k_pdev_dp *dp = &ar->dp;
486	struct ath12k_base *ab = ar->ab;
487	int i;
488
489	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
490		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
491		ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
492	}
493}
494
495void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
496{
497	struct ath12k_dp *dp = &ab->dp;
498	int i;
499
500	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
501		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
502}
503
504int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
505{
506	struct ath12k_dp *dp = &ab->dp;
507	int ret;
508	int i;
509
510	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
511		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
512					   HAL_REO_DST, i, 0,
513					   DP_REO_DST_RING_SIZE);
514		if (ret) {
515			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
516			goto err_reo_cleanup;
517		}
518	}
519
520	return 0;
521
522err_reo_cleanup:
523	ath12k_dp_rx_pdev_reo_cleanup(ab);
524
525	return ret;
526}
527
528static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
529{
530	struct ath12k_pdev_dp *dp = &ar->dp;
531	struct ath12k_base *ab = ar->ab;
532	int i;
533	int ret;
534	u32 mac_id = dp->mac_id;
535
536	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
537		ret = ath12k_dp_srng_setup(ar->ab,
538					   &dp->rxdma_mon_dst_ring[i],
539					   HAL_RXDMA_MONITOR_DST,
540					   0, mac_id + i,
541					   DP_RXDMA_MONITOR_DST_RING_SIZE);
542		if (ret) {
543			ath12k_warn(ar->ab,
544				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
545			return ret;
546		}
547
548		ret = ath12k_dp_srng_setup(ar->ab,
549					   &dp->tx_mon_dst_ring[i],
550					   HAL_TX_MONITOR_DST,
551					   0, mac_id + i,
552					   DP_TX_MONITOR_DEST_RING_SIZE);
553		if (ret) {
554			ath12k_warn(ar->ab,
555				    "failed to setup HAL_TX_MONITOR_DST\n");
556			return ret;
557		}
558	}
559
560	return 0;
561}
562
563void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
564{
565	struct ath12k_dp *dp = &ab->dp;
566	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
567	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
568
569	spin_lock_bh(&dp->reo_cmd_lock);
570	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
571		list_del(&cmd->list);
572		dma_unmap_single(ab->dev, cmd->data.paddr,
573				 cmd->data.size, DMA_BIDIRECTIONAL);
574		kfree(cmd->data.vaddr);
575		kfree(cmd);
576	}
577
578	list_for_each_entry_safe(cmd_cache, tmp_cache,
579				 &dp->reo_cmd_cache_flush_list, list) {
580		list_del(&cmd_cache->list);
581		dp->reo_cmd_cache_flush_count--;
582		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
583				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
584		kfree(cmd_cache->data.vaddr);
585		kfree(cmd_cache);
586	}
587	spin_unlock_bh(&dp->reo_cmd_lock);
588}
589
590static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
591				   enum hal_reo_cmd_status status)
592{
593	struct ath12k_dp_rx_tid *rx_tid = ctx;
594
595	if (status != HAL_REO_CMD_SUCCESS)
596		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
597			    rx_tid->tid, status);
598
599	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
600			 DMA_BIDIRECTIONAL);
601	kfree(rx_tid->vaddr);
602	rx_tid->vaddr = NULL;
603}
604
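/* Send a REO command on the REO command ring. If a completion callback is
 * supplied, the command context is queued on dp->reo_cmd_list so it can be
 * invoked when the corresponding status arrives.
 */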
605static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
606				  enum hal_reo_cmd_type type,
607				  struct ath12k_hal_reo_cmd *cmd,
608				  void (*cb)(struct ath12k_dp *dp, void *ctx,
609					     enum hal_reo_cmd_status status))
610{
611	struct ath12k_dp *dp = &ab->dp;
612	struct ath12k_dp_rx_reo_cmd *dp_cmd;
613	struct hal_srng *cmd_ring;
614	int cmd_num;
615
616	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
617	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
618
	/* cmd_num should start from 1; on failure the error code is returned */
620	if (cmd_num < 0)
621		return cmd_num;
622
	/* reo cmd ring descriptors have cmd_num starting from 1 */
624	if (cmd_num == 0)
625		return -EINVAL;
626
627	if (!cb)
628		return 0;
629
630	/* Can this be optimized so that we keep the pending command list only
631	 * for tid delete command to free up the resource on the command status
632	 * indication?
633	 */
634	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
635
636	if (!dp_cmd)
637		return -ENOMEM;
638
639	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
640	dp_cmd->cmd_num = cmd_num;
641	dp_cmd->handler = cb;
642
643	spin_lock_bh(&dp->reo_cmd_lock);
644	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
645	spin_unlock_bh(&dp->reo_cmd_lock);
646
647	return 0;
648}
649
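/* Flush the rx tid's hardware queue descriptor from the REO cache chunk by
 * chunk, then issue a final flush with status so the queue memory can be
 * freed from the command completion callback.
 */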
650static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
651				      struct ath12k_dp_rx_tid *rx_tid)
652{
653	struct ath12k_hal_reo_cmd cmd = {0};
654	unsigned long tot_desc_sz, desc_sz;
655	int ret;
656
657	tot_desc_sz = rx_tid->size;
658	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
659
660	while (tot_desc_sz > desc_sz) {
661		tot_desc_sz -= desc_sz;
662		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
663		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
664		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
665					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
666					     NULL);
667		if (ret)
668			ath12k_warn(ab,
669				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
670				    rx_tid->tid, ret);
671	}
672
673	memset(&cmd, 0, sizeof(cmd));
674	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
675	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
676	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
677	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
678				     HAL_REO_CMD_FLUSH_CACHE,
679				     &cmd, ath12k_dp_reo_cmd_free);
680	if (ret) {
681		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
682			   rx_tid->tid, ret);
683		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
684				 DMA_BIDIRECTIONAL);
685		kfree(rx_tid->vaddr);
686		rx_tid->vaddr = NULL;
687	}
688}
689
690static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
691				      enum hal_reo_cmd_status status)
692{
693	struct ath12k_base *ab = dp->ab;
694	struct ath12k_dp_rx_tid *rx_tid = ctx;
695	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
696
697	if (status == HAL_REO_CMD_DRAIN) {
698		goto free_desc;
699	} else if (status != HAL_REO_CMD_SUCCESS) {
700		/* Shouldn't happen! Cleanup in case of other failure? */
701		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
702			    rx_tid->tid, status);
703		return;
704	}
705
706	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
707	if (!elem)
708		goto free_desc;
709
710	elem->ts = jiffies;
711	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
712
713	spin_lock_bh(&dp->reo_cmd_lock);
714	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
715	dp->reo_cmd_cache_flush_count++;
716
717	/* Flush and invalidate aged REO desc from HW cache */
718	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
719				 list) {
720		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
721		    time_after(jiffies, elem->ts +
722			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
723			list_del(&elem->list);
724			dp->reo_cmd_cache_flush_count--;
725
726			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
727			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
728			 * is used in only two contexts, one is in this function called
729			 * from napi and the other in ath12k_dp_free during core destroy.
730			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
732			 * delete to this list. Hence unlock-lock is safe here.
733			 */
734			spin_unlock_bh(&dp->reo_cmd_lock);
735
736			ath12k_dp_reo_cache_flush(ab, &elem->data);
737			kfree(elem);
738			spin_lock_bh(&dp->reo_cmd_lock);
739		}
740	}
741	spin_unlock_bh(&dp->reo_cmd_lock);
742
743	return;
744free_desc:
745	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
746			 DMA_BIDIRECTIONAL);
747	kfree(rx_tid->vaddr);
748	rx_tid->vaddr = NULL;
749}
750
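/* Program the REO queue reference LUT entry for (peer_id, tid) with the
 * physical address of the rx reorder queue (reoq_lut_support targets only).
 */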
751static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
752					  dma_addr_t paddr)
753{
754	struct ath12k_reo_queue_ref *qref;
755	struct ath12k_dp *dp = &ab->dp;
756
757	if (!ab->hw_params->reoq_lut_support)
758		return;
759
	/* TODO: select the LUT based on whether this is an ML peer or not.
	 * The code below assumes a non-ML peer.
	 */
763	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
764			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
765
766	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
767				      BUFFER_ADDR_INFO0_ADDR);
768	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
769				      BUFFER_ADDR_INFO1_ADDR) |
770		      u32_encode_bits(tid, DP_REO_QREF_NUM);
771}
772
773static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
774{
775	struct ath12k_reo_queue_ref *qref;
776	struct ath12k_dp *dp = &ab->dp;
777
778	if (!ab->hw_params->reoq_lut_support)
779		return;
780
	/* TODO: select the LUT based on whether this is an ML peer or not.
	 * The code below assumes a non-ML peer.
	 */
784	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
785			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
786
787	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
788	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
789		      u32_encode_bits(tid, DP_REO_QREF_NUM);
790}
791
792void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
793				  struct ath12k_peer *peer, u8 tid)
794{
795	struct ath12k_hal_reo_cmd cmd = {0};
796	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
797	int ret;
798
799	if (!rx_tid->active)
800		return;
801
802	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
803	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
804	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
805	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
806	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
807				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
808				     ath12k_dp_rx_tid_del_func);
809	if (ret) {
810		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
811			   tid, ret);
812		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
813				 DMA_BIDIRECTIONAL);
814		kfree(rx_tid->vaddr);
815		rx_tid->vaddr = NULL;
816	}
817
818	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
819
820	rx_tid->active = false;
821}
822
823/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
824 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
825 * that.
826 */
827static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
828					 struct hal_reo_dest_ring *ring,
829					 enum hal_wbm_rel_bm_act action)
830{
831	struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
832	struct hal_wbm_release_ring *desc;
833	struct ath12k_dp *dp = &ab->dp;
834	struct hal_srng *srng;
835	int ret = 0;
836
837	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
838
839	spin_lock_bh(&srng->lock);
840
841	ath12k_hal_srng_access_begin(ab, srng);
842
843	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
844	if (!desc) {
845		ret = -ENOBUFS;
846		goto exit;
847	}
848
849	ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
850
851exit:
852	ath12k_hal_srng_access_end(ab, srng);
853
854	spin_unlock_bh(&srng->lock);
855
856	return ret;
857}
858
859static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
860				       bool rel_link_desc)
861{
862	struct ath12k_base *ab = rx_tid->ab;
863
864	lockdep_assert_held(&ab->base_lock);
865
866	if (rx_tid->dst_ring_desc) {
867		if (rel_link_desc)
868			ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
869						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
870		kfree(rx_tid->dst_ring_desc);
871		rx_tid->dst_ring_desc = NULL;
872	}
873
874	rx_tid->cur_sn = 0;
875	rx_tid->last_frag_no = 0;
876	rx_tid->rx_frag_bitmap = 0;
877	__skb_queue_purge(&rx_tid->rx_frags);
878}
879
880void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
881{
882	struct ath12k_dp_rx_tid *rx_tid;
883	int i;
884
885	lockdep_assert_held(&ar->ab->base_lock);
886
887	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
888		rx_tid = &peer->rx_tid[i];
889
890		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
891		ath12k_dp_rx_frags_cleanup(rx_tid, true);
892
893		spin_unlock_bh(&ar->ab->base_lock);
894		del_timer_sync(&rx_tid->frag_timer);
895		spin_lock_bh(&ar->ab->base_lock);
896	}
897}
898
899static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
900					 struct ath12k_peer *peer,
901					 struct ath12k_dp_rx_tid *rx_tid,
902					 u32 ba_win_sz, u16 ssn,
903					 bool update_ssn)
904{
905	struct ath12k_hal_reo_cmd cmd = {0};
906	int ret;
907
908	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
909	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
910	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
911	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
912	cmd.ba_window_size = ba_win_sz;
913
914	if (update_ssn) {
915		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
916		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
917	}
918
919	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
920				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
921				     NULL);
922	if (ret) {
923		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
924			    rx_tid->tid, ret);
925		return ret;
926	}
927
928	rx_tid->ba_win_sz = ba_win_sz;
929
930	return 0;
931}
932
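/* Set up (or update) the hardware rx reorder queue for the given peer/tid:
 * allocate and DMA-map the REO queue descriptor and publish it either via
 * the REO queue LUT or a WMI reorder queue setup command.
 */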
933int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
934				u8 tid, u32 ba_win_sz, u16 ssn,
935				enum hal_pn_type pn_type)
936{
937	struct ath12k_base *ab = ar->ab;
938	struct ath12k_dp *dp = &ab->dp;
939	struct hal_rx_reo_queue *addr_aligned;
940	struct ath12k_peer *peer;
941	struct ath12k_dp_rx_tid *rx_tid;
942	u32 hw_desc_sz;
943	void *vaddr;
944	dma_addr_t paddr;
945	int ret;
946
947	spin_lock_bh(&ab->base_lock);
948
949	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
950	if (!peer) {
951		spin_unlock_bh(&ab->base_lock);
952		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
953		return -ENOENT;
954	}
955
956	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
957		spin_unlock_bh(&ab->base_lock);
958		ath12k_warn(ab, "reo qref table is not setup\n");
959		return -EINVAL;
960	}
961
962	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
963		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
964			    peer->peer_id, tid);
965		spin_unlock_bh(&ab->base_lock);
966		return -EINVAL;
967	}
968
969	rx_tid = &peer->rx_tid[tid];
970	/* Update the tid queue if it is already setup */
971	if (rx_tid->active) {
972		paddr = rx_tid->paddr;
973		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
974						    ba_win_sz, ssn, true);
975		spin_unlock_bh(&ab->base_lock);
976		if (ret) {
977			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
978			return ret;
979		}
980
981		if (!ab->hw_params->reoq_lut_support) {
982			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
983								     peer_mac,
984								     paddr, tid, 1,
985								     ba_win_sz);
986			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
988					    tid, ret);
989				return ret;
990			}
991		}
992
993		return 0;
994	}
995
996	rx_tid->tid = tid;
997
998	rx_tid->ba_win_sz = ba_win_sz;
999
1000	/* TODO: Optimize the memory allocation for qos tid based on
1001	 * the actual BA window size in REO tid update path.
1002	 */
1003	if (tid == HAL_DESC_REO_NON_QOS_TID)
1004		hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
1005	else
1006		hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
1007
1008	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
1009	if (!vaddr) {
1010		spin_unlock_bh(&ab->base_lock);
1011		return -ENOMEM;
1012	}
1013
1014	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
1015
1016	ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
1017				   ssn, pn_type);
1018
1019	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1020			       DMA_BIDIRECTIONAL);
1021
1022	ret = dma_mapping_error(ab->dev, paddr);
1023	if (ret) {
1024		spin_unlock_bh(&ab->base_lock);
1025		goto err_mem_free;
1026	}
1027
1028	rx_tid->vaddr = vaddr;
1029	rx_tid->paddr = paddr;
1030	rx_tid->size = hw_desc_sz;
1031	rx_tid->active = true;
1032
1033	if (ab->hw_params->reoq_lut_support) {
1034		/* Update the REO queue LUT at the corresponding peer id
1035		 * and tid with qaddr.
1036		 */
1037		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
1038		spin_unlock_bh(&ab->base_lock);
1039	} else {
1040		spin_unlock_bh(&ab->base_lock);
1041		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1042							     paddr, tid, 1, ba_win_sz);
1043	}
1044
1045	return ret;
1046
1047err_mem_free:
1048	kfree(vaddr);
1049
1050	return ret;
1051}
1052
1053int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
1054			     struct ieee80211_ampdu_params *params)
1055{
1056	struct ath12k_base *ab = ar->ab;
1057	struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
1058	int vdev_id = arsta->arvif->vdev_id;
1059	int ret;
1060
1061	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
1062					  params->tid, params->buf_size,
1063					  params->ssn, arsta->pn_type);
1064	if (ret)
		ath12k_warn(ab, "failed to setup rx tid: %d\n", ret);
1066
1067	return ret;
1068}
1069
1070int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
1071			    struct ieee80211_ampdu_params *params)
1072{
1073	struct ath12k_base *ab = ar->ab;
1074	struct ath12k_peer *peer;
1075	struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
1076	int vdev_id = arsta->arvif->vdev_id;
1077	bool active;
1078	int ret;
1079
1080	spin_lock_bh(&ab->base_lock);
1081
1082	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
1083	if (!peer) {
1084		spin_unlock_bh(&ab->base_lock);
1085		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1086		return -ENOENT;
1087	}
1088
1089	active = peer->rx_tid[params->tid].active;
1090
1091	if (!active) {
1092		spin_unlock_bh(&ab->base_lock);
1093		return 0;
1094	}
1095
1096	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1097	spin_unlock_bh(&ab->base_lock);
1098	if (ret) {
1099		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1100			    params->tid, ret);
1101		return ret;
1102	}
1103
1104	return ret;
1105}
1106
1107int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
1108				       const u8 *peer_addr,
1109				       enum set_key_cmd key_cmd,
1110				       struct ieee80211_key_conf *key)
1111{
1112	struct ath12k *ar = arvif->ar;
1113	struct ath12k_base *ab = ar->ab;
1114	struct ath12k_hal_reo_cmd cmd = {0};
1115	struct ath12k_peer *peer;
1116	struct ath12k_dp_rx_tid *rx_tid;
1117	u8 tid;
1118	int ret = 0;
1119
1120	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1121	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1122	 * for now.
1123	 */
1124	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1125		return 0;
1126
1127	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
1128	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
1129		    HAL_REO_CMD_UPD0_PN_SIZE |
1130		    HAL_REO_CMD_UPD0_PN_VALID |
1131		    HAL_REO_CMD_UPD0_PN_CHECK |
1132		    HAL_REO_CMD_UPD0_SVLD;
1133
1134	switch (key->cipher) {
1135	case WLAN_CIPHER_SUITE_TKIP:
1136	case WLAN_CIPHER_SUITE_CCMP:
1137	case WLAN_CIPHER_SUITE_CCMP_256:
1138	case WLAN_CIPHER_SUITE_GCMP:
1139	case WLAN_CIPHER_SUITE_GCMP_256:
1140		if (key_cmd == SET_KEY) {
1141			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
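			/* TKIP/CCMP/GCMP all use a 48-bit PN/TSC */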
1142			cmd.pn_size = 48;
1143		}
1144		break;
1145	default:
1146		break;
1147	}
1148
1149	spin_lock_bh(&ab->base_lock);
1150
1151	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
1152	if (!peer) {
1153		spin_unlock_bh(&ab->base_lock);
1154		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
1155			    peer_addr);
1156		return -ENOENT;
1157	}
1158
1159	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1160		rx_tid = &peer->rx_tid[tid];
1161		if (!rx_tid->active)
1162			continue;
1163		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1164		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1165		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
1166					     HAL_REO_CMD_UPDATE_RX_QUEUE,
1167					     &cmd, NULL);
1168		if (ret) {
1169			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
1170				    tid, peer_addr, ret);
1171			break;
1172		}
1173	}
1174
1175	spin_unlock_bh(&ab->base_lock);
1176
1177	return ret;
1178}
1179
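/* Return the user_stats index already mapped to this peer_id, or the first
 * unused slot; -EINVAL when all slots are taken.
 */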
1180static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1181				      u16 peer_id)
1182{
1183	int i;
1184
1185	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1186		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1187			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1188				return i;
1189		} else {
1190			return i;
1191		}
1192	}
1193
1194	return -EINVAL;
1195}
1196
1197static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
1198					   u16 tag, u16 len, const void *ptr,
1199					   void *data)
1200{
1201	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
1202	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
1203	const struct htt_ppdu_stats_user_rate *user_rate;
1204	struct htt_ppdu_stats_info *ppdu_info;
1205	struct htt_ppdu_user_stats *user_stats;
1206	int cur_user;
1207	u16 peer_id;
1208
1209	ppdu_info = data;
1210
1211	switch (tag) {
1212	case HTT_PPDU_STATS_TAG_COMMON:
1213		if (len < sizeof(struct htt_ppdu_stats_common)) {
1214			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1215				    len, tag);
1216			return -EINVAL;
1217		}
1218		memcpy(&ppdu_info->ppdu_stats.common, ptr,
1219		       sizeof(struct htt_ppdu_stats_common));
1220		break;
1221	case HTT_PPDU_STATS_TAG_USR_RATE:
1222		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1223			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1224				    len, tag);
1225			return -EINVAL;
1226		}
1227		user_rate = ptr;
1228		peer_id = le16_to_cpu(user_rate->sw_peer_id);
1229		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1230						      peer_id);
1231		if (cur_user < 0)
1232			return -EINVAL;
1233		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1234		user_stats->peer_id = peer_id;
1235		user_stats->is_valid_peer_id = true;
1236		memcpy(&user_stats->rate, ptr,
1237		       sizeof(struct htt_ppdu_stats_user_rate));
1238		user_stats->tlv_flags |= BIT(tag);
1239		break;
1240	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1241		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1242			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1243				    len, tag);
1244			return -EINVAL;
1245		}
1246
1247		cmplt_cmn = ptr;
1248		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
1249		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1250						      peer_id);
1251		if (cur_user < 0)
1252			return -EINVAL;
1253		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1254		user_stats->peer_id = peer_id;
1255		user_stats->is_valid_peer_id = true;
1256		memcpy(&user_stats->cmpltn_cmn, ptr,
1257		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1258		user_stats->tlv_flags |= BIT(tag);
1259		break;
1260	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1261		if (len <
1262		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1263			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1264				    len, tag);
1265			return -EINVAL;
1266		}
1267
1268		ba_status = ptr;
1269		peer_id = le16_to_cpu(ba_status->sw_peer_id);
1270		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1271						      peer_id);
1272		if (cur_user < 0)
1273			return -EINVAL;
1274		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1275		user_stats->peer_id = peer_id;
1276		user_stats->is_valid_peer_id = true;
1277		memcpy(&user_stats->ack_ba, ptr,
1278		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1279		user_stats->tlv_flags |= BIT(tag);
1280		break;
1281	}
1282	return 0;
1283}
1284
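/* Walk the HTT TLV stream, calling @iter for each TLV payload. A malformed
 * TLV aborts the walk; an -ENOMEM from @iter is propagated, other callback
 * errors are ignored.
 */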
1285#if defined(__linux__)
1286static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
1287#elif defined(__FreeBSD__)
1288static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const u8 *ptr, size_t len,
1289#endif
1290				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
1291					      const void *ptr, void *data),
1292				  void *data)
1293{
1294	const struct htt_tlv *tlv;
1295#if defined(__linux__)
1296	const void *begin = ptr;
1297#elif defined(__FreeBSD__)
1298	const u8 *begin = ptr;
1299#endif
1300	u16 tlv_tag, tlv_len;
1301	int ret = -EINVAL;
1302
1303	while (len > 0) {
1304		if (len < sizeof(*tlv)) {
1305			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1306				   ptr - begin, len, sizeof(*tlv));
1307			return -EINVAL;
1308		}
1309#if defined(__linux__)
1310		tlv = (struct htt_tlv *)ptr;
1311#elif defined(__FreeBSD__)
1312		tlv = (const struct htt_tlv *)ptr;
1313#endif
1314		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
1315		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
1316		ptr += sizeof(*tlv);
1317		len -= sizeof(*tlv);
1318
1319		if (tlv_len > len) {
1320			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1321				   tlv_tag, ptr - begin, len, tlv_len);
1322			return -EINVAL;
1323		}
1324		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1325		if (ret == -ENOMEM)
1326			return ret;
1327
1328		ptr += tlv_len;
1329		len -= tlv_len;
1330	}
1331	return 0;
1332}
1333
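/* Translate one user's PPDU stats TLVs into the station's tx rate info and
 * per-peer tx statistics.
 */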
1334static void
1335ath12k_update_per_peer_tx_stats(struct ath12k *ar,
1336				struct htt_ppdu_stats *ppdu_stats, u8 user)
1337{
1338	struct ath12k_base *ab = ar->ab;
1339	struct ath12k_peer *peer;
1340	struct ieee80211_sta *sta;
1341	struct ath12k_sta *arsta;
1342	struct htt_ppdu_stats_user_rate *user_rate;
1343	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1344	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1345	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1346	int ret;
1347	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1348	u32 v, succ_bytes = 0;
1349	u16 tones, rate = 0, succ_pkts = 0;
1350	u32 tx_duration = 0;
1351	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1352	bool is_ampdu = false;
1353
1354	if (!usr_stats)
1355		return;
1356
1357	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1358		return;
1359
1360	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1361		is_ampdu =
1362			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1363
1364	if (usr_stats->tlv_flags &
1365	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1366		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
1367		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
1368					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
1369		tid = le32_get_bits(usr_stats->ack_ba.info,
1370				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
1371	}
1372
1373	if (common->fes_duration_us)
1374		tx_duration = le32_to_cpu(common->fes_duration_us);
1375
1376	user_rate = &usr_stats->rate;
1377	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1378	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1379	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1380	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1381	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1382	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1383
	/* Note: if the host has configured fixed rates, or in some other
	 * special cases, broadcast/management frames are sent at different
	 * rates. Should firmware rate control be skipped for these?
	 */
1388
1389	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
1390		ath12k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1391		return;
1392	}
1393
1394	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
1395		ath12k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1396		return;
1397	}
1398
1399	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
1400		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1401			    mcs, nss);
1402		return;
1403	}
1404
1405	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1406		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
1407							    flags,
1408							    &rate_idx,
1409							    &rate);
1410		if (ret < 0)
1411			return;
1412	}
1413
1414	rcu_read_lock();
1415	spin_lock_bh(&ab->base_lock);
1416	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
1417
1418	if (!peer || !peer->sta) {
1419		spin_unlock_bh(&ab->base_lock);
1420		rcu_read_unlock();
1421		return;
1422	}
1423
1424	sta = peer->sta;
1425	arsta = (struct ath12k_sta *)sta->drv_priv;
1426
1427	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1428
1429	switch (flags) {
1430	case WMI_RATE_PREAMBLE_OFDM:
1431		arsta->txrate.legacy = rate;
1432		break;
1433	case WMI_RATE_PREAMBLE_CCK:
1434		arsta->txrate.legacy = rate;
1435		break;
1436	case WMI_RATE_PREAMBLE_HT:
1437		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1438		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1439		if (sgi)
1440			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1441		break;
1442	case WMI_RATE_PREAMBLE_VHT:
1443		arsta->txrate.mcs = mcs;
1444		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1445		if (sgi)
1446			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1447		break;
1448	case WMI_RATE_PREAMBLE_HE:
1449		arsta->txrate.mcs = mcs;
1450		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1451		arsta->txrate.he_dcm = dcm;
1452		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
1453		tones = le16_to_cpu(user_rate->ru_end) -
1454			le16_to_cpu(user_rate->ru_start) + 1;
1455		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
1456		arsta->txrate.he_ru_alloc = v;
1457		break;
1458	}
1459
1460	arsta->txrate.nss = nss;
1461	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
1462	arsta->tx_duration += tx_duration;
1463	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1464
	/* PPDU stats reported for a mgmt packet don't carry valid tx bytes,
	 * so skip the peer stats update for mgmt packets.
	 */
1468	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1469		memset(peer_stats, 0, sizeof(*peer_stats));
1470		peer_stats->succ_pkts = succ_pkts;
1471		peer_stats->succ_bytes = succ_bytes;
1472		peer_stats->is_ampdu = is_ampdu;
1473		peer_stats->duration = tx_duration;
1474		peer_stats->ba_fails =
1475			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1476			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1477	}
1478
1479	spin_unlock_bh(&ab->base_lock);
1480	rcu_read_unlock();
1481}
1482
1483static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
1484					 struct htt_ppdu_stats *ppdu_stats)
1485{
1486	u8 user;
1487
1488	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1489		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1490}
1491
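/* Find the ppdu_stats_info entry for this ppdu_id or allocate a new one; if
 * the list has grown beyond HTT_PPDU_DESC_MAX_DEPTH, the oldest entry is
 * flushed into the per-peer tx stats and freed first.
 */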
1492static
1493struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
1494							u32 ppdu_id)
1495{
1496	struct htt_ppdu_stats_info *ppdu_info;
1497
1498	lockdep_assert_held(&ar->data_lock);
1499	if (!list_empty(&ar->ppdu_stats_info)) {
1500		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1501			if (ppdu_info->ppdu_id == ppdu_id)
1502				return ppdu_info;
1503		}
1504
1505		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1506			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1507						     typeof(*ppdu_info), list);
1508			list_del(&ppdu_info->list);
1509			ar->ppdu_stat_list_depth--;
1510			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1511			kfree(ppdu_info);
1512		}
1513	}
1514
1515	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1516	if (!ppdu_info)
1517		return NULL;
1518
1519	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1520	ar->ppdu_stat_list_depth++;
1521
1522	return ppdu_info;
1523}
1524
1525static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
1526				       struct htt_ppdu_user_stats *usr_stats)
1527{
1528	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
1529	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
1530	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
1531	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
1532	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
1533	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
1534	peer->ppdu_stats_delayba.resp_rate_flags =
1535		le32_to_cpu(usr_stats->rate.resp_rate_flags);
1536
1537	peer->delayba_flag = true;
1538}
1539
1540static void ath12k_copy_to_bar(struct ath12k_peer *peer,
1541			       struct htt_ppdu_user_stats *usr_stats)
1542{
1543	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
1544	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
1545	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
1546	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
1547	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
1548	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
1549	usr_stats->rate.resp_rate_flags =
1550		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
1551
1552	peer->delayba_flag = false;
1553}
1554
1555static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
1556				      struct sk_buff *skb)
1557{
1558	struct ath12k_htt_ppdu_stats_msg *msg;
1559	struct htt_ppdu_stats_info *ppdu_info;
1560	struct ath12k_peer *peer = NULL;
1561	struct htt_ppdu_user_stats *usr_stats = NULL;
1562	u32 peer_id = 0;
1563	struct ath12k *ar;
1564	int ret, i;
1565	u8 pdev_id;
1566	u32 ppdu_id, len;
1567
1568	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
1569	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
1570	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
1571	ppdu_id = le32_to_cpu(msg->ppdu_id);
1572
1573	rcu_read_lock();
1574	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1575	if (!ar) {
1576		ret = -EINVAL;
1577		goto exit;
1578	}
1579
1580	spin_lock_bh(&ar->data_lock);
1581	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1582	if (!ppdu_info) {
1583		spin_unlock_bh(&ar->data_lock);
1584		ret = -EINVAL;
1585		goto exit;
1586	}
1587
1588	ppdu_info->ppdu_id = ppdu_id;
1589	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
1590				     ath12k_htt_tlv_ppdu_stats_parse,
1591				     (void *)ppdu_info);
1592	if (ret) {
1593		spin_unlock_bh(&ar->data_lock);
1594		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
1595		goto exit;
1596	}
1597
1598	/* back up data rate tlv for all peers */
1599	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
1600	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
1601	    ppdu_info->delay_ba) {
1602		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
1603			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1604			spin_lock_bh(&ab->base_lock);
1605			peer = ath12k_peer_find_by_id(ab, peer_id);
1606			if (!peer) {
1607				spin_unlock_bh(&ab->base_lock);
1608				continue;
1609			}
1610
1611			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1612			if (usr_stats->delay_ba)
1613				ath12k_copy_to_delay_stats(peer, usr_stats);
1614			spin_unlock_bh(&ab->base_lock);
1615		}
1616	}
1617
1618	/* restore all peers' data rate tlv to mu-bar tlv */
1619	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
1620	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
1621		for (i = 0; i < ppdu_info->bar_num_users; i++) {
1622			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1623			spin_lock_bh(&ab->base_lock);
1624			peer = ath12k_peer_find_by_id(ab, peer_id);
1625			if (!peer) {
1626				spin_unlock_bh(&ab->base_lock);
1627				continue;
1628			}
1629
1630			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1631			if (peer->delayba_flag)
1632				ath12k_copy_to_bar(peer, usr_stats);
1633			spin_unlock_bh(&ab->base_lock);
1634		}
1635	}
1636
1637	spin_unlock_bh(&ar->data_lock);
1638
1639exit:
1640	rcu_read_unlock();
1641
1642	return ret;
1643}
1644
1645static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
1646						struct sk_buff *skb)
1647{
1648	struct ath12k_htt_mlo_offset_msg *msg;
1649	struct ath12k_pdev *pdev;
1650	struct ath12k *ar;
1651	u8 pdev_id;
1652
1653	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
1654	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
1655			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
1656	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1657
1658	if (!ar) {
1659		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
1660		return;
1661	}
1662
1663	spin_lock_bh(&ar->data_lock);
1664	pdev = ar->pdev;
1665
1666	pdev->timestamp.info = __le32_to_cpu(msg->info);
1667	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
1668	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
1669	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
1670	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
1671	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
1672	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
1673	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
1674
1675	spin_unlock_bh(&ar->data_lock);
1676}
1677
1678void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
1679				       struct sk_buff *skb)
1680{
1681	struct ath12k_dp *dp = &ab->dp;
1682	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1683	enum htt_t2h_msg_type type;
1684	u16 peer_id;
1685	u8 vdev_id;
1686	u8 mac_addr[ETH_ALEN];
1687	u16 peer_mac_h16;
1688	u16 ast_hash = 0;
1689	u16 hw_peer_id;
1690
1691	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
1692
1693	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1694
1695	switch (type) {
1696	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1697		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
1698						      HTT_T2H_VERSION_CONF_MAJOR);
1699		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
1700						      HTT_T2H_VERSION_CONF_MINOR);
1701		complete(&dp->htt_tgt_version_received);
1702		break;
1703	/* TODO: remove unused peer map versions after testing */
1704	case HTT_T2H_MSG_TYPE_PEER_MAP:
1705		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1706					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1707		peer_id = le32_get_bits(resp->peer_map_ev.info,
1708					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1709		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1710					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1711		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1712				       peer_mac_h16, mac_addr);
1713		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1714		break;
1715	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1716		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1717					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1718		peer_id = le32_get_bits(resp->peer_map_ev.info,
1719					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1720		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1721					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1722		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1723				       peer_mac_h16, mac_addr);
1724		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1725					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
1726		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
1727					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
1728		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1729				      hw_peer_id);
1730		break;
1731	case HTT_T2H_MSG_TYPE_PEER_MAP3:
1732		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1733					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1734		peer_id = le32_get_bits(resp->peer_map_ev.info,
1735					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1736		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1737					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1738		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1739				       peer_mac_h16, mac_addr);
1740		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1741				      peer_id);
1742		break;
1743	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1744	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1745		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
1746					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1747		ath12k_peer_unmap_event(ab, peer_id);
1748		break;
1749	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1750		ath12k_htt_pull_ppdu_stats(ab, skb);
1751		break;
1752	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1753		break;
1754	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
1755		ath12k_htt_mlo_offset_event_handler(ab, skb);
1756		break;
1757	default:
1758		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
1759			   type);
1760		break;
1761	}
1762
1763	dev_kfree_skb_any(skb);
1764}
1765
1766static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1767				      struct sk_buff_head *msdu_list,
1768				      struct sk_buff *first, struct sk_buff *last,
1769				      u8 l3pad_bytes, int msdu_len)
1770{
1771	struct ath12k_base *ab = ar->ab;
1772	struct sk_buff *skb;
1773	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1774	int buf_first_hdr_len, buf_first_len;
1775	struct hal_rx_desc *ldesc;
1776	int space_extra, rem_len, buf_len;
1777	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
1778
1779	/* As the msdu is spread across multiple rx buffers,
1780	 * find the offset to the start of msdu for computing
1781	 * the length of the msdu in the first buffer.
1782	 */
1783	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1784	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1785
1786	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1787		skb_put(first, buf_first_hdr_len + msdu_len);
1788		skb_pull(first, buf_first_hdr_len);
1789		return 0;
1790	}
1791
1792	ldesc = (struct hal_rx_desc *)last->data;
1793	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1794	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1795
1796	/* MSDU spans over multiple buffers because the length of the MSDU
1797	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1798	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1799	 */
1800	skb_put(first, DP_RX_BUFFER_SIZE);
1801	skb_pull(first, buf_first_hdr_len);
1802
1803	/* When an MSDU spread over multiple buffers MSDU_END
1804	 * tlvs are valid only in the last buffer. Copy those tlvs.
1805	 */
1806	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1807
1808	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1809	if (space_extra > 0 &&
1810	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1811		/* Free up all buffers of the MSDU */
1812		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1813			rxcb = ATH12K_SKB_RXCB(skb);
1814			if (!rxcb->is_continuation) {
1815				dev_kfree_skb_any(skb);
1816				break;
1817			}
1818			dev_kfree_skb_any(skb);
1819		}
1820		return -ENOMEM;
1821	}
1822
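	/* Append the payload of each continuation buffer to the first skb,
	 * skipping the per-buffer hal rx descriptor, until the remaining
	 * msdu length is consumed or the last (non-continuation) buffer of
	 * the MSDU is reached.
	 */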
1823	rem_len = msdu_len - buf_first_len;
1824	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1825		rxcb = ATH12K_SKB_RXCB(skb);
1826		if (rxcb->is_continuation)
1827			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1828		else
1829			buf_len = rem_len;
1830
1831		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1832			WARN_ON_ONCE(1);
1833			dev_kfree_skb_any(skb);
1834			return -EINVAL;
1835		}
1836
1837		skb_put(skb, buf_len + hal_rx_desc_sz);
1838		skb_pull(skb, hal_rx_desc_sz);
1839		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1840					  buf_len);
1841		dev_kfree_skb_any(skb);
1842
1843		rem_len -= buf_len;
1844		if (!rxcb->is_continuation)
1845			break;
1846	}
1847
1848	return 0;
1849}
1850
1851static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1852						      struct sk_buff *first)
1853{
1854	struct sk_buff *skb;
1855	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1856
1857	if (!rxcb->is_continuation)
1858		return first;
1859
1860	skb_queue_walk(msdu_list, skb) {
1861		rxcb = ATH12K_SKB_RXCB(skb);
1862		if (!rxcb->is_continuation)
1863			return skb;
1864	}
1865
1866	return NULL;
1867}
1868
1869static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
1870{
1871	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1872	struct ath12k_base *ab = ar->ab;
1873	bool ip_csum_fail, l4_csum_fail;
1874
1875	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
1876	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
1877
1878	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1879			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1880}
1881
1882static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
1883				       enum hal_encrypt_type enctype)
1884{
1885	switch (enctype) {
1886	case HAL_ENCRYPT_TYPE_OPEN:
1887	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1888	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1889		return 0;
1890	case HAL_ENCRYPT_TYPE_CCMP_128:
1891		return IEEE80211_CCMP_MIC_LEN;
1892	case HAL_ENCRYPT_TYPE_CCMP_256:
1893		return IEEE80211_CCMP_256_MIC_LEN;
1894	case HAL_ENCRYPT_TYPE_GCMP_128:
1895	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1896		return IEEE80211_GCMP_MIC_LEN;
1897	case HAL_ENCRYPT_TYPE_WEP_40:
1898	case HAL_ENCRYPT_TYPE_WEP_104:
1899	case HAL_ENCRYPT_TYPE_WEP_128:
1900	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1901	case HAL_ENCRYPT_TYPE_WAPI:
1902		break;
1903	}
1904
1905	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1906	return 0;
1907}
1908
1909static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
1910					 enum hal_encrypt_type enctype)
1911{
1912	switch (enctype) {
1913	case HAL_ENCRYPT_TYPE_OPEN:
1914		return 0;
1915	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1916	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1917		return IEEE80211_TKIP_IV_LEN;
1918	case HAL_ENCRYPT_TYPE_CCMP_128:
1919		return IEEE80211_CCMP_HDR_LEN;
1920	case HAL_ENCRYPT_TYPE_CCMP_256:
1921		return IEEE80211_CCMP_256_HDR_LEN;
1922	case HAL_ENCRYPT_TYPE_GCMP_128:
1923	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1924		return IEEE80211_GCMP_HDR_LEN;
1925	case HAL_ENCRYPT_TYPE_WEP_40:
1926	case HAL_ENCRYPT_TYPE_WEP_104:
1927	case HAL_ENCRYPT_TYPE_WEP_128:
1928	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1929	case HAL_ENCRYPT_TYPE_WAPI:
1930		break;
1931	}
1932
1933	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1934	return 0;
1935}
1936
1937static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
1938				       enum hal_encrypt_type enctype)
1939{
1940	switch (enctype) {
1941	case HAL_ENCRYPT_TYPE_OPEN:
1942	case HAL_ENCRYPT_TYPE_CCMP_128:
1943	case HAL_ENCRYPT_TYPE_CCMP_256:
1944	case HAL_ENCRYPT_TYPE_GCMP_128:
1945	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1946		return 0;
1947	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1948	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1949		return IEEE80211_TKIP_ICV_LEN;
1950	case HAL_ENCRYPT_TYPE_WEP_40:
1951	case HAL_ENCRYPT_TYPE_WEP_104:
1952	case HAL_ENCRYPT_TYPE_WEP_128:
1953	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1954	case HAL_ENCRYPT_TYPE_WAPI:
1955		break;
1956	}
1957
1958	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1959	return 0;
1960}
1961
1962static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
1963					 struct sk_buff *msdu,
1964					 enum hal_encrypt_type enctype,
1965					 struct ieee80211_rx_status *status)
1966{
1967	struct ath12k_base *ab = ar->ab;
1968	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1969	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1970	struct ieee80211_hdr *hdr;
1971	size_t hdr_len;
1972	u8 *crypto_hdr;
1973	u16 qos_ctl;
1974
1975	/* pull decapped header */
1976	hdr = (struct ieee80211_hdr *)msdu->data;
1977	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1978	skb_pull(msdu, hdr_len);
1979
1980	/* Rebuild QoS header */
1981	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1982
1983	/* Reset the order bit as the HT_Control header is stripped */
1984	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1985
1986	qos_ctl = rxcb->tid;
1987
1988	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
1989		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1990
1991	/* TODO: Add other QoS ctl fields when required */
1992
1993	/* copy decap header before overwriting for reuse below */
1994	memcpy(decap_hdr, hdr, hdr_len);
1995
1996	/* Rebuild crypto header for mac80211 use */
1997	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1998		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
1999		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
2000						    rxcb->rx_desc, crypto_hdr,
2001						    enctype);
2002	}
2003
2004	memcpy(skb_push(msdu,
2005			IEEE80211_QOS_CTL_LEN), &qos_ctl,
2006			IEEE80211_QOS_CTL_LEN);
2007	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2008}
2009
2010static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
2011				       enum hal_encrypt_type enctype,
2012				       struct ieee80211_rx_status *status,
2013				       bool decrypted)
2014{
2015	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2016	struct ieee80211_hdr *hdr;
2017	size_t hdr_len;
2018	size_t crypto_len;
2019
2020	if (!rxcb->is_first_msdu ||
2021	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2022		WARN_ON_ONCE(1);
2023		return;
2024	}
2025
2026	skb_trim(msdu, msdu->len - FCS_LEN);
2027
2028	if (!decrypted)
2029		return;
2030
2031	hdr = (void *)msdu->data;
2032
2033	/* Tail */
2034	if (status->flag & RX_FLAG_IV_STRIPPED) {
2035		skb_trim(msdu, msdu->len -
2036			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2037
2038		skb_trim(msdu, msdu->len -
2039			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2040	} else {
2041		/* MIC */
2042		if (status->flag & RX_FLAG_MIC_STRIPPED)
2043			skb_trim(msdu, msdu->len -
2044				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2045
2046		/* ICV */
2047		if (status->flag & RX_FLAG_ICV_STRIPPED)
2048			skb_trim(msdu, msdu->len -
2049				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2050	}
2051
2052	/* MMIC */
2053	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2054	    !ieee80211_has_morefrags(hdr->frame_control) &&
2055	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2056		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2057
2058	/* Head */
2059	if (status->flag & RX_FLAG_IV_STRIPPED) {
2060		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2061		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2062
2063		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2064		skb_pull(msdu, crypto_len);
2065	}
2066}
2067
2068static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2069					      struct sk_buff *msdu,
2070					      struct ath12k_skb_rxcb *rxcb,
2071					      struct ieee80211_rx_status *status,
2072					      enum hal_encrypt_type enctype)
2073{
2074	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2075	struct ath12k_base *ab = ar->ab;
2076	size_t hdr_len, crypto_len;
2077	struct ieee80211_hdr *hdr;
2078	u16 qos_ctl;
2079	__le16 fc;
2080	u8 *crypto_hdr;
2081
2082	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2083		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2084		crypto_hdr = skb_push(msdu, crypto_len);
2085		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2086	}
2087
2088	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
2089	hdr_len = ieee80211_hdrlen(fc);
2090	skb_push(msdu, hdr_len);
2091	hdr = (struct ieee80211_hdr *)msdu->data;
2092	hdr->frame_control = fc;
2093
2094	/* Get wifi header from rx_desc */
2095	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
2096
2097	if (rxcb->is_mcbc)
2098		status->flag &= ~RX_FLAG_PN_VALIDATED;
2099
2100	/* Add QOS header */
2101	if (ieee80211_is_data_qos(hdr->frame_control)) {
2102		qos_ctl = rxcb->tid;
2103		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
2104			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2105
2106		/* TODO: Add other QoS ctl fields when required */
2107		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
2108		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
2109	}
2110}
2111
2112static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2113				       struct sk_buff *msdu,
2114				       enum hal_encrypt_type enctype,
2115				       struct ieee80211_rx_status *status)
2116{
2117	struct ieee80211_hdr *hdr;
2118	struct ethhdr *eth;
2119	u8 da[ETH_ALEN];
2120	u8 sa[ETH_ALEN];
2121	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2122	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2123
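	/* Replace the Ethernet header with an RFC 1042 LLC/SNAP header that
	 * carries the original ethertype, then rebuild the 802.11 header
	 * (and crypto header, if needed) from the rx descriptor.
	 */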
2124	eth = (struct ethhdr *)msdu->data;
2125	ether_addr_copy(da, eth->h_dest);
2126	ether_addr_copy(sa, eth->h_source);
2127	rfc.snap_type = eth->h_proto;
2128	skb_pull(msdu, sizeof(*eth));
2129	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2130	       sizeof(rfc));
2131	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2132
2133	/* The original 802.11 header has a different DA and, in the
2134	 * 4-address case, it may also have a different SA.
2135	 */
2136	hdr = (struct ieee80211_hdr *)msdu->data;
2137	ether_addr_copy(ieee80211_get_DA(hdr), da);
2138	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2139}
2140
2141static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2142				   struct hal_rx_desc *rx_desc,
2143				   enum hal_encrypt_type enctype,
2144				   struct ieee80211_rx_status *status,
2145				   bool decrypted)
2146{
2147	struct ath12k_base *ab = ar->ab;
2148	u8 decap;
2149	struct ethhdr *ehdr;
2150
2151	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2152
2153	switch (decap) {
2154	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2155		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2156		break;
2157	case DP_RX_DECAP_TYPE_RAW:
2158		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2159					   decrypted);
2160		break;
2161	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2162		ehdr = (struct ethhdr *)msdu->data;
2163
2164		/* mac80211 allows fast path only for authorized STA */
2165		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2166			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2167			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2168			break;
2169		}
2170
2171		/* PN for mcast packets will be validated in mac80211;
2172		 * remove eth header and add 802.11 header.
2173		 */
2174		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2175			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2176		break;
2177	case DP_RX_DECAP_TYPE_8023:
2178		/* TODO: Handle undecap for these formats */
2179		break;
2180	}
2181}
2182
2183struct ath12k_peer *
2184ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
2185{
2186	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2187	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2188	struct ath12k_peer *peer = NULL;
2189
2190	lockdep_assert_held(&ab->base_lock);
2191
2192	if (rxcb->peer_id)
2193		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2194
2195	if (peer)
2196		return peer;
2197
2198	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2199		return NULL;
2200
2201	peer = ath12k_peer_find_by_addr(ab,
2202					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
2203									      rx_desc));
2204	return peer;
2205}
2206
2207static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2208				struct sk_buff *msdu,
2209				struct hal_rx_desc *rx_desc,
2210				struct ieee80211_rx_status *rx_status)
2211{
2212	bool fill_crypto_hdr;
2213	struct ath12k_base *ab = ar->ab;
2214	struct ath12k_skb_rxcb *rxcb;
2215	enum hal_encrypt_type enctype;
2216	bool is_decrypted = false;
2217	struct ieee80211_hdr *hdr;
2218	struct ath12k_peer *peer;
2219	u32 err_bitmap;
2220
2221	/* PN for multicast packets will be checked in mac80211 */
2222	rxcb = ATH12K_SKB_RXCB(msdu);
2223	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
2224	rxcb->is_mcbc = fill_crypto_hdr;
2225
2226	if (rxcb->is_mcbc)
2227		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
2228
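	/* Look up the peer to pick the unicast or group cipher configured
	 * for it; treat the frame as open when the peer is not (yet) known.
	 */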
2229	spin_lock_bh(&ar->ab->base_lock);
2230	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
2231	if (peer) {
2232		if (rxcb->is_mcbc)
2233			enctype = peer->sec_type_grp;
2234		else
2235			enctype = peer->sec_type;
2236	} else {
2237		enctype = HAL_ENCRYPT_TYPE_OPEN;
2238	}
2239	spin_unlock_bh(&ar->ab->base_lock);
2240
2241	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2242	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2243		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2244
2245	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2246	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2247			     RX_FLAG_MMIC_ERROR |
2248			     RX_FLAG_DECRYPTED |
2249			     RX_FLAG_IV_STRIPPED |
2250			     RX_FLAG_MMIC_STRIPPED);
2251
2252	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2253		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2254	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2255		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2256
2257	if (is_decrypted) {
2258		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2259
2260		if (fill_crypto_hdr)
2261			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2262					RX_FLAG_ICV_STRIPPED;
2263		else
2264			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2265					   RX_FLAG_PN_VALIDATED;
2266	}
2267
2268	ath12k_dp_rx_h_csum_offload(ar, msdu);
2269	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2270			       enctype, rx_status, is_decrypted);
2271
2272	if (!is_decrypted || fill_crypto_hdr)
2273		return;
2274
2275	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
2276	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2277		hdr = (void *)msdu->data;
2278		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2279	}
2280}
2281
2282static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2283				struct ieee80211_rx_status *rx_status)
2284{
2285	struct ath12k_base *ab = ar->ab;
2286	struct ieee80211_supported_band *sband;
2287	enum rx_msdu_start_pkt_type pkt_type;
2288	u8 bw;
2289	u8 rate_mcs, nss;
2290	u8 sgi;
2291	bool is_cck;
2292
2293	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2294	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2295	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2296	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2297	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2298
2299	switch (pkt_type) {
2300	case RX_MSDU_START_PKT_TYPE_11A:
2301	case RX_MSDU_START_PKT_TYPE_11B:
2302		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2303		sband = &ar->mac.sbands[rx_status->band];
2304		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2305								is_cck);
2306		break;
2307	case RX_MSDU_START_PKT_TYPE_11N:
2308		rx_status->encoding = RX_ENC_HT;
2309		if (rate_mcs > ATH12K_HT_MCS_MAX) {
2310			ath12k_warn(ar->ab,
2311				    "Received with invalid mcs in HT mode %d\n",
2312				     rate_mcs);
2313			break;
2314		}
2315		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2316		if (sgi)
2317			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2318		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2319		break;
2320	case RX_MSDU_START_PKT_TYPE_11AC:
2321		rx_status->encoding = RX_ENC_VHT;
2322		rx_status->rate_idx = rate_mcs;
2323		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2324			ath12k_warn(ar->ab,
2325				    "Received with invalid mcs in VHT mode %d\n",
2326				     rate_mcs);
2327			break;
2328		}
2329		rx_status->nss = nss;
2330		if (sgi)
2331			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2332		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2333		break;
2334	case RX_MSDU_START_PKT_TYPE_11AX:
2335		rx_status->rate_idx = rate_mcs;
2336		if (rate_mcs > ATH12K_HE_MCS_MAX) {
2337			ath12k_warn(ar->ab,
2338				    "Received with invalid mcs in HE mode %d\n",
2339				    rate_mcs);
2340			break;
2341		}
2342		rx_status->encoding = RX_ENC_HE;
2343		rx_status->nss = nss;
2344		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2345		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2346		break;
2347	}
2348}
2349
2350void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2351			 struct ieee80211_rx_status *rx_status)
2352{
2353	struct ath12k_base *ab = ar->ab;
2354	u8 channel_num;
2355	u32 center_freq, meta_data;
2356	struct ieee80211_channel *channel;
2357
2358	rx_status->freq = 0;
2359	rx_status->rate_idx = 0;
2360	rx_status->nss = 0;
2361	rx_status->encoding = RX_ENC_LEGACY;
2362	rx_status->bw = RATE_INFO_BW_20;
2363	rx_status->enc_flags = 0;
2364
2365	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2366
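	/* The frequency field of the rx descriptor packs the primary channel
	 * number in the lower 16 bits and the center frequency (MHz) in the
	 * upper 16 bits.
	 */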
2367	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2368	channel_num = meta_data;
2369	center_freq = meta_data >> 16;
2370
2371	if (center_freq >= 5935 && center_freq <= 7105) {
2372		rx_status->band = NL80211_BAND_6GHZ;
2373	} else if (channel_num >= 1 && channel_num <= 14) {
2374		rx_status->band = NL80211_BAND_2GHZ;
2375	} else if (channel_num >= 36 && channel_num <= 173) {
2376		rx_status->band = NL80211_BAND_5GHZ;
2377	} else {
2378		spin_lock_bh(&ar->data_lock);
2379		channel = ar->rx_channel;
2380		if (channel) {
2381			rx_status->band = channel->band;
2382			channel_num =
2383				ieee80211_frequency_to_channel(channel->center_freq);
2384		}
2385		spin_unlock_bh(&ar->data_lock);
2386		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2387				rx_desc, sizeof(*rx_desc));
2388	}
2389
2390	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2391							 rx_status->band);
2392
2393	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
2394}
2395
2396static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2397				      struct sk_buff *msdu,
2398				      struct ieee80211_rx_status *status)
2399{
2400	struct ath12k_base *ab = ar->ab;
2401	static const struct ieee80211_radiotap_he known = {
2402		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2403				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2404		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2405	};
2406	struct ieee80211_radiotap_he *he;
2407	struct ieee80211_rx_status *rx_status;
2408	struct ieee80211_sta *pubsta;
2409	struct ath12k_peer *peer;
2410	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2411	u8 decap = DP_RX_DECAP_TYPE_RAW;
2412	bool is_mcbc = rxcb->is_mcbc;
2413	bool is_eapol = rxcb->is_eapol;
2414
2415	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2416	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2417		he = skb_push(msdu, sizeof(known));
2418		memcpy(he, &known, sizeof(known));
2419		status->flag |= RX_FLAG_RADIOTAP_HE;
2420	}
2421
2422	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2423		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
2424
2425	spin_lock_bh(&ab->base_lock);
2426	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
2427
2428	pubsta = peer ? peer->sta : NULL;
2429
2430	spin_unlock_bh(&ab->base_lock);
2431
2432	ath12k_dbg(ab, ATH12K_DBG_DATA,
2433		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2434		   msdu,
2435		   msdu->len,
2436		   peer ? peer->addr : NULL,
2437		   rxcb->tid,
2438		   is_mcbc ? "mcast" : "ucast",
2439		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2440		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2441		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2442		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2443		   (status->encoding == RX_ENC_HE) ? "he" : "",
2444		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2445		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2446		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2447		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2448		   status->rate_idx,
2449		   status->nss,
2450		   status->freq,
2451		   status->band, status->flag,
2452		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2453		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2454		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2455
2456	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2457			msdu->data, msdu->len);
2458
2459	rx_status = IEEE80211_SKB_RXCB(msdu);
2460	*rx_status = *status;
2461
2462	/* TODO: trace rx packet */
2463
2464	/* PN for multicast packets is not validated in HW,
2465	 * so skip the 802.3 rx path for them.
2466	 * Also, fast_rx expects the STA to be authorized, hence
2467	 * EAPOL packets are sent via the slow path.
2468	 */
2469	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2470	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2471		rx_status->flag |= RX_FLAG_8023;
2472
2473	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2474}
2475
2476static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2477				     struct sk_buff *msdu,
2478				     struct sk_buff_head *msdu_list,
2479				     struct ieee80211_rx_status *rx_status)
2480{
2481	struct ath12k_base *ab = ar->ab;
2482	struct hal_rx_desc *rx_desc, *lrx_desc;
2483	struct ath12k_skb_rxcb *rxcb;
2484	struct sk_buff *last_buf;
2485	u8 l3_pad_bytes;
2486	u16 msdu_len;
2487	int ret;
2488	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2489
2490	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2491	if (!last_buf) {
2492		ath12k_warn(ab,
2493			    "No valid Rx buffer to access MSDU_END tlv\n");
2494		ret = -EIO;
2495		goto free_out;
2496	}
2497
2498	rx_desc = (struct hal_rx_desc *)msdu->data;
2499	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2500	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2501		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2502		ret = -EIO;
2503		goto free_out;
2504	}
2505
2506	rxcb = ATH12K_SKB_RXCB(msdu);
2507	rxcb->rx_desc = rx_desc;
2508	msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2509	l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2510
2511	if (rxcb->is_frag) {
2512		skb_pull(msdu, hal_rx_desc_sz);
2513	} else if (!rxcb->is_continuation) {
2514		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2515			ret = -EINVAL;
2516			ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2517			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2518					sizeof(*rx_desc));
2519			goto free_out;
2520		}
2521		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2522		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2523	} else {
2524		ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2525						 msdu, last_buf,
2526						 l3_pad_bytes, msdu_len);
2527		if (ret) {
2528			ath12k_warn(ab,
2529				    "failed to coalesce msdu rx buffer %d\n", ret);
2530			goto free_out;
2531		}
2532	}
2533
2534	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2535	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2536
2537	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2538
2539	return 0;
2540
2541free_out:
2542	return ret;
2543}
2544
2545static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2546						  struct napi_struct *napi,
2547						  struct sk_buff_head *msdu_list,
2548						  int ring_id)
2549{
2550	struct ieee80211_rx_status rx_status = {0};
2551	struct ath12k_skb_rxcb *rxcb;
2552	struct sk_buff *msdu;
2553	struct ath12k *ar;
2554	u8 mac_id, pdev_id;
2555	int ret;
2556
2557	if (skb_queue_empty(msdu_list))
2558		return;
2559
2560	rcu_read_lock();
2561
2562	while ((msdu = __skb_dequeue(msdu_list))) {
2563		rxcb = ATH12K_SKB_RXCB(msdu);
2564		mac_id = rxcb->mac_id;
2565		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
2566		ar = ab->pdevs[pdev_id].ar;
2567		if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
2568			dev_kfree_skb_any(msdu);
2569			continue;
2570		}
2571
2572		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
2573			dev_kfree_skb_any(msdu);
2574			continue;
2575		}
2576
2577		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2578		if (ret) {
2579			ath12k_dbg(ab, ATH12K_DBG_DATA,
2580				   "Unable to process msdu %d", ret);
2581			dev_kfree_skb_any(msdu);
2582			continue;
2583		}
2584
2585		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2586	}
2587
2588	rcu_read_unlock();
2589}
2590
2591int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2592			 struct napi_struct *napi, int budget)
2593{
2594	struct ath12k_rx_desc_info *desc_info;
2595	struct ath12k_dp *dp = &ab->dp;
2596	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2597	struct hal_reo_dest_ring *desc;
2598	int num_buffs_reaped = 0;
2599	struct sk_buff_head msdu_list;
2600	struct ath12k_skb_rxcb *rxcb;
2601	int total_msdu_reaped = 0;
2602	struct hal_srng *srng;
2603	struct sk_buff *msdu;
2604	bool done = false;
2605	int mac_id;
2606	u64 desc_va;
2607
2608	__skb_queue_head_init(&msdu_list);
2609
2610	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2611
2612	spin_lock_bh(&srng->lock);
2613
2614try_again:
2615	ath12k_hal_srng_access_begin(ab, srng);
2616
2617	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2618		enum hal_reo_dest_ring_push_reason push_reason;
2619		u32 cookie;
2620
2621		cookie = le32_get_bits(desc->buf_addr_info.info1,
2622				       BUFFER_ADDR_INFO1_SW_COOKIE);
2623
2624		mac_id = le32_get_bits(desc->info0,
2625				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2626
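		/* With HW cookie conversion the virtual address of the SW rx
		 * descriptor is carried in buf_va_lo/hi; fall back to a
		 * cookie based lookup below if it is not populated.
		 */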
2627		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2628			   le32_to_cpu(desc->buf_va_lo));
2629		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
2630
2631		/* retry manual desc retrieval */
2632		if (!desc_info) {
2633			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
2634			if (!desc_info) {
2635				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
2636				continue;
2637			}
2638		}
2639
2640		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
2641			ath12k_warn(ab, "Check HW CC implementation");
2642
2643		msdu = desc_info->skb;
2644		desc_info->skb = NULL;
2645
2646		spin_lock_bh(&dp->rx_desc_lock);
2647		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
2648		spin_unlock_bh(&dp->rx_desc_lock);
2649
2650		rxcb = ATH12K_SKB_RXCB(msdu);
2651		dma_unmap_single(ab->dev, rxcb->paddr,
2652				 msdu->len + skb_tailroom(msdu),
2653				 DMA_FROM_DEVICE);
2654
2655		num_buffs_reaped++;
2656
2657		push_reason = le32_get_bits(desc->info0,
2658					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2659		if (push_reason !=
2660		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2661			dev_kfree_skb_any(msdu);
2662			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2663			continue;
2664		}
2665
2666		rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2667					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2668		rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2669					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2670		rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2671					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2672		rxcb->mac_id = mac_id;
2673		rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
2674					      RX_MPDU_DESC_META_DATA_PEER_ID);
2675		rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
2676					  RX_MPDU_DESC_INFO0_TID);
2677
2678		__skb_queue_tail(&msdu_list, msdu);
2679
2680		if (!rxcb->is_continuation) {
2681			total_msdu_reaped++;
2682			done = true;
2683		} else {
2684			done = false;
2685		}
2686
2687		if (total_msdu_reaped >= budget)
2688			break;
2689	}
2690
2691	/* HW might have updated the head pointer after we cached it.
2692	 * In this case, even though there are entries in the ring, we'll
2693	 * get rx_desc NULL. Give the read another try with the updated
2694	 * cached head pointer so that we can reap a complete MPDU in the
2695	 * current rx processing.
2696	 */
2697	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2698		ath12k_hal_srng_access_end(ab, srng);
2699		goto try_again;
2700	}
2701
2702	ath12k_hal_srng_access_end(ab, srng);
2703
2704	spin_unlock_bh(&srng->lock);
2705
2706	if (!total_msdu_reaped)
2707		goto exit;
2708
2709	/* TODO: Move to implicit BM? */
2710	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
2711				    ab->hw_params->hal_params->rx_buf_rbm, true);
2712
2713	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2714					      ring_id);
2715
2716exit:
2717	return total_msdu_reaped;
2718}
2719
2720static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2721{
2722	struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2723
2724	spin_lock_bh(&rx_tid->ab->base_lock);
2725	if (rx_tid->last_frag_no &&
2726	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2727		spin_unlock_bh(&rx_tid->ab->base_lock);
2728		return;
2729	}
2730	ath12k_dp_rx_frags_cleanup(rx_tid, true);
2731	spin_unlock_bh(&rx_tid->ab->base_lock);
2732}
2733
2734int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2735{
2736	struct ath12k_base *ab = ar->ab;
2737	struct crypto_shash *tfm;
2738	struct ath12k_peer *peer;
2739	struct ath12k_dp_rx_tid *rx_tid;
2740	int i;
2741
2742	tfm = crypto_alloc_shash("michael_mic", 0, 0);
2743	if (IS_ERR(tfm))
2744		return PTR_ERR(tfm);
2745
2746	spin_lock_bh(&ab->base_lock);
2747
2748	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2749	if (!peer) {
2750		spin_unlock_bh(&ab->base_lock);
2751		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2752		return -ENOENT;
2753	}
2754
2755	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2756		rx_tid = &peer->rx_tid[i];
2757		rx_tid->ab = ab;
2758		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
2759		skb_queue_head_init(&rx_tid->rx_frags);
2760	}
2761
2762	peer->tfm_mmic = tfm;
2763	spin_unlock_bh(&ab->base_lock);
2764
2765	return 0;
2766}
2767
2768static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2769				      struct ieee80211_hdr *hdr, u8 *data,
2770				      size_t data_len, u8 *mic)
2771{
2772	SHASH_DESC_ON_STACK(desc, tfm);
2773	u8 mic_hdr[16] = {0};
2774	u8 tid = 0;
2775	int ret;
2776
2777	if (!tfm)
2778		return -EINVAL;
2779
2780	desc->tfm = tfm;
2781
2782	ret = crypto_shash_setkey(tfm, key, 8);
2783	if (ret)
2784		goto out;
2785
2786	ret = crypto_shash_init(desc);
2787	if (ret)
2788		goto out;
2789
2790	/* Michael MIC pseudo-header: DA | SA | priority | 3 zero bytes */
2791	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2792	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2793	if (ieee80211_is_data_qos(hdr->frame_control))
2794		tid = ieee80211_get_tid(hdr);
2795	mic_hdr[12] = tid;
2796
2797	ret = crypto_shash_update(desc, mic_hdr, 16);
2798	if (ret)
2799		goto out;
2800	ret = crypto_shash_update(desc, data, data_len);
2801	if (ret)
2802		goto out;
2803	ret = crypto_shash_final(desc, mic);
2804out:
2805	shash_desc_zero(desc);
2806	return ret;
2807}
2808
2809static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
2810					  struct sk_buff *msdu)
2811{
2812	struct ath12k_base *ab = ar->ab;
2813	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2814	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2815	struct ieee80211_key_conf *key_conf;
2816	struct ieee80211_hdr *hdr;
2817	u8 mic[IEEE80211_CCMP_MIC_LEN];
2818	int head_len, tail_len, ret;
2819	size_t data_len;
2820	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2821	u8 *key, *data;
2822	u8 key_idx;
2823
2824	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2825		return 0;
2826
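	/* Frame layout here is raw 802.11:
	 * [hal rx desc][802.11 hdr][TKIP IV][data][Michael MIC][ICV][FCS]
	 */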
2827	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2828	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2829	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
2830	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2831
2832	if (!is_multicast_ether_addr(hdr->addr1))
2833		key_idx = peer->ucast_keyidx;
2834	else
2835		key_idx = peer->mcast_keyidx;
2836
2837	key_conf = peer->keys[key_idx];
2838
2839	data = msdu->data + head_len;
2840	data_len = msdu->len - head_len - tail_len;
2841	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2842
2843	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2844	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2845		goto mic_fail;
2846
2847	return 0;
2848
2849mic_fail:
2850	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
2851	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
2852
2853	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
2854		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2855	skb_pull(msdu, hal_rx_desc_sz);
2856
2857	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2858	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2859			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2860	ieee80211_rx(ar->hw, msdu);
2861	return -EINVAL;
2862}
2863
2864static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
2865					enum hal_encrypt_type enctype, u32 flags)
2866{
2867	struct ieee80211_hdr *hdr;
2868	size_t hdr_len;
2869	size_t crypto_len;
2870	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2871
2872	if (!flags)
2873		return;
2874
2875	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2876
2877	if (flags & RX_FLAG_MIC_STRIPPED)
2878		skb_trim(msdu, msdu->len -
2879			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2880
2881	if (flags & RX_FLAG_ICV_STRIPPED)
2882		skb_trim(msdu, msdu->len -
2883			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2884
2885	if (flags & RX_FLAG_IV_STRIPPED) {
2886		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2887		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2888
2889		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
2890			msdu->data + hal_rx_desc_sz, hdr_len);
2891		skb_pull(msdu, crypto_len);
2892	}
2893}
2894
2895static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
2896				 struct ath12k_peer *peer,
2897				 struct ath12k_dp_rx_tid *rx_tid,
2898				 struct sk_buff **defrag_skb)
2899{
2900	struct ath12k_base *ab = ar->ab;
2901	struct hal_rx_desc *rx_desc;
2902	struct sk_buff *skb, *first_frag, *last_frag;
2903	struct ieee80211_hdr *hdr;
2904	enum hal_encrypt_type enctype;
2905	bool is_decrypted = false;
2906	int msdu_len = 0;
2907	int extra_space;
2908	u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2909
2910	first_frag = skb_peek(&rx_tid->rx_frags);
2911	last_frag = skb_peek_tail(&rx_tid->rx_frags);
2912
2913	skb_queue_walk(&rx_tid->rx_frags, skb) {
2914		flags = 0;
2915		rx_desc = (struct hal_rx_desc *)skb->data;
2916		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
2917
2918		enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
2919		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
2920			is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
2921								   rx_desc);
2922
2923		if (is_decrypted) {
2924			if (skb != first_frag)
2925				flags |= RX_FLAG_IV_STRIPPED;
2926			if (skb != last_frag)
2927				flags |= RX_FLAG_ICV_STRIPPED |
2928					 RX_FLAG_MIC_STRIPPED;
2929		}
2930
2931		/* RX fragments are always raw packets */
2932		if (skb != last_frag)
2933			skb_trim(skb, skb->len - FCS_LEN);
2934		ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
2935
2936		if (skb != first_frag)
2937			skb_pull(skb, hal_rx_desc_sz +
2938				      ieee80211_hdrlen(hdr->frame_control));
2939		msdu_len += skb->len;
2940	}
2941
2942	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
2943	if (extra_space > 0 &&
2944	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
2945		return -ENOMEM;
2946
2947	__skb_unlink(first_frag, &rx_tid->rx_frags);
2948	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
2949		skb_put_data(first_frag, skb->data, skb->len);
2950		dev_kfree_skb_any(skb);
2951	}
2952
2953	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
2954	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
2955	ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
2956
2957	if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
2958		first_frag = NULL;
2959
2960	*defrag_skb = first_frag;
2961	return 0;
2962}
2963
2964static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
2965					      struct ath12k_dp_rx_tid *rx_tid,
2966					      struct sk_buff *defrag_skb)
2967{
2968	struct ath12k_base *ab = ar->ab;
2969	struct ath12k_dp *dp = &ab->dp;
2970	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
2971	struct hal_reo_entrance_ring *reo_ent_ring;
2972	struct hal_reo_dest_ring *reo_dest_ring;
2973	struct dp_link_desc_bank *link_desc_banks;
2974	struct hal_rx_msdu_link *msdu_link;
2975	struct hal_rx_msdu_details *msdu0;
2976	struct hal_srng *srng;
2977	dma_addr_t link_paddr, buf_paddr;
2978	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
2979	u32 cookie, hal_rx_desc_sz, dest_ring_info0;
2980	int ret;
2981	struct ath12k_rx_desc_info *desc_info;
2982	u8 dst_ind;
2983
2984	hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
2985	link_desc_banks = dp->link_desc_banks;
2986	reo_dest_ring = rx_tid->dst_ring_desc;
2987
2988	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
2989					&link_paddr, &cookie);
2990	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
2991
2992#if defined(__linux__)
2993	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
2994			(link_paddr - link_desc_banks[desc_bank].paddr));
2995#elif defined(__FreeBSD__)
2996	msdu_link = (struct hal_rx_msdu_link *)((uintptr_t)link_desc_banks[desc_bank].vaddr +
2997			(link_paddr - link_desc_banks[desc_bank].paddr));
2998#endif
2999	msdu0 = &msdu_link->msdu_link[0];
3000	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
3001	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
3002
3003	memset(msdu0, 0, sizeof(*msdu0));
3004
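	/* Describe the defragmented skb as a single, complete MSDU with
	 * valid SA/DA so that REO treats the reinjected frame like a normal
	 * rx buffer.
	 */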
3005	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
3006		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
3007		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
3008		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
3009				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
3010		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
3011		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
3012	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
3013	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
3014
3015	/* change msdu len in hal rx desc */
3016	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3017
3018	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
3019				   defrag_skb->len + skb_tailroom(defrag_skb),
3020				   DMA_FROM_DEVICE);
3021	if (dma_mapping_error(ab->dev, buf_paddr))
3022		return -ENOMEM;
3023
3024	spin_lock_bh(&dp->rx_desc_lock);
3025	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3026					     struct ath12k_rx_desc_info,
3027					     list);
3028	if (!desc_info) {
3029		spin_unlock_bh(&dp->rx_desc_lock);
3030		ath12k_warn(ab, "failed to find rx desc for reinject\n");
3031		ret = -ENOMEM;
3032		goto err_unmap_dma;
3033	}
3034
3035	desc_info->skb = defrag_skb;
3036
3037	list_del(&desc_info->list);
3038	list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
3039	spin_unlock_bh(&dp->rx_desc_lock);
3040
3041	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3042
3043	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3044					desc_info->cookie,
3045					HAL_RX_BUF_RBM_SW3_BM);
3046
3047	/* Fill mpdu details into reo entrance ring */
3048	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3049
3050	spin_lock_bh(&srng->lock);
3051	ath12k_hal_srng_access_begin(ab, srng);
3052
3053	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3054	if (!reo_ent_ring) {
3055		ath12k_hal_srng_access_end(ab, srng);
3056		spin_unlock_bh(&srng->lock);
3057		ret = -ENOSPC;
3058		goto err_free_desc;
3059	}
3060	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3061
3062	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3063					cookie,
3064					HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
3065
3066	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3067		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3068		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3069		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3070		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3071
3072	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3073	reo_ent_ring->rx_mpdu_info.peer_meta_data =
3074		reo_dest_ring->rx_mpdu_info.peer_meta_data;
3075
3076	/* Firmware expects the physical address to be filled in queue_addr_lo
3077	 * in the MLO scenario, and in the non-MLO case the peer meta data
3078	 * needs to be filled there instead.
3079	 * TODO: Handle the MLO scenario.
3080	 */
3081	reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
3082	reo_ent_ring->info0 = le32_encode_bits(dst_ind,
3083					       HAL_REO_ENTR_RING_INFO0_DEST_IND);
3084
3085	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3086					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3087	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3088					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3089	reo_ent_ring->info2 =
3090		cpu_to_le32(u32_get_bits(dest_ring_info0,
3091					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3092
3093	ath12k_hal_srng_access_end(ab, srng);
3094	spin_unlock_bh(&srng->lock);
3095
3096	return 0;
3097
3098err_free_desc:
3099	spin_lock_bh(&dp->rx_desc_lock);
3100	list_del(&desc_info->list);
3101	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3102	desc_info->skb = NULL;
3103	spin_unlock_bh(&dp->rx_desc_lock);
3104err_unmap_dma:
3105	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3106			 DMA_FROM_DEVICE);
3107	return ret;
3108}
3109
3110static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3111				    struct sk_buff *a, struct sk_buff *b)
3112{
3113	int frag1, frag2;
3114
3115	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3116	frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3117
3118	return frag1 - frag2;
3119}
3120
3121static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3122				      struct sk_buff_head *frag_list,
3123				      struct sk_buff *cur_frag)
3124{
3125	struct sk_buff *skb;
3126	int cmp;
3127
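	/* Insert cur_frag before the first queued fragment with an equal or
	 * higher fragment number so the list stays sorted by fragment number.
	 */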
3128	skb_queue_walk(frag_list, skb) {
3129		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3130		if (cmp < 0)
3131			continue;
3132		__skb_queue_before(frag_list, skb, cur_frag);
3133		return;
3134	}
3135	__skb_queue_tail(frag_list, cur_frag);
3136}
3137
3138static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3139{
3140	struct ieee80211_hdr *hdr;
3141	u64 pn = 0;
3142	u8 *ehdr;
3143	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3144
3145	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3146	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3147
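	/* For CCMP/GCMP the 48-bit PN sits in the IV as
	 * PN0, PN1, rsvd, keyid/ExtIV, PN2, PN3, PN4, PN5.
	 */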
3148	pn = ehdr[0];
3149	pn |= (u64)ehdr[1] << 8;
3150	pn |= (u64)ehdr[4] << 16;
3151	pn |= (u64)ehdr[5] << 24;
3152	pn |= (u64)ehdr[6] << 32;
3153	pn |= (u64)ehdr[7] << 40;
3154
3155	return pn;
3156}
3157
3158static bool
3159ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3160{
3161	struct ath12k_base *ab = ar->ab;
3162	enum hal_encrypt_type encrypt_type;
3163	struct sk_buff *first_frag, *skb;
3164	struct hal_rx_desc *desc;
3165	u64 last_pn;
3166	u64 cur_pn;
3167
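	/* Only CCMP/GCMP carry a PN that must increment by exactly one per
	 * fragment of the same MPDU; skip the check for other ciphers.
	 */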
3168	first_frag = skb_peek(&rx_tid->rx_frags);
3169	desc = (struct hal_rx_desc *)first_frag->data;
3170
3171	encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3172	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3173	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3174	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3175	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3176		return true;
3177
3178	last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3179	skb_queue_walk(&rx_tid->rx_frags, skb) {
3180		if (skb == first_frag)
3181			continue;
3182
3183		cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3184		if (cur_pn != last_pn + 1)
3185			return false;
3186		last_pn = cur_pn;
3187	}
3188	return true;
3189}
3190
3191static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3192				    struct sk_buff *msdu,
3193				    struct hal_reo_dest_ring *ring_desc)
3194{
3195	struct ath12k_base *ab = ar->ab;
3196	struct hal_rx_desc *rx_desc;
3197	struct ath12k_peer *peer;
3198	struct ath12k_dp_rx_tid *rx_tid;
3199	struct sk_buff *defrag_skb = NULL;
3200	u32 peer_id;
3201	u16 seqno, frag_no;
3202	u8 tid;
3203	int ret = 0;
3204	bool more_frags;
3205
3206	rx_desc = (struct hal_rx_desc *)msdu->data;
3207	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3208	tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3209	seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3210	frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3211	more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3212
3213	if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3214	    !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3215	    tid > IEEE80211_NUM_TIDS)
3216		return -EINVAL;
3217
3218	/* Received an unfragmented packet in the REO
3219	 * exception ring; this shouldn't happen as
3220	 * such packets typically come from the
3221	 * reo2sw srngs.
3222	 */
3223	if (WARN_ON_ONCE(!frag_no && !more_frags))
3224		return -EINVAL;
3225
3226	spin_lock_bh(&ab->base_lock);
3227	peer = ath12k_peer_find_by_id(ab, peer_id);
3228	if (!peer) {
3229		ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3230			    peer_id);
3231		ret = -ENOENT;
3232		goto out_unlock;
3233	}
3234	rx_tid = &peer->rx_tid[tid];
3235
3236	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3237	    skb_queue_empty(&rx_tid->rx_frags)) {
3238		/* Flush stored fragments and start a new sequence */
3239		ath12k_dp_rx_frags_cleanup(rx_tid, true);
3240		rx_tid->cur_sn = seqno;
3241	}
3242
3243	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3244		/* Fragment already present */
3245		ret = -EINVAL;
3246		goto out_unlock;
3247	}
3248
3249	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
3250		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3251	else
3252		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3253
3254	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3255	if (!more_frags)
3256		rx_tid->last_frag_no = frag_no;
3257
3258	if (frag_no == 0) {
3259		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3260						sizeof(*rx_tid->dst_ring_desc),
3261						GFP_ATOMIC);
3262		if (!rx_tid->dst_ring_desc) {
3263			ret = -ENOMEM;
3264			goto out_unlock;
3265		}
3266	} else {
3267		ath12k_dp_rx_link_desc_return(ab, ring_desc,
3268					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3269	}
3270
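	/* Reassembly is complete only when the final (no more-frags) fragment
	 * has been received and the fragment bitmap is contiguous from 0 to
	 * last_frag_no; otherwise (re)arm the reassembly timer and wait.
	 */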
3271	if (!rx_tid->last_frag_no ||
3272	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3273		mod_timer(&rx_tid->frag_timer, jiffies +
3274					       ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3275		goto out_unlock;
3276	}
3277
3278	spin_unlock_bh(&ab->base_lock);
3279	del_timer_sync(&rx_tid->frag_timer);
3280	spin_lock_bh(&ab->base_lock);
3281
3282	peer = ath12k_peer_find_by_id(ab, peer_id);
3283	if (!peer)
3284		goto err_frags_cleanup;
3285
3286	if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3287		goto err_frags_cleanup;
3288
3289	if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3290		goto err_frags_cleanup;
3291
3292	if (!defrag_skb)
3293		goto err_frags_cleanup;
3294
3295	if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3296		goto err_frags_cleanup;
3297
3298	ath12k_dp_rx_frags_cleanup(rx_tid, false);
3299	goto out_unlock;
3300
3301err_frags_cleanup:
3302	dev_kfree_skb_any(defrag_skb);
3303	ath12k_dp_rx_frags_cleanup(rx_tid, true);
3304out_unlock:
3305	spin_unlock_bh(&ab->base_lock);
3306	return ret;
3307}
3308
3309static int
3310ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3311			     bool drop, u32 cookie)
3312{
3313	struct ath12k_base *ab = ar->ab;
3314	struct sk_buff *msdu;
3315	struct ath12k_skb_rxcb *rxcb;
3316	struct hal_rx_desc *rx_desc;
3317	u16 msdu_len;
3318	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3319	struct ath12k_rx_desc_info *desc_info;
3320	u64 desc_va;
3321
3322	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3323		   le32_to_cpu(desc->buf_va_lo));
3324	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3325
3326	/* retry manual desc retrieval */
3327	if (!desc_info) {
3328		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3329		if (!desc_info) {
3330			ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
3331			return -EINVAL;
3332		}
3333	}
3334
3335	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3336		ath12k_warn(ab, "RX exception, check HW CC implementation");
3337
3338	msdu = desc_info->skb;
3339	desc_info->skb = NULL;
3340	spin_lock_bh(&ab->dp.rx_desc_lock);
3341	list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
3342	spin_unlock_bh(&ab->dp.rx_desc_lock);
3343
3344	rxcb = ATH12K_SKB_RXCB(msdu);
3345	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3346			 msdu->len + skb_tailroom(msdu),
3347			 DMA_FROM_DEVICE);
3348
3349	if (drop) {
3350		dev_kfree_skb_any(msdu);
3351		return 0;
3352	}
3353
3354	rcu_read_lock();
3355	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3356		dev_kfree_skb_any(msdu);
3357		goto exit;
3358	}
3359
3360	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3361		dev_kfree_skb_any(msdu);
3362		goto exit;
3363	}
3364
3365	rx_desc = (struct hal_rx_desc *)msdu->data;
3366	msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3367	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3368		ath12k_warn(ar->ab, "invalid msdu len %u", msdu_len);
3369		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3370				sizeof(*rx_desc));
3371		dev_kfree_skb_any(msdu);
3372		goto exit;
3373	}
3374
3375	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3376
3377	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3378		dev_kfree_skb_any(msdu);
3379		ath12k_dp_rx_link_desc_return(ar->ab, desc,
3380					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3381	}
3382exit:
3383	rcu_read_unlock();
3384	return 0;
3385}
3386
3387int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3388			     int budget)
3389{
3390	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3391	struct dp_link_desc_bank *link_desc_banks;
3392	enum hal_rx_buf_return_buf_manager rbm;
3393	struct hal_rx_msdu_link *link_desc_va;
3394	int tot_n_bufs_reaped, quota, ret, i;
3395	struct hal_reo_dest_ring *reo_desc;
3396	struct dp_rxdma_ring *rx_ring;
3397	struct dp_srng *reo_except;
3398	u32 desc_bank, num_msdus;
3399	struct hal_srng *srng;
3400	struct ath12k_dp *dp;
3401	int mac_id;
3402	struct ath12k *ar;
3403	dma_addr_t paddr;
3404	bool is_frag;
3405	bool drop = false;
3406	int pdev_id;
3407
3408	tot_n_bufs_reaped = 0;
3409	quota = budget;
3410
3411	dp = &ab->dp;
3412	reo_except = &dp->reo_except_ring;
3413	link_desc_banks = dp->link_desc_banks;
3414
3415	srng = &ab->hal.srng_list[reo_except->ring_id];
3416
3417	spin_lock_bh(&srng->lock);
3418
3419	ath12k_hal_srng_access_begin(ab, srng);
3420
3421	while (budget &&
3422	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3423		ab->soc_stats.err_ring_pkts++;
3424		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3425						    &desc_bank);
3426		if (ret) {
3427			ath12k_warn(ab, "failed to parse error reo desc %d\n",
3428				    ret);
3429			continue;
3430		}
3431#if defined(__linux__)
3432		link_desc_va = link_desc_banks[desc_bank].vaddr +
3433			       (paddr - link_desc_banks[desc_bank].paddr);
3434#elif defined(__FreeBSD__)
3435		link_desc_va = (void *)((uintptr_t)link_desc_banks[desc_bank].vaddr +
3436			       (paddr - link_desc_banks[desc_bank].paddr));
3437#endif
3438		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3439						 &rbm);
3440		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
3441		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
3442		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
3443			ab->soc_stats.invalid_rbm++;
3444			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3445			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3446						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3447			continue;
3448		}
3449
3450		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3451			     RX_MPDU_DESC_INFO0_FRAG_FLAG);
3452
3453		/* Process only rx fragments with one msdu per link desc below,
3454		 * and drop msdus indicated due to error reasons.
3455		 */
3456		if (!is_frag || num_msdus > 1) {
3457			drop = true;
3458			/* Return the link desc back to wbm idle list */
3459			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3460						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3461		}
3462
3463		for (i = 0; i < num_msdus; i++) {
3464			mac_id = le32_get_bits(reo_desc->info0,
3465					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3466
3467			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
3468			ar = ab->pdevs[pdev_id].ar;
3469
3470			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
3471							  msdu_cookies[i]))
3472				tot_n_bufs_reaped++;
3473		}
3474
3475		if (tot_n_bufs_reaped >= quota) {
3476			tot_n_bufs_reaped = quota;
3477			goto exit;
3478		}
3479
3480		budget = quota - tot_n_bufs_reaped;
3481	}
3482
3483exit:
3484	ath12k_hal_srng_access_end(ab, srng);
3485
3486	spin_unlock_bh(&srng->lock);
3487
3488	rx_ring = &dp->rx_refill_buf_ring;
3489
3490	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
3491				    ab->hw_params->hal_params->rx_buf_rbm, true);
3492
3493	return tot_n_bufs_reaped;
3494}
3495
3496static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3497					     int msdu_len,
3498					     struct sk_buff_head *msdu_list)
3499{
3500	struct sk_buff *skb, *tmp;
3501	struct ath12k_skb_rxcb *rxcb;
3502	int n_buffs;
3503
3504	n_buffs = DIV_ROUND_UP(msdu_len,
3505			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
3506
3507	skb_queue_walk_safe(msdu_list, skb, tmp) {
3508		rxcb = ATH12K_SKB_RXCB(skb);
3509		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3510		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3511			if (!n_buffs)
3512				break;
3513			__skb_unlink(skb, msdu_list);
3514			dev_kfree_skb_any(skb);
3515			n_buffs--;
3516		}
3517	}
3518}
3519
3520static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3521				      struct ieee80211_rx_status *status,
3522				      struct sk_buff_head *msdu_list)
3523{
3524	struct ath12k_base *ab = ar->ab;
3525	u16 msdu_len, peer_id;
3526	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3527	u8 l3pad_bytes;
3528	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3529	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3530
3531	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3532	peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
3533
3534	spin_lock(&ab->base_lock);
3535	if (!ath12k_peer_find_by_id(ab, peer_id)) {
3536		spin_unlock(&ab->base_lock);
3537		ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt %d\n",
3538			   peer_id);
3539		return -EINVAL;
3540	}
3541	spin_unlock(&ab->base_lock);
3542
3543	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3544		/* First buffer will be freed by the caller, so deduct its length */
3545		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3546		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3547		return -EINVAL;
3548	}
3549
3550	/* Even after cleaning up the sg buffers in the msdu list with the above
3551	 * check, any msdu received with the continuation flag needs to be dropped
3552	 * as invalid. This protects against random error frames with that flag set.
3553	 */
3554	if (rxcb->is_continuation)
3555		return -EINVAL;
3556
3557	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
3558		ath12k_warn(ar->ab,
3559			    "msdu_done bit not set in null_q_desc processing\n");
3560		__skb_queue_purge(msdu_list);
3561		return -EIO;
3562	}
3563
3564	/* Handle NULL queue descriptor violations arising out of a missing
3565	 * REO queue for a given peer or a given TID. This typically
3566	 * may happen if a packet is received on a QoS-enabled TID before the
3567	 * ADDBA negotiation for that TID has set up the TID queue. It
3568	 * may also happen for MC/BC frames if they are not routed to the
3569	 * non-QoS TID queue, in the absence of any other default TID queue.
3570	 * This error can show up both in a REO destination and a WBM release ring.
3571	 */
3572
3573	if (rxcb->is_frag) {
3574		skb_pull(msdu, hal_rx_desc_sz);
3575	} else {
3576		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3577
3578		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3579			return -EINVAL;
3580
3581		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3582		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3583	}
3584	ath12k_dp_rx_h_ppdu(ar, desc, status);
3585
3586	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
3587
3588	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
3589
3590	/* Note that the caller still has access to the msdu and completes rx
3591	 * with mac80211, so there is no need to clean up the amsdu_list here.
3592	 */
3593
3594	return 0;
3595}
3596
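/* Handle an MSDU released by the REO block with an error code.
 * Returns true if the caller should drop the MSDU.
 */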
3597static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3598				   struct ieee80211_rx_status *status,
3599				   struct sk_buff_head *msdu_list)
3600{
3601	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3602	bool drop = false;
3603
3604	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3605
3606	switch (rxcb->err_code) {
3607	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3608		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3609			drop = true;
3610		break;
3611	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3612		/* TODO: Do not drop PN failed packets in the driver;
3613		 * instead, it is good to drop such packets in mac80211
3614		 * after incrementing the replay counters.
3615		 */
3616		fallthrough;
3617	default:
3618		/* TODO: Review other errors and process them to mac80211
3619		 * as appropriate.
3620		 */
3621		drop = true;
3622		break;
3623	}
3624
3625	return drop;
3626}
3627
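/* Prepare an MSDU that failed the TKIP MIC check for delivery to
 * mac80211: trim the rx descriptor and L3 padding, fill in the rx
 * status and undecap with the MMIC error flags set so mac80211 can
 * account for the Michael MIC failure.
 */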
3628static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
3629					struct ieee80211_rx_status *status)
3630{
3631	struct ath12k_base *ab = ar->ab;
3632	u16 msdu_len;
3633	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3634	u8 l3pad_bytes;
3635	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3636	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3637
3638	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
3639	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
3640
3641	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3642	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3643	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3644	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3645
3646	ath12k_dp_rx_h_ppdu(ar, desc, status);
3647
3648	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3649			 RX_FLAG_DECRYPTED);
3650
3651	ath12k_dp_rx_h_undecap(ar, msdu, desc,
3652			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3653}
3654
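/* Handle an MSDU released by RXDMA with an error code. TKIP MIC
 * failures are fixed up and passed on; everything else is dropped.
 * Returns true if the caller should drop the MSDU.
 */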
3655static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
3656				     struct ieee80211_rx_status *status)
3657{
3658	struct ath12k_base *ab = ar->ab;
3659	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3660	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3661	bool drop = false;
3662	u32 err_bitmap;
3663
3664	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3665
3666	switch (rxcb->err_code) {
3667	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
3668	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3669		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
3670		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
3671			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3672			break;
3673		}
3674		fallthrough;
3675	default:
3676		/* TODO: Review the other rxdma error codes to check if anything
3677		 * is worth reporting to mac80211.
3678		 */
3679		drop = true;
3680		break;
3681	}
3682
3683	return drop;
3684}
3685
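/* Dispatch an MSDU reaped from the WBM error release ring to the REO or
 * RXDMA error handler based on its release source, then either free it
 * or deliver it to mac80211.
 */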
3686static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
3687				 struct napi_struct *napi,
3688				 struct sk_buff *msdu,
3689				 struct sk_buff_head *msdu_list)
3690{
3691	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3692	struct ieee80211_rx_status rxs = {0};
3693	bool drop = true;
3694
3695	switch (rxcb->err_rel_src) {
3696	case HAL_WBM_REL_SRC_MODULE_REO:
3697		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3698		break;
3699	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3700		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3701		break;
3702	default:
3703		/* msdu will get freed */
3704		break;
3705	}
3706
3707	if (drop) {
3708		dev_kfree_skb_any(msdu);
3709		return;
3710	}
3711
3712	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
3713}
3714
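/* NAPI handler for the WBM error release ring: reap erroneous MSDUs,
 * replenish the rx refill ring and hand the MSDUs to the per-radio
 * error handlers. Returns the number of buffers reaped.
 */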
3715int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
3716				 struct napi_struct *napi, int budget)
3717{
3718	struct ath12k *ar;
3719	struct ath12k_dp *dp = &ab->dp;
3720	struct dp_rxdma_ring *rx_ring;
3721	struct hal_rx_wbm_rel_info err_info;
3722	struct hal_srng *srng;
3723	struct sk_buff *msdu;
3724	struct sk_buff_head msdu_list[MAX_RADIOS];
3725	struct ath12k_skb_rxcb *rxcb;
3726	void *rx_desc;
3727	int mac_id;
3728	int num_buffs_reaped = 0;
3729	struct ath12k_rx_desc_info *desc_info;
3730	int ret, i;
3731
3732	for (i = 0; i < ab->num_radios; i++)
3733		__skb_queue_head_init(&msdu_list[i]);
3734
3735	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3736	rx_ring = &dp->rx_refill_buf_ring;
3737
3738	spin_lock_bh(&srng->lock);
3739
3740	ath12k_hal_srng_access_begin(ab, srng);
3741
3742	while (budget) {
3743		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
3744		if (!rx_desc)
3745			break;
3746
3747		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3748		if (ret) {
3749			ath12k_warn(ab,
3750				    "failed to parse rx error in wbm_rel ring desc %d\n",
3751				    ret);
3752			continue;
3753		}
3754
3755		desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
3756
3757		/* Retry manual descriptor retrieval if HW cookie conversion is not done */
3758		if (!desc_info) {
3759			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
3760			if (!desc_info) {
3761				ath12k_warn(ab, "Invalid cookie in manual desc retrieval\n");
3762				continue;
3763			}
3764		}
3765
3766		/* FIXME: Extract the mac id correctly. Since descriptors are not
3767		 * tied to a mac, it can be derived from the vdev id in the ring desc.
3768		 */
3769		mac_id = 0;
3770
3771		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3772			ath12k_warn(ab, "WBM RX err, Check HW CC implementation\n");
3773
3774		msdu = desc_info->skb;
3775		desc_info->skb = NULL;
3776
3777		spin_lock_bh(&dp->rx_desc_lock);
3778		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
3779		spin_unlock_bh(&dp->rx_desc_lock);
3780
3781		rxcb = ATH12K_SKB_RXCB(msdu);
3782		dma_unmap_single(ab->dev, rxcb->paddr,
3783				 msdu->len + skb_tailroom(msdu),
3784				 DMA_FROM_DEVICE);
3785
3786		num_buffs_reaped++;
3787
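		/* Buffers carrying the continuation flag are part of the same
		 * MSDU, so only complete MSDUs count against the NAPI budget.
		 */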
3788		if (!err_info.continuation)
3789			budget--;
3790
3791		if (err_info.push_reason !=
3792		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3793			dev_kfree_skb_any(msdu);
3794			continue;
3795		}
3796
3797		rxcb->err_rel_src = err_info.err_rel_src;
3798		rxcb->err_code = err_info.err_code;
3799		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3800		__skb_queue_tail(&msdu_list[mac_id], msdu);
3801
3802		rxcb->is_first_msdu = err_info.first_msdu;
3803		rxcb->is_last_msdu = err_info.last_msdu;
3804		rxcb->is_continuation = err_info.continuation;
3805	}
3806
3807	ath12k_hal_srng_access_end(ab, srng);
3808
3809	spin_unlock_bh(&srng->lock);
3810
3811	if (!num_buffs_reaped)
3812		goto done;
3813
3814	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
3815				    ab->hw_params->hal_params->rx_buf_rbm, true);
3816
3817	rcu_read_lock();
3818	for (i = 0; i < ab->num_radios; i++) {
3819		if (!rcu_dereference(ab->pdevs_active[i])) {
3820			__skb_queue_purge(&msdu_list[i]);
3821			continue;
3822		}
3823
3824		ar = ab->pdevs[i].ar;
3825
3826		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3827			__skb_queue_purge(&msdu_list[i]);
3828			continue;
3829		}
3830
3831		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3832			ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3833	}
3834	rcu_read_unlock();
3835done:
3836	return num_buffs_reaped;
3837}
3838
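/* Process the REO status ring: parse each status TLV and invoke the
 * completion handler of the matching command queued on reo_cmd_list.
 */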
3839void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
3840{
3841	struct ath12k_dp *dp = &ab->dp;
3842	struct hal_tlv_64_hdr *hdr;
3843	struct hal_srng *srng;
3844	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
3845	bool found = false;
3846	u16 tag;
3847	struct hal_reo_status reo_status;
3848
3849	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3850
3851	memset(&reo_status, 0, sizeof(reo_status));
3852
3853	spin_lock_bh(&srng->lock);
3854
3855	ath12k_hal_srng_access_begin(ab, srng);
3856
3857	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3858		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
3859
3860		switch (tag) {
3861		case HAL_REO_GET_QUEUE_STATS_STATUS:
3862			ath12k_hal_reo_status_queue_stats(ab, hdr,
3863							  &reo_status);
3864			break;
3865		case HAL_REO_FLUSH_QUEUE_STATUS:
3866			ath12k_hal_reo_flush_queue_status(ab, hdr,
3867							  &reo_status);
3868			break;
3869		case HAL_REO_FLUSH_CACHE_STATUS:
3870			ath12k_hal_reo_flush_cache_status(ab, hdr,
3871							  &reo_status);
3872			break;
3873		case HAL_REO_UNBLOCK_CACHE_STATUS:
3874			ath12k_hal_reo_unblk_cache_status(ab, hdr,
3875							  &reo_status);
3876			break;
3877		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3878			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
3879								 &reo_status);
3880			break;
3881		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3882			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
3883								  &reo_status);
3884			break;
3885		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3886			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
3887								  &reo_status);
3888			break;
3889		default:
3890			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
3891			continue;
3892		}
3893
3894		spin_lock_bh(&dp->reo_cmd_lock);
3895		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3896			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3897				found = true;
3898				list_del(&cmd->list);
3899				break;
3900			}
3901		}
3902		spin_unlock_bh(&dp->reo_cmd_lock);
3903
3904		if (found) {
3905			cmd->handler(dp, (void *)&cmd->data,
3906				     reo_status.uniform_hdr.cmd_status);
3907			kfree(cmd);
3908		}
3909
3910		found = false;
3911	}
3912
3913	ath12k_hal_srng_access_end(ab, srng);
3914
3915	spin_unlock_bh(&srng->lock);
3916}
3917
3918void ath12k_dp_rx_free(struct ath12k_base *ab)
3919{
3920	struct ath12k_dp *dp = &ab->dp;
3921	int i;
3922
3923	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
3924
3925	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
3926		if (ab->hw_params->rx_mac_buf_ring)
3927			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
3928	}
3929
3930	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
3931		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
3932
3933	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
3934	ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
3935
3936	ath12k_dp_rxdma_buf_free(ab);
3937}
3938
3939void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
3940{
3941	struct ath12k *ar = ab->pdevs[mac_id].ar;
3942
3943	ath12k_dp_rx_pdev_srng_free(ar);
3944}
3945
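/* Configure the HTT rx TLV filter for the QCN9274 refill buffer ring,
 * subscribing to the mpdu_start and msdu_end TLVs at offsets matching
 * the hal_rx_desc layout.
 */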
3946int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
3947{
3948	struct ath12k_dp *dp = &ab->dp;
3949	struct htt_rx_ring_tlv_filter tlv_filter = {0};
3950	u32 ring_id;
3951	int ret;
3952	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3953
3954	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3955
3956	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3957	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3958	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3959					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3960					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3961	tlv_filter.offset_valid = true;
3962	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3963
3964	tlv_filter.rx_mpdu_start_offset =
3965			ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
3966	tlv_filter.rx_msdu_end_offset =
3967		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
3968
3969	/* TODO: Selectively subscribe to the required qwords within msdu_end
3970	 * and mpdu_start, set up the mask in the message below and modify
3971	 * the rx_desc struct accordingly.
3972	 */
3973	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
3974					       HAL_RXDMA_BUF,
3975					       DP_RXDMA_REFILL_RING_SIZE,
3976					       &tlv_filter);
3977
3978	return ret;
3979}
3980
3981int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
3982{
3983	struct ath12k_dp *dp = &ab->dp;
3984	struct htt_rx_ring_tlv_filter tlv_filter = {0};
3985	u32 ring_id;
3986	int ret = 0;
3987	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3988	int i;
3989
3990	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3991
3992	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3993	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3994	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3995					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3996					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3997	tlv_filter.offset_valid = true;
3998	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3999
4000	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
4001
4002	tlv_filter.rx_mpdu_start_offset =
4003			ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
4004	tlv_filter.rx_msdu_end_offset =
4005		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
4006
4007	/* TODO: Selectively subscribe to the required qwords within msdu_end
4008	 * and mpdu_start, set up the mask in the message below and modify
4009	 * the rx_desc struct accordingly.
4010	 */
4011
4012	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4013		ring_id = dp->rx_mac_buf_ring[i].ring_id;
4014		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
4015						       HAL_RXDMA_BUF,
4016						       DP_RXDMA_REFILL_RING_SIZE,
4017						       &tlv_filter);
		if (ret)
			return ret;
4018	}
4019
4020	return ret;
4021}
4022
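/* Register the rx related SRNGs with the firmware via HTT and apply the
 * hw specific rxdma ring selection (TLV filter) configuration.
 */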
4023int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
4024{
4025	struct ath12k_dp *dp = &ab->dp;
4026	u32 ring_id;
4027	int i, ret;
4028
4029	/* TODO: Need to verify the HTT setup for QCN9224 */
4030	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4031	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
4032	if (ret) {
4033		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4034			    ret);
4035		return ret;
4036	}
4037
4038	if (ab->hw_params->rx_mac_buf_ring) {
4039		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4040			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4041			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4042							  i, HAL_RXDMA_BUF);
4043			if (ret) {
4044				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4045					    i, ret);
4046				return ret;
4047			}
4048		}
4049	}
4050
4051	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4052		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4053		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4054						  i, HAL_RXDMA_DST);
4055		if (ret) {
4056			ath12k_warn(ab, "failed to configure rxdma_err_dst_ring%d %d\n",
4057				    i, ret);
4058			return ret;
4059		}
4060	}
4061
4062	if (ab->hw_params->rxdma1_enable) {
4063		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4064		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4065						  0, HAL_RXDMA_MONITOR_BUF);
4066		if (ret) {
4067			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4068				    ret);
4069			return ret;
4070		}
4071
4072		ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
4073		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4074						  0, HAL_TX_MONITOR_BUF);
4075		if (ret) {
4076			ath12k_warn(ab, "failed to configure tx_mon_buf_ring %d\n",
4077				    ret);
4078			return ret;
4079		}
4080	}
4081
4082	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
4083	if (ret) {
4084		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
4085		return ret;
4086	}
4087
4088	return 0;
4089}
4090
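/* Allocate the SoC level rx rings: the refill buffer ring, optional
 * per-MAC buffer rings, rxdma error destination rings and, when rxdma1
 * is enabled, the monitor buffer rings; then populate them with buffers.
 */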
4091int ath12k_dp_rx_alloc(struct ath12k_base *ab)
4092{
4093	struct ath12k_dp *dp = &ab->dp;
4094	int i, ret;
4095
4096	idr_init(&dp->rx_refill_buf_ring.bufs_idr);
4097	spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
4098
4099	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
4100	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
4101
4102	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
4103	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
4104
4105	ret = ath12k_dp_srng_setup(ab,
4106				   &dp->rx_refill_buf_ring.refill_buf_ring,
4107				   HAL_RXDMA_BUF, 0, 0,
4108				   DP_RXDMA_BUF_RING_SIZE);
4109	if (ret) {
4110		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
4111		return ret;
4112	}
4113
4114	if (ab->hw_params->rx_mac_buf_ring) {
4115		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4116			ret = ath12k_dp_srng_setup(ab,
4117						   &dp->rx_mac_buf_ring[i],
4118						   HAL_RXDMA_BUF, 1,
4119						   i, 1024);
4120			if (ret) {
4121				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
4122					    i);
4123				return ret;
4124			}
4125		}
4126	}
4127
4128	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4129		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
4130					   HAL_RXDMA_DST, 0, i,
4131					   DP_RXDMA_ERR_DST_RING_SIZE);
4132		if (ret) {
4133			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
4134			return ret;
4135		}
4136	}
4137
4138	if (ab->hw_params->rxdma1_enable) {
4139		ret = ath12k_dp_srng_setup(ab,
4140					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
4141					   HAL_RXDMA_MONITOR_BUF, 0, 0,
4142					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
4143		if (ret) {
4144			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4145			return ret;
4146		}
4147
4148		ret = ath12k_dp_srng_setup(ab,
4149					   &dp->tx_mon_buf_ring.refill_buf_ring,
4150					   HAL_TX_MONITOR_BUF, 0, 0,
4151					   DP_TX_MONITOR_BUF_RING_SIZE);
4152		if (ret) {
4153			ath12k_warn(ab, "failed to setup HAL_TX_MONITOR_BUF\n");
4154			return ret;
4155		}
4156	}
4157
4158	ret = ath12k_dp_rxdma_buf_setup(ab);
4159	if (ret) {
4160		ath12k_warn(ab, "failed to setup rxdma ring\n");
4161		return ret;
4162	}
4163
4164	return 0;
4165}
4166
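/* Per-pdev rx setup: allocate the monitor SRNGs and register them with
 * the firmware via HTT. This is a no-op when rxdma1 is not enabled.
 */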
4167int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
4168{
4169	struct ath12k *ar = ab->pdevs[mac_id].ar;
4170	struct ath12k_pdev_dp *dp = &ar->dp;
4171	u32 ring_id;
4172	int i;
4173	int ret;
4174
4175	if (!ab->hw_params->rxdma1_enable)
4176		goto out;
4177
4178	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
4179	if (ret) {
4180		ath12k_warn(ab, "failed to setup rx srngs\n");
4181		return ret;
4182	}
4183
4184	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4185		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
4186		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4187						  mac_id + i,
4188						  HAL_RXDMA_MONITOR_DST);
4189		if (ret) {
4190			ath12k_warn(ab,
4191				    "failed to configure rxdma_mon_dst_ring %d %d\n",
4192				    i, ret);
4193			return ret;
4194		}
4195
4196		ring_id = dp->tx_mon_dst_ring[i].ring_id;
4197		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4198						  mac_id + i,
4199						  HAL_TX_MONITOR_DST);
4200		if (ret) {
4201			ath12k_warn(ab,
4202				    "failed to configure tx_mon_dst_ring %d %d\n",
4203				    i, ret);
4204			return ret;
4205		}
4206	}
4207out:
4208	return 0;
4209}
4210
4211static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
4212{
4213	struct ath12k_pdev_dp *dp = &ar->dp;
4214	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
4215
4216	skb_queue_head_init(&pmon->rx_status_q);
4217
4218	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4219
4220	memset(&pmon->rx_mon_stats, 0,
4221	       sizeof(pmon->rx_mon_stats));
4222	return 0;
4223}
4224
4225int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
4226{
4227	struct ath12k_pdev_dp *dp = &ar->dp;
4228	struct ath12k_mon_data *pmon = &dp->mon_data;
4229	int ret = 0;
4230
4231	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
4232	if (ret) {
4233		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
4234		return ret;
4235	}
4236
4237	/* If rxdma1_enable is false, there is no need to set up the
4238	 * rxdma_mon_desc_ring.
4239	 */
4240	if (!ar->ab->hw_params->rxdma1_enable)
4241		return 0;
4242
4243	pmon->mon_last_linkdesc_paddr = 0;
4244	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4245	spin_lock_init(&pmon->mon_lock);
4246
4247	return 0;
4248}
4249
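/* Kick the monitor reap timer so that the monitor destination rings are
 * serviced periodically while pktlog capture is active.
 */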
4250int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
4251{
4252	/* start reap timer */
4253	mod_timer(&ab->mon_reap_timer,
4254		  jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
4255
4256	return 0;
4257}
4258
4259int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
4260{
4261	int ret;
4262
4263	if (stop_timer)
4264		del_timer_sync(&ab->mon_reap_timer);
4265
4266	/* reap all the monitor related rings */
4267	ret = ath12k_dp_purge_mon_ring(ab);
4268	if (ret) {
4269		ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
4270		return ret;
4271	}
4272
4273	return 0;
4274}
4275