// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: MHI */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: MHI */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: MHI */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE12: CV Prefetch */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE13: CV Prefetch */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE14: target->host dbg log */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE15: reserved for future use */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

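/* Post a single rx buffer to the destination SRNG of a pipe. Called with
 * ab->ce.ce_lock held; takes the SRNG lock internally and advances the
 * ring's write index on success.
 */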
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath12k_base *ab = pipe->ab;
	struct ath12k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	struct hal_ce_srng_dest_desc *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath12k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

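/* Refill the rx ring of a pipe: allocate, DMA-map and enqueue skbs until
 * rx_buf_needed drops to zero or an error occurs.
 */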
static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath12k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

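/* Reap one completed rx descriptor from the status ring. On success the
 * received skb and its length are returned through @skb and @nbytes, and
 * rx_buf_needed is incremented so the buffer can be replenished later.
 */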
static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath12k_base *ab = pipe->ab;
	struct hal_ce_srng_dst_status_desc *desc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

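/* Drain all completed rx buffers of a pipe, hand them to the registered
 * recv_cb, and replenish the ring. If replenishing fails, a retry is
 * armed via the rx_replenish_retry timer.
 */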
static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath12k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
	}
}

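/* Reap the next completed tx descriptor and return the associated skb,
 * or an ERR_PTR if nothing has completed.
 */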
static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct hal_ce_srng_src_desc *desc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

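/* Tx completion handler: unmap and free every skb whose descriptor has
 * been consumed by the target.
 */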
static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

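/* Fill in the MSI address/data of the SRNG params for a CE ring so the
 * ring can raise MSI interrupts. Quietly does nothing if the HIF layer
 * reports no MSI vectors assigned for CEs.
 */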
static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath12k_hif_get_user_msi_vector(ab, "CE",
					     &msi_data_count, &msi_data_start,
					     &msi_irq_start);
	if (ret)
		return;

	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
	ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

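/* Set up the HAL SRNG backing a CE ring. Interrupt thresholds depend on
 * the ring type and on whether the CE has interrupts disabled. Returns 0
 * and stores the allocated hal_ring_id on success.
 */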
static int ath12k_ce_init_ring(struct ath12k_base *ab,
			       struct ath12k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
		ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath12k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	return 0;
}

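/* Allocate a CE ring structure and its descriptor memory from coherent
 * DMA, aligning the base address to CE_DESC_RING_ALIGN.
 */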
static struct ath12k_ce_ring *
ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
{
	struct ath12k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (!ce_ring)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space =
		PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);

	ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
					    CE_DESC_RING_ALIGN);

	return ce_ring;
}

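/* Allocate the rings of one pipe according to its ce_attr: a source ring
 * if the pipe transmits, and destination plus status rings if it
 * receives. Ring sizes are rounded up to a power of two.
 */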
static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
	struct ath12k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath12k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

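/* Per-CE service entry point: process tx completions and any pending rx
 * completions for the given CE id.
 */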
void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath12k_ce_recv_process_cb(pipe);
}

void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}

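/* Queue an skb for transmission on a pipe. The skb must already be
 * DMA-mapped (ATH12K_SKB_CB(skb)->paddr). Returns -ENOBUFS when the
 * source ring is full and -ESHUTDOWN during crash flush.
 */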
int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_ce_srng_src_desc *desc;
	struct hal_srng *srng;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH12K_CE_USAGE_THRESHOLD)
			ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath12k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto unlock;
	}

	desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath12k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath12k_hal_srng_access_end(ab, srng);

unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

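/* Unmap and free every rx buffer still posted on a pipe's destination
 * ring.
 */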
static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct ath12k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath12k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath12k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}

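/* Replenish rx buffers on all pipes. Errors other than -ENOSPC arm the
 * rx_replenish_retry timer and abort the loop.
 */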
void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath12k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}

void ath12k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath12k_ce_rx_post_buf(ab);
}

static void ath12k_ce_shadow_config(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ab->hw_params->host_ce_config[i].src_nentries)
			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);

		if (ab->hw_params->host_ce_config[i].dest_nentries) {
			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
		}
	}
}

void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params->supports_shadow_regs)
		return;

	ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured first, then
	 * all CE srngs.
	 */
	ath12k_hal_srng_shadow_config(ab);
	ath12k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}

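/* Initialize the HAL SRNGs of all allocated pipes and reset their
 * software indices. The shadow register configuration is fetched first
 * and stored in the QMI CE config.
 */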
int ath12k_ce_init_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int i;
	int ret;

	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
				    &ab->qmi.ce_cfg.shadow_reg_v3_len);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath12k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath12k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath12k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath12k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

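/* Free the descriptor memory and ring structures of all pipes.
 * Counterpart of ath12k_ce_alloc_pipes().
 */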
void ath12k_ce_free_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}

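/* Allocate all CE pipes described by the hw_params host_ce_config table.
 * On failure, any partially allocated pipes are freed.
 */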
int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		attr = &ab->hw_params->host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath12k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partially successful allocation */
			ath12k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}

int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params->ce_count)
		return -EINVAL;

	return ab->hw_params->host_ce_config[ce_id].flags;
}