// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
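
/* Check that the hardware actually wrote into a completed buffer: the
 * payload is pre-filled with ATH11K_DB_MAGIC_VALUE, so any u32 word
 * that still holds the magic pattern means the buffer was not fully
 * overwritten by DMA.
 */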
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
		if (*temp == ATH11K_DB_MAGIC_VALUE)
			return -EINVAL;
	}

	return 0;
}
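
/* Pre-fill a buffer with the magic pattern before it is handed to the
 * hardware so that ath11k_dbring_validate_buffer() can later detect
 * words the DMA engine never overwrote.
 */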
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
					   void *buffer, u32 size)
{
	/* memset32() fills the buffer with ATH11K_DB_MAGIC_VALUE and
	 * expects its size argument as the number of u32 values to be
	 * stored, not the number of bytes, so convert before calling.
	 */
	size = size / sizeof(u32);

	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}
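
/* Map one buffer for DMA, track it in the ring's IDR and post it to
 * the refill SRNG. The cookie written into the descriptor encodes the
 * pdev index and the IDR id so the buffer can be looked up again when
 * firmware releases it. Must be called with srng->lock held.
 */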
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff,
					enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);

	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}
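
/* Allocate and post as many buffers as the refill SRNG has free
 * entries, bounded by bufs_max. Each payload is over-allocated by
 * buf_align - 1 bytes so it can be aligned before mapping. Returns
 * the number of entries that could not be filled.
 */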
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}

		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}
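
/* Pass the ring layout to firmware: base address, head/tail index
 * locations, element count, buffer size and event moderation settings
 * are sent through ath11k_wmi_pdev_dma_ring_cfg().
 */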
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id		= DP_SW2HW_MACID(ring->pdev_id);
	param.module_id		= id;
	param.base_paddr_lo	= lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi	= upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo	= lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems		= ring->bufs_max;
	param.buf_size		= ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms	= ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}
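
/* Let the module that owns the ring configure event moderation and the
 * per-buffer callback. As a sketch, a client such as the spectral scan
 * module would register itself roughly like this:
 *
 *	ath11k_dbring_set_cfg(ar, &ring, num_resp_per_event,
 *			      event_timeout_ms, my_handler);
 *
 * where my_handler() is a hypothetical callback that receives the
 * aligned payload of every buffer firmware releases.
 */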
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}
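
/* Derive the ring geometry from the refill SRNG size and the
 * capabilities reported by firmware, then fill the ring with buffers.
 */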
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);

	return ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
}
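
/* Allocate the HAL_RXDMA_DIR_BUF SRNG that backs this direct buffer
 * ring.
 */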
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}
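
/* Look up the direct buffer capabilities (minimum buffer size and
 * alignment) that firmware advertised for the given pdev and module.
 */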
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}
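
/* Handle a WMI buffer-release event: for every released entry, find
 * the buffer through the cookie's IDR id, unmap it, hand the aligned
 * payload to the module's handler, then wipe and repost the buffer.
 */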
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}
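
/* Free the HAL SRNG backing the ring. */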
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
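
/* Release every buffer still tracked in the IDR: unmap and free each
 * element, then destroy the IDR itself.
 */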
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}