// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

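/* Populate a chain's geometry (element/page counts, masks and sizes)
 * from the caller-supplied parameters. If an external PBL was supplied,
 * remember it so that qed_chain_alloc_pbl() skips allocating its own
 * table.
 */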
static void qed_chain_init(struct qed_chain *chain,
			   const struct qed_chain_init_params *params,
			   u32 page_cnt)
{
	memset(chain, 0, sizeof(*chain));

	chain->elem_size = params->elem_size;
	chain->intended_use = params->intended_use;
	chain->mode = params->mode;
	chain->cnt_type = params->cnt_type;

	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
					      params->page_size);
	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
						       params->page_size,
						       params->mode);
	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
						       params->mode);

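	/* elem_per_page is expected to be a power of two, so subtracting
	 * one yields a mask usable for cheap modulo arithmetic on
	 * producer/consumer indices.
	 */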
	chain->elem_per_page_mask = chain->elem_per_page - 1;
	chain->next_page_mask = chain->usable_per_page &
				chain->elem_per_page_mask;

	chain->page_size = params->page_size;
	chain->page_cnt = page_cnt;
	chain->capacity = chain->usable_per_page * page_cnt;
	chain->size = chain->elem_per_page * page_cnt;

	if (params->ext_pbl_virt) {
		chain->pbl_sp.table_virt = params->ext_pbl_virt;
		chain->pbl_sp.table_phys = params->ext_pbl_phys;

		chain->b_external_pbl = true;
	}
}

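/* Link one page of a next-ptr chain to the following page. The link
 * element (struct qed_chain_next) sits right after the page's usable
 * elements, i.e. in the reserved element slot(s) at the end of the
 * page.
 */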
static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
					 void *virt_curr, void *virt_next,
					 dma_addr_t phys_next)
{
	struct qed_chain_next *next;
	u32 size;

	size = chain->elem_size * chain->usable_per_page;
	next = virt_curr + size;

	DMA_REGPAIR_LE(next->next_phys, phys_next);
	next->next_virt = virt_next;
}

static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
			       dma_addr_t phys_addr)
{
	chain->p_virt_addr = virt_addr;
	chain->p_phys_addr = phys_addr;
}

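/* Free a next-ptr chain by walking the singly linked list of pages.
 * Each page's link element must be read before the page itself is
 * freed, since it holds the only reference to the following page.
 */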
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct qed_chain_next *next;
	dma_addr_t phys, phys_next;
	void *virt, *virt_next;
	u32 size, i;

	size = chain->elem_size * chain->usable_per_page;
	virt = chain->p_virt_addr;
	phys = chain->p_phys_addr;

	for (i = 0; i < chain->page_cnt; i++) {
		if (!virt)
			break;

		next = virt + size;
		virt_next = next->next_virt;
		phys_next = HILO_DMA_REGPAIR(next->next_phys);

		dma_free_coherent(dev, chain->page_size, virt, phys);

		virt = virt_next;
		phys = phys_next;
	}
}

static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	if (!chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev, chain->page_size,
			  chain->p_virt_addr, chain->p_phys_addr);
}

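/* Free a PBL chain: the data pages listed in the address table, the
 * PBL table itself (unless it was provided externally) and, finally,
 * the virtual address table. The loop stops at the first unallocated
 * page, so it is safe to call on a partially allocated chain.
 */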
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *entry;
	u32 i;

	if (!chain->pbl.pp_addr_tbl)
		return;

	for (i = 0; i < chain->page_cnt; i++) {
		entry = chain->pbl.pp_addr_tbl + i;
		if (!entry->virt_addr)
			break;

		dma_free_coherent(dev, chain->page_size, entry->virt_addr,
				  entry->dma_map);
	}

	if (!chain->b_external_pbl)
		dma_free_coherent(dev, chain->pbl_sp.table_size,
				  chain->pbl_sp.table_virt,
				  chain->pbl_sp.table_phys);

	vfree(chain->pbl.pp_addr_tbl);
	chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
	switch (chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, chain);
		break;
	default:
		return;
	}

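	/* Forget the freed pages; the mode-specific free routines treat
	 * NULL as "nothing to do", so a repeated free is harmless.
	 */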
	qed_chain_init_mem(chain, NULL, 0);
}

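/* Verify that the chain size (in elements) fits the requested counter
 * width before any memory is allocated.
 */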
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     const struct qed_chain_init_params *params,
			     u32 page_cnt)
{
	u64 chain_size;

	chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
	chain_size *= page_cnt;

	if (!chain_size)
		return -EINVAL;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding the requested number of elements up to whole pages
	 * and taking the unusable elements (next-ptr elements) into account.
	 * The size of a "u16" chain can be (U16_MAX + 1), since the chain
	 * size/capacity fields are of u32 type.
	 */
	switch (params->cnt_type) {
	case QED_CHAIN_CNT_TYPE_U16:
		if (chain_size > U16_MAX + 1)
			break;

		return 0;
	case QED_CHAIN_CNT_TYPE_U32:
		if (chain_size > U32_MAX)
			break;

		return 0;
	default:
		return -EINVAL;
	}

	DP_NOTICE(cdev,
		  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
		  chain_size);

	return -EINVAL;
}

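/* Allocate the pages of a next-ptr chain and link each page's reserved
 * trailing element to its successor. The chain is made circular by
 * linking the last page back to the first one.
 */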
static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	void *virt, *virt_prev = NULL;
	dma_addr_t phys;
	u32 i;

	for (i = 0; i < chain->page_cnt; i++) {
		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		} else {
			qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
						     phys);
		}

		virt_prev = virt;
	}

	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
				     chain->p_phys_addr);

	return 0;
}

static int qed_chain_alloc_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init_mem(chain, virt, phys);
	qed_chain_reset(chain);

	return 0;
}

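/* Allocate a PBL chain: a virtual address table for the host, the PBL
 * table consumed by the hardware (unless one was supplied externally)
 * and the data pages themselves. On failure, the caller is expected to
 * clean up via qed_chain_free().
 */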
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *addr_tbl;
	dma_addr_t phys, pbl_phys;
	__le64 *pbl_virt;
	u32 page_cnt, i;
	size_t size;
	void *virt;

	page_cnt = chain->page_cnt;

	size = array_size(page_cnt, sizeof(*addr_tbl));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	addr_tbl = vzalloc(size);
	if (!addr_tbl)
		return -ENOMEM;

	chain->pbl.pp_addr_tbl = addr_tbl;

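	/* An externally provided PBL table (recorded in qed_chain_init())
	 * is used as-is; only the data pages need to be allocated.
	 */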
	if (chain->b_external_pbl) {
		pbl_virt = chain->pbl_sp.table_virt;
		goto alloc_pages;
	}

	size = array_size(page_cnt, sizeof(*pbl_virt));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
	if (!pbl_virt)
		return -ENOMEM;

	chain->pbl_sp.table_virt = pbl_virt;
	chain->pbl_sp.table_phys = pbl_phys;
	chain->pbl_sp.table_size = size;

alloc_pages:
	for (i = 0; i < page_cnt; i++) {
		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		}

		/* Fill the PBL table with the physical address of the page */
		pbl_virt[i] = cpu_to_le64(phys);

		/* Keep the virtual address of the page */
		addr_tbl[i].virt_addr = virt;
		addr_tbl[i].dma_map = phys;
	}

	return 0;
}

/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to be allocated.
 * @params: Chain initialization parameters.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
		    struct qed_chain_init_params *params)
{
	u32 page_cnt;
	int rc;

	if (!params->page_size)
		params->page_size = QED_CHAIN_PAGE_SIZE;

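	/* A single-page chain occupies exactly one page; otherwise, round
	 * the requested number of elements up to whole pages.
	 */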
	if (params->mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
					      params->elem_size,
					      params->page_size,
					      params->mode);

	rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
			  params->intended_use, params->mode, params->cnt_type,
			  params->num_elems, params->elem_size,
			  params->page_size);
		return rc;
	}

	qed_chain_init(chain, params, page_cnt);

	switch (params->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, chain);
		break;
	default:
		return -EINVAL;
	}

	if (!rc)
		return 0;

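	/* Roll back a partial allocation; the mode-specific free routines
	 * stop at the first page that was never allocated.
	 */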
	qed_chain_free(cdev, chain);

	return rc;
}

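/* Example usage (an illustrative sketch only; the descriptor type and
 * the element count below are assumptions, not values taken from this
 * file):
 *
 *	struct qed_chain_init_params params = {
 *		.mode		= QED_CHAIN_MODE_PBL,
 *		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 *		.num_elems	= 256,
 *		.elem_size	= sizeof(struct some_hw_desc),
 *	};
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, &chain, &params);
 *	if (rc)
 *		return rc;
 *
 *	... produce/consume elements via the qed_chain accessors ...
 *
 *	qed_chain_free(cdev, &chain);
 */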