/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void
irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = list_first_entry(&pinfo->clist, struct irdma_chunk,
					 list);
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
	spin_lock_destroy(&pinfo->prm_lock);
	mutex_destroy(&pble_rsrc->pble_mutex_lock);
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
int
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
		    struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	int status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
	/* Start pbles on a 4K boundary */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
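	/*
	 * The shift by 3 above converts bytes to 8-byte pble entries (the
	 * same >> 3 / << 3 arithmetic is used throughout this file).  For
	 * example, a base address of 0x1804 leaves 4096 - 0x804 = 2044
	 * bytes to the next 4K boundary, so fpm_idx = 255 entries are
	 * skipped.
	 */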
	pble_rsrc->unallocated_pble =
	    hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = -ENOMEM;
	}

	return status;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void
get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct sd_pd_idx *idx)
{
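	/*
	 * A direct SD spans IRDMA_HMC_DIRECT_BP_SIZE bytes and is carved
	 * into IRDMA_HMC_PD_CNT_IN_SD paged (4K) backing pages, so
	 * rel_pd_idx is the 4K-page offset within the containing SD.
	 */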
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static int
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	int ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

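	/* rel_pd_idx << HMC_PAGED_BP_SHIFT is the chunk's byte offset into
	 * the SD's contiguous backing page
	 */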
	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = (u8 *)sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	irdma_debug(dev, IRDMA_DEBUG_PBLE,
		    "chunk_size[%ld] = 0x%lx vaddr=0x%p fpm_addr = %lx\n",
		    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32
fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

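	/* 8 bytes per pble entry, hence the shift by 3 */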
	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static int
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
	     struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
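	/* program each 4K backing page into the paged SD's PD table,
	 * skipping entries a previous allocation already made valid
	 */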
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - get the sd entry type to use for an allocation
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type
irdma_get_type(struct irdma_sc_dev *dev,
	       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

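	/* a direct (contiguous) SD is usable only when the allocation
	 * starts on an SD boundary and covers the SD's full page count;
	 * anything smaller falls back to a paged SD
	 */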
	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
	    IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static int
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
	    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
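	/* unallocated_pble >> PBLE_512_SHIFT converts a pble count to 4K
	 * pages: a 4K page holds 512 8-byte pble entries
	 */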
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	irdma_debug(dev, IRDMA_DEBUG_PBLE,
		    "pages = %d, unallocated_pble[%d] current_fpm_addr = %lx\n",
		    pages, pble_rsrc->unallocated_pble,
		    pble_rsrc->next_fpm_addr);
	irdma_debug(dev, IRDMA_DEBUG_PBLE, "sd_entry_type = %d\n",
		    sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT) {
		ret_code = add_sd_direct(pble_rsrc, &info);
		if (ret_code)
			sd_entry_type = IRDMA_SD_TYPE_PAGED;
		else
			pble_rsrc->stats_direct_sds++;
	}

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto err_bp_pages;
		pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto err_bp_pages;

	pble_rsrc->next_fpm_addr += chunk->size;
	irdma_debug(dev, IRDMA_DEBUG_PBLE,
		    "next_fpm_addr = %lx chunk_size[%lu] = 0x%lx\n",
		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
	    sd_entry->u.pd_table.pd_page_addr.pa :
	    sd_entry->u.bp.addr.pa;
	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	sd_entry->valid = true;
	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
err_bp_pages:
	kfree(chunk->chunkmem.va);

	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void
free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
	  struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

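	/* leaves were allocated in order, so the first leaf without an
	 * address marks the end of what needs to be returned
	 */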
	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static int
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;

	/* number of full 512-pble (4K) leaves */
	lf4k = palloc->total_cnt >> PBLE_512_SHIFT;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;
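	/* e.g. total_cnt = 1025 gives lf4k = 2 full leaves plus lflast = 1
	 * pble in a final partial leaf, so leaf_cnt = 3
	 */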

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
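	/* the root page holds the fpm index of each leaf rather than a
	 * physical address: *addr = (u64)leaf->idx below
	 */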
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
		    lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static int
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @lvl: Bitmask for requested pble level
 */
static int
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u8 lvl)
{
	int status = 0;

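	/* prefer a single contiguous level-1 range; fall back to level 2
	 * only if level 1 failed, the caller allows it, and the request
	 * spans more than one 4K page of pbles
	 */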
	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: #of pbles requested
 * @lvl: requested pble level mask
 */
int
irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	       struct irdma_pble_alloc *palloc, u32 pble_cnt,
	       u8 lvl)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* check first to see if we can get pbles without acquiring
	 * additional SDs
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
	if (!status)
		goto exit;

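	/* a direct SD holds 2^18 pbles (IRDMA_HMC_DIRECT_BP_SIZE / 8 bytes
	 * per entry), so this bounds how many SDs total_cnt could need
	 */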
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
		/* if level1_only, only go through it once */
		if (!status || lvl == PBLE_LEVEL_1)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void
irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}