/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c 341926 2018-12-12 12:06:25Z hselasky $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"

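/*
 * Compile-time check that the Linux-style PAGE_MASK, ~(PAGE_SIZE - 1),
 * is in effect. The native FreeBSD definition, PAGE_SIZE - 1, would
 * compare less than PAGE_SIZE and fail this assertion.
 */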
CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE);

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

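/*
 * Callback for bus_dmamap_load() of a single firmware page. Records the
 * bus address of the one expected segment, flags success or failure,
 * and signals the thread waiting in mlx5_fwp_alloc().
 */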
static void
mlx5_fwp_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mlx5_fw_page *fwp;
	uint8_t owned;

	fwp = (struct mlx5_fw_page *)arg;
	owned = MLX5_DMA_OWNED(fwp->dev);

	if (!owned)
		MLX5_DMA_LOCK(fwp->dev);

	if (error == 0) {
		KASSERT(nseg == 1, ("Number of segments is different from 1"));
		fwp->dma_addr = segs->ds_addr;
		fwp->load_done = MLX5_LOAD_ST_SUCCESS;
	} else {
		fwp->load_done = MLX5_LOAD_ST_FAILURE;
	}
	MLX5_DMA_DONE(fwp->dev);

	if (!owned)
		MLX5_DMA_UNLOCK(fwp->dev);
}

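/*
 * Flush CPU stores to a firmware page array before the device reads it.
 */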
void
mlx5_fwp_flush(struct mlx5_fw_page *fwp)
{
	unsigned num = fwp->numpages;

	while (num--)
		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map,
		    BUS_DMASYNC_PREWRITE);
}

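/*
 * Invalidate cached copies of a firmware page array so that subsequent
 * CPU reads observe what the device last wrote.
 */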
void
mlx5_fwp_invalidate(struct mlx5_fw_page *fwp)
{
	unsigned num = fwp->numpages;

	while (num--) {
		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map,
		    BUS_DMASYNC_PREREAD);
	}
}

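/*
 * Allocate and DMA-load "num" contiguous firmware page descriptors.
 * Each page gets its own DMA map; loads are serialized under the
 * command-interface sx lock and block until the load callback reports a
 * result, so a sleeping context is required unless num is zero.
 * Returns NULL on failure.
 */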
struct mlx5_fw_page *
mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num)
{
	struct mlx5_fw_page *fwp;
	unsigned x;
	int err;

	/* check for special case */
	if (num == 0) {
		fwp = kzalloc(sizeof(*fwp), flags);
		if (fwp != NULL)
			fwp->dev = dev;
		return (fwp);
	}

	/* we need sleeping context for this function */
	if (flags & M_NOWAIT)
		return (NULL);

	fwp = kzalloc(sizeof(*fwp) * num, flags);
	if (fwp == NULL)
		return (NULL);

	/* serialize loading the DMA map(s) */
	sx_xlock(&dev->cmd.dma_sx);

	for (x = 0; x != num; x++) {
		/* store pointer to MLX5 core device */
		fwp[x].dev = dev;
		/* store number of pages left from the array */
		fwp[x].numpages = num - x;

		/* allocate memory */
		err = bus_dmamem_alloc(dev->cmd.dma_tag, &fwp[x].virt_addr,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &fwp[x].dma_map);
		if (err != 0)
			goto failure;

		/* load memory into DMA */
		MLX5_DMA_LOCK(dev);
		(void) bus_dmamap_load(
		    dev->cmd.dma_tag, fwp[x].dma_map, fwp[x].virt_addr,
		    MLX5_ADAPTER_PAGE_SIZE, &mlx5_fwp_load_mem_cb,
		    fwp + x, BUS_DMA_WAITOK | BUS_DMA_COHERENT);

		while (fwp[x].load_done == MLX5_LOAD_ST_NONE)
			MLX5_DMA_WAIT(dev);
		MLX5_DMA_UNLOCK(dev);

		/* check for error */
		if (fwp[x].load_done != MLX5_LOAD_ST_SUCCESS) {
			bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr,
			    fwp[x].dma_map);
			goto failure;
		}
	}
	sx_xunlock(&dev->cmd.dma_sx);
	return (fwp);

failure:
	while (x--) {
		bus_dmamap_unload(dev->cmd.dma_tag, fwp[x].dma_map);
		bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr,
		    fwp[x].dma_map);
	}
	sx_xunlock(&dev->cmd.dma_sx);
	kfree(fwp);
	return (NULL);
}

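/*
 * Release a firmware page array allocated by mlx5_fwp_alloc(). Unloads
 * and frees every DMA page; safe to call with a NULL pointer.
 */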
void
mlx5_fwp_free(struct mlx5_fw_page *fwp)
{
	struct mlx5_core_dev *dev;
	unsigned num;

	/* be NULL safe */
	if (fwp == NULL)
		return;

	/* check for special case */
	if (fwp->numpages == 0) {
		kfree(fwp);
		return;
	}

	num = fwp->numpages;
	dev = fwp->dev;

	while (num--) {
		bus_dmamap_unload(dev->cmd.dma_tag, fwp[num].dma_map);
		bus_dmamem_free(dev->cmd.dma_tag, fwp[num].virt_addr,
		    fwp[num].dma_map);
	}

	kfree(fwp);
}

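/*
 * Translate a byte offset within a firmware page array into the
 * corresponding bus (DMA) address.
 */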
u64
mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset)
{
	size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
	KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));

	return ((fwp + index)->dma_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
}

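/*
 * Translate a byte offset within a firmware page array into the
 * corresponding kernel virtual address.
 */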
void *
mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset)
{
	size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
	KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));

	return ((char *)(fwp + index)->virt_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
}

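/*
 * Insert a firmware page into the per-device RB-tree, keyed by DMA
 * address. Note the inverted comparisons: larger keys go to the left.
 * The lookup in mlx5_remove_fw_page_locked() uses the same convention,
 * so the two stay consistent. The caller must hold the DMA lock.
 */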
static int
mlx5_insert_fw_page_locked(struct mlx5_core_dev *dev, struct mlx5_fw_page *nfp)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct mlx5_fw_page *tfp;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct mlx5_fw_page, rb_node);
		if (tfp->dma_addr < nfp->dma_addr)
			new = &parent->rb_left;
		else if (tfp->dma_addr > nfp->dma_addr)
			new = &parent->rb_right;
		else
			return (-EEXIST);
	}

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	return (0);
}

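/*
 * Look up a firmware page by DMA address, unlink it from the RB-tree
 * and return it, or NULL if no page with that address is tracked.
 */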
static struct mlx5_fw_page *
mlx5_remove_fw_page_locked(struct mlx5_core_dev *dev, bus_addr_t addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct mlx5_fw_page *result = NULL;
	struct mlx5_fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node);
		if (tfp->dma_addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->dma_addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, &dev->priv.page_root);
			result = tfp;
			break;
		}
	}
	return (result);
}

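/*
 * Allocate a single 4K firmware page, tag it with the owning function
 * ID and add it to the RB-tree. On success the bus address is returned
 * through *addr, ready to be handed to firmware.
 */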
static int
alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
	struct mlx5_fw_page *fwp;
	int err;

	fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (fwp == NULL)
		return (-ENOMEM);

	fwp->func_id = func_id;

	MLX5_DMA_LOCK(dev);
	err = mlx5_insert_fw_page_locked(dev, fwp);
	MLX5_DMA_UNLOCK(dev);

	if (err != 0) {
		mlx5_fwp_free(fwp);
	} else {
		/* make sure cached data is cleaned */
		mlx5_fwp_invalidate(fwp);

		/* store DMA address */
		*addr = fwp->dma_addr;
	}
	return (err);
}

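/*
 * Release a 4K firmware page previously handed out by alloc_4k(),
 * identified by its bus address.
 */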
static void
free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct mlx5_fw_page *fwp;

	MLX5_DMA_LOCK(dev);
	fwp = mlx5_remove_fw_page_locked(dev, addr);
	MLX5_DMA_UNLOCK(dev);

	if (fwp == NULL) {
		mlx5_core_warn(dev, "Cannot free 4K page at 0x%llx\n",
		    (unsigned long long)addr);
		return;
	}
	mlx5_fwp_free(fwp);
}

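/*
 * Query how many pages the firmware wants at this stage (boot or init)
 * and which function the request is for.
 */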
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

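/*
 * Allocate "npages" 4K pages and give them to the firmware with
 * MANAGE_PAGES(GIVE). On failure, optionally tell the firmware that no
 * pages can be supplied (CANT_GIVE) and free whatever was allocated.
 */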
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in, *nin;
	int i = 0;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc of %d bytes failed\n", inlen);
		err = -ENOMEM;
		goto out_alloc;
	}

	for (i = 0; i < npages; i++) {
		err = alloc_4k(dev, &addr, func_id);
		if (err)
			goto out_alloc;
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;
	dev->priv.pages_per_func[func_id] += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = mlx5_vzalloc(inlen);
		if (!nin)
			goto out_4k;

		memset(&out, 0, sizeof(out));
		MLX5_SET(manage_pages_in, nin, opcode, MLX5_CMD_OP_MANAGE_PAGES);
		MLX5_SET(manage_pages_in, nin, op_mod, MLX5_PAGES_CANT_GIVE);
		MLX5_SET(manage_pages_in, nin, function_id, func_id);
		if (mlx5_cmd_exec(dev, nin, inlen, out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
		kvfree(nin);
	}

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	return err;
}

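/*
 * Execute a MANAGE_PAGES(TAKE) command. If the device is in internal
 * error state the firmware cannot answer, so the reply is synthesized
 * locally by walking the RB-tree for pages owned by the function.
 */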
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct mlx5_fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->dma_addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

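/*
 * Ask the firmware to return up to "npages" pages owned by "func_id"
 * and free every page it reports back. The number of pages actually
 * reclaimed is returned through *nclaimed when non-NULL.
 */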
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	dev->priv.pages_per_func[func_id] -= num_claimed;
	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

out_free:
	kvfree(out);
	return err;
}

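/*
 * Workqueue handler for deferred page requests: a negative page count
 * means the firmware is returning pages, a positive one means it wants
 * more.
 */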
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

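/*
 * Entry point for firmware page-request events. Runs in a context that
 * may not sleep, hence GFP_ATOMIC, so the actual work is deferred to
 * the page-allocator workqueue.
 */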
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	if (!queue_work(dev->priv.pg_wq, &req->work))
		mlx5_core_warn(dev, "failed to queue pages handler work\n");
}

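/*
 * Query and supply the pages the firmware needs during boot or
 * initialization.
 */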
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

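/*
 * Wait for the firmware to return all pages held on behalf of VFs,
 * extending the deadline as long as progress is being made. Returns the
 * negated count of pages still outstanding, i.e. 0 on success.
 */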
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	s64 prevpages = 0;
	s64 npages = 0;

	while (!time_after(jiffies, end)) {
		/* exclude own function, VFs only */
		npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
		if (!npages)
			break;

		if (npages != prevpages)
			end = end + msecs_to_jiffies(100);

		prevpages = npages;
		msleep(1);
	}

	if (npages)
		mlx5_core_warn(dev, "FW did not return all VF pages; this may leak memory\n");

	return -npages;
}

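/*
 * Compute how many page addresses fit into one MANAGE_PAGES reply: the
 * inline output area of the command layout plus
 * MLX5_BLKS_FOR_RECLAIM_PAGES mailbox data blocks, minus the fixed
 * output header, divided by the size of one page address entry.
 */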
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

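/*
 * Reclaim all pages from the firmware at teardown, batch by batch. The
 * timeout is rearmed whenever a batch makes progress, so only a stalled
 * firmware makes us give up.
 */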
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct mlx5_fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}

			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages; giving up\n");
			break;
		}
	} while (p);

	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{

	dev->priv.page_root = RB_ROOT;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}