mlx5_pagealloc.c revision 322144
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c 322144 2017-08-07 12:36:48Z hselasky $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"

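/*
 * Work item describing one firmware page request: a positive npages
 * means that many pages should be given to firmware, a negative one
 * means that many should be reclaimed from it.
 */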
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;
	u8			rsvd[4];
	__be64			pas[0];
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

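/*
 * Pages handed to firmware are tracked in an rb-tree keyed by DMA address.
 * Each tracked system page is carved into MLX5_NUM_4K_IN_PAGE chunks of
 * MLX5_ADAPTER_PAGE_SIZE; the bitmask records which chunks are still free.
 */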
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)];
	u32 out[MLX5_ST_SZ_DW(query_pages_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod,
		 boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

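/*
 * Take one free 4K chunk from the first partially free page on the free
 * list; a page whose last free chunk is handed out is dropped from the list.
 */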
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

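/*
 * Return a 4K chunk to its backing page.  A fully free page is unmapped
 * and released; a page that just regained its first free chunk goes back
 * on the free list.
 */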
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;

	page = alloc_page(GFP_HIGHUSER);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);
	return err;
}

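/*
 * Allocate npages 4K chunks (allocating and mapping new system pages as
 * needed) and post them to firmware with MANAGE_PAGES/GIVE.  On failure,
 * optionally tell firmware that no pages can be supplied and release
 * everything allocated so far.
 */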
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct mlx5_manage_pages_inbox *nin;
	int inlen;
	u64 addr;
	int err;
	int i = 0;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		err = -ENOMEM;
		goto out_alloc;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_alloc;

			goto retry;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;
	dev->priv.pages_per_func[func_id] += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
				       func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
		if (!nin) {
			mlx5_core_warn(dev, "allocation failed\n");
			goto out_4k;
		}
		memset(&out, 0, sizeof(out));
		nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		nin->func_id = cpu_to_be16(func_id);
		if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
		kfree(nin);
	}
out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
	kvfree(in);
	return err;
}

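/*
 * Ask firmware to hand back up to npages 4K chunks via MANAGE_PAGES/TAKE
 * and free every address it returns.
 */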
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox   in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	dev->priv.pages_per_func[func_id] -= num_claimed;
	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

out_free:
	kvfree(out);
	return err;
}

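/* Deferred page work: a negative count reclaims pages, a positive one gives them. */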
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

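/*
 * Entry point for firmware page request events.  The request is allocated
 * with GFP_ATOMIC and deferred to the page allocator workqueue, since this
 * may be called from atomic context.
 */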
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	if (!queue_work(dev->priv.pg_wq, &req->work))
		mlx5_core_warn(dev, "failed to queue pages handler work\n");
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

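/*
 * Wait up to MAX_RECLAIM_TIME_MSECS for firmware to return the pages given
 * out on behalf of VFs (everything except pages_per_func[0], the PF's own
 * pages).  The deadline is extended by 100ms whenever progress is observed.
 * Returns minus the number of pages still outstanding.
 */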
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	s64 prevpages = 0;
	s64 npages = 0;

	while (!time_after(jiffies, end)) {
		/* exclude own function, VFs only */
		npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
		if (!npages)
			break;

		if (npages != prevpages)
			end = end + msecs_to_jiffies(100);

		prevpages = npages;
		msleep(1);
	}

	if (npages)
		mlx5_core_warn(dev, "FW did not return all VF pages, this will cause a memory leak\n");

	return -npages;
}

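/*
 * Number of page addresses that fit in a reclaim response built from the
 * inline command output plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */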
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}

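/*
 * Reclaim every page still registered in the page tree, restarting the
 * timeout whenever a reclaim command makes progress; give up once
 * MAX_RECLAIM_TIME_MSECS pass without any page being returned.
 */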
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}

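/* Page allocator setup/teardown: rb-tree, free list and single-threaded workqueue. */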
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}