mlx5_pagealloc.c revision 290650
1290650Shselasky/*-
2290650Shselasky * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3290650Shselasky *
4290650Shselasky * Redistribution and use in source and binary forms, with or without
5290650Shselasky * modification, are permitted provided that the following conditions
6290650Shselasky * are met:
7290650Shselasky * 1. Redistributions of source code must retain the above copyright
8290650Shselasky *    notice, this list of conditions and the following disclaimer.
9290650Shselasky * 2. Redistributions in binary form must reproduce the above copyright
10290650Shselasky *    notice, this list of conditions and the following disclaimer in the
11290650Shselasky *    documentation and/or other materials provided with the distribution.
12290650Shselasky *
13290650Shselasky * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14290650Shselasky * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15290650Shselasky * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16290650Shselasky * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17290650Shselasky * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18290650Shselasky * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19290650Shselasky * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20290650Shselasky * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21290650Shselasky * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22290650Shselasky * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23290650Shselasky * SUCH DAMAGE.
24290650Shselasky *
25290650Shselasky * $FreeBSD: head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c 290650 2015-11-10 12:20:22Z hselasky $
26290650Shselasky */
27290650Shselasky
28290650Shselasky#include <linux/kernel.h>
29290650Shselasky#include <linux/module.h>
30290650Shselasky#include <dev/mlx5/driver.h>
31290650Shselasky#include "mlx5_core.h"
32290650Shselasky
/* Deferred page give/reclaim request, allocated by
 * mlx5_core_req_pages_handler() and executed later by
 * pages_work_handler() on the pg_wq workqueue. */
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;	/* function id the request applies to */
	s32	npages;		/* > 0: give pages to FW; < 0: reclaim from FW */
	struct work_struct work;
};
39290650Shselasky
/* Tracking entry for one DMA-mapped host page that is carved into
 * MLX5_NUM_4K_IN_PAGE 4K chunks handed to firmware individually. */
struct fw_page {
	struct rb_node		rb_node;	/* node in dev->priv.page_root, keyed by addr */
	u64			addr;		/* DMA address of the host page */
	struct page	       *page;		/* backing system page */
	u16			func_id;	/* function the page was given for */
	unsigned long		bitmask;	/* set bit == 4K chunk is free */
	struct list_head	list;		/* linkage on dev->priv.free_list */
	unsigned		free_count;	/* number of free 4K chunks remaining */
};
49290650Shselasky
/* Wire layout of the MANAGE_PAGES command inbox; pas[] is a trailing
 * variable-length array of big-endian page DMA addresses. */
struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;	/* number of entries in pas[] */
	__be64			pas[0];
};
57290650Shselasky
/* Wire layout of the MANAGE_PAGES command outbox; on reclaim, pas[]
 * holds the big-endian DMA addresses firmware returned. */
struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;	/* number of valid entries in pas[] */
	u8			rsvd[4];
	__be64			pas[0];
};
64290650Shselasky
enum {
	/* give up page reclaim after this long with no forward progress */
	MAX_RECLAIM_TIME_MSECS	= 5000,
};
68290650Shselasky
enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	/* number of 4K adapter pages that fit in one host page */
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
73290650Shselasky
74290650Shselaskystatic int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
75290650Shselasky{
76290650Shselasky	struct rb_root *root = &dev->priv.page_root;
77290650Shselasky	struct rb_node **new = &root->rb_node;
78290650Shselasky	struct rb_node *parent = NULL;
79290650Shselasky	struct fw_page *nfp;
80290650Shselasky	struct fw_page *tfp;
81290650Shselasky	int i;
82290650Shselasky
83290650Shselasky	while (*new) {
84290650Shselasky		parent = *new;
85290650Shselasky		tfp = rb_entry(parent, struct fw_page, rb_node);
86290650Shselasky		if (tfp->addr < addr)
87290650Shselasky			new = &parent->rb_left;
88290650Shselasky		else if (tfp->addr > addr)
89290650Shselasky			new = &parent->rb_right;
90290650Shselasky		else
91290650Shselasky			return -EEXIST;
92290650Shselasky	}
93290650Shselasky
94290650Shselasky	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
95290650Shselasky
96290650Shselasky	nfp->addr = addr;
97290650Shselasky	nfp->page = page;
98290650Shselasky	nfp->func_id = func_id;
99290650Shselasky	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
100290650Shselasky	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
101290650Shselasky		set_bit(i, &nfp->bitmask);
102290650Shselasky
103290650Shselasky	rb_link_node(&nfp->rb_node, parent, new);
104290650Shselasky	rb_insert_color(&nfp->rb_node, root);
105290650Shselasky	list_add(&nfp->list, &dev->priv.free_list);
106290650Shselasky
107290650Shselasky	return 0;
108290650Shselasky}
109290650Shselasky
110290650Shselaskystatic struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
111290650Shselasky{
112290650Shselasky	struct rb_root *root = &dev->priv.page_root;
113290650Shselasky	struct rb_node *tmp = root->rb_node;
114290650Shselasky	struct fw_page *result = NULL;
115290650Shselasky	struct fw_page *tfp;
116290650Shselasky
117290650Shselasky	while (tmp) {
118290650Shselasky		tfp = rb_entry(tmp, struct fw_page, rb_node);
119290650Shselasky		if (tfp->addr < addr) {
120290650Shselasky			tmp = tmp->rb_left;
121290650Shselasky		} else if (tfp->addr > addr) {
122290650Shselasky			tmp = tmp->rb_right;
123290650Shselasky		} else {
124290650Shselasky			result = tfp;
125290650Shselasky			break;
126290650Shselasky		}
127290650Shselasky	}
128290650Shselasky
129290650Shselasky	return result;
130290650Shselasky}
131290650Shselasky
132290650Shselaskystatic int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
133290650Shselasky				s32 *npages, int boot)
134290650Shselasky{
135290650Shselasky	u32 in[MLX5_ST_SZ_DW(query_pages_in)];
136290650Shselasky	u32 out[MLX5_ST_SZ_DW(query_pages_out)];
137290650Shselasky	int err;
138290650Shselasky
139290650Shselasky	memset(in, 0, sizeof(in));
140290650Shselasky
141290650Shselasky	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
142290650Shselasky	MLX5_SET(query_pages_in, in, op_mod,
143290650Shselasky		 boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);
144290650Shselasky
145290650Shselasky	memset(out, 0, sizeof(out));
146290650Shselasky	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
147290650Shselasky	if (err)
148290650Shselasky		return err;
149290650Shselasky
150290650Shselasky	*npages = MLX5_GET(query_pages_out, out, num_pages);
151290650Shselasky	*func_id = MLX5_GET(query_pages_out, out, function_id);
152290650Shselasky
153290650Shselasky	return 0;
154290650Shselasky}
155290650Shselasky
156290650Shselaskystatic int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
157290650Shselasky{
158290650Shselasky	struct fw_page *fp;
159290650Shselasky	unsigned n;
160290650Shselasky
161290650Shselasky	if (list_empty(&dev->priv.free_list))
162290650Shselasky		return -ENOMEM;
163290650Shselasky
164290650Shselasky	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
165290650Shselasky	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
166290650Shselasky	if (n >= MLX5_NUM_4K_IN_PAGE) {
167290650Shselasky		mlx5_core_warn(dev, "alloc 4k bug\n");
168290650Shselasky		return -ENOENT;
169290650Shselasky	}
170290650Shselasky	clear_bit(n, &fp->bitmask);
171290650Shselasky	fp->free_count--;
172290650Shselasky	if (!fp->free_count)
173290650Shselasky		list_del(&fp->list);
174290650Shselasky
175290650Shselasky	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
176290650Shselasky
177290650Shselasky	return 0;
178290650Shselasky}
179290650Shselasky
/*
 * Return one 4K chunk at DMA address addr to its tracking page. When
 * every chunk of the host page is free again, the page is untracked,
 * unmapped and released back to the system.
 */
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	/* look up the host page covering this 4K chunk */
	fwp = find_fw_page(dev, addr & PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	/* index of the 4K chunk within its host page */
	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		/* every chunk free again: untrack and release the page */
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		/* when free_count just became 1 (only possible if a host
		 * page holds a single 4K chunk) the page was removed from
		 * the free list by alloc_4k() and never re-added, so skip
		 * the list removal */
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		/* first chunk freed: page becomes allocatable again */
		list_add(&fwp->list, &dev->priv.free_list);
	}
}
206290650Shselasky
207290650Shselaskystatic int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
208290650Shselasky{
209290650Shselasky	struct page *page;
210290650Shselasky	u64 addr;
211290650Shselasky	int err;
212290650Shselasky
213290650Shselasky	page = alloc_page(GFP_HIGHUSER);
214290650Shselasky	if (!page) {
215290650Shselasky		mlx5_core_warn(dev, "failed to allocate page\n");
216290650Shselasky		return -ENOMEM;
217290650Shselasky	}
218290650Shselasky	addr = dma_map_page(&dev->pdev->dev, page, 0,
219290650Shselasky			    PAGE_SIZE, DMA_BIDIRECTIONAL);
220290650Shselasky	if (dma_mapping_error(&dev->pdev->dev, addr)) {
221290650Shselasky		mlx5_core_warn(dev, "failed dma mapping page\n");
222290650Shselasky		err = -ENOMEM;
223290650Shselasky		goto out_alloc;
224290650Shselasky	}
225290650Shselasky	err = insert_page(dev, addr, page, func_id);
226290650Shselasky	if (err) {
227290650Shselasky		mlx5_core_err(dev, "failed to track allocated page\n");
228290650Shselasky		goto out_mapping;
229290650Shselasky	}
230290650Shselasky
231290650Shselasky	return 0;
232290650Shselasky
233290650Shselaskyout_mapping:
234290650Shselasky	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
235290650Shselasky
236290650Shselaskyout_alloc:
237290650Shselasky	__free_page(page);
238290650Shselasky	return err;
239290650Shselasky}
240290650Shselaskystatic int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
241290650Shselasky		      int notify_fail)
242290650Shselasky{
243290650Shselasky	struct mlx5_manage_pages_inbox *in;
244290650Shselasky	struct mlx5_manage_pages_outbox out;
245290650Shselasky	struct mlx5_manage_pages_inbox *nin;
246290650Shselasky	int inlen;
247290650Shselasky	u64 addr;
248290650Shselasky	int err;
249290650Shselasky	int i;
250290650Shselasky
251290650Shselasky	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
252290650Shselasky	in = mlx5_vzalloc(inlen);
253290650Shselasky	if (!in) {
254290650Shselasky		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
255290650Shselasky		return -ENOMEM;
256290650Shselasky	}
257290650Shselasky	memset(&out, 0, sizeof(out));
258290650Shselasky
259290650Shselasky	for (i = 0; i < npages; i++) {
260290650Shselaskyretry:
261290650Shselasky		err = alloc_4k(dev, &addr);
262290650Shselasky		if (err) {
263290650Shselasky			if (err == -ENOMEM)
264290650Shselasky				err = alloc_system_page(dev, func_id);
265290650Shselasky			if (err)
266290650Shselasky				goto out_4k;
267290650Shselasky
268290650Shselasky			goto retry;
269290650Shselasky		}
270290650Shselasky		in->pas[i] = cpu_to_be64(addr);
271290650Shselasky	}
272290650Shselasky
273290650Shselasky	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
274290650Shselasky	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
275290650Shselasky	in->func_id = cpu_to_be16(func_id);
276290650Shselasky	in->num_entries = cpu_to_be32(npages);
277290650Shselasky	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
278290650Shselasky	if (err) {
279290650Shselasky		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
280290650Shselasky			       func_id, npages, err);
281290650Shselasky		goto out_alloc;
282290650Shselasky	}
283290650Shselasky	dev->priv.fw_pages += npages;
284290650Shselasky
285290650Shselasky	if (out.hdr.status) {
286290650Shselasky		err = mlx5_cmd_status_to_err(&out.hdr);
287290650Shselasky		if (err) {
288290650Shselasky			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
289290650Shselasky				       func_id, npages, out.hdr.status);
290290650Shselasky			goto out_alloc;
291290650Shselasky		}
292290650Shselasky	}
293290650Shselasky
294290650Shselasky	mlx5_core_dbg(dev, "err %d\n", err);
295290650Shselasky
296290650Shselasky	goto out_free;
297290650Shselasky
298290650Shselaskyout_alloc:
299290650Shselasky	if (notify_fail) {
300290650Shselasky		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
301290650Shselasky		memset(&out, 0, sizeof(out));
302290650Shselasky		nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
303290650Shselasky		nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
304290650Shselasky		if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
305290650Shselasky			mlx5_core_warn(dev, "page notify failed\n");
306290650Shselasky		kfree(nin);
307290650Shselasky	}
308290650Shselasky
309290650Shselaskyout_4k:
310290650Shselasky	for (i--; i >= 0; i--)
311290650Shselasky		free_4k(dev, be64_to_cpu(in->pas[i]));
312290650Shselaskyout_free:
313290650Shselasky	kvfree(in);
314290650Shselasky	return err;
315290650Shselasky}
316290650Shselasky
317290650Shselaskystatic int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
318290650Shselasky			 int *nclaimed)
319290650Shselasky{
320290650Shselasky	struct mlx5_manage_pages_inbox   in;
321290650Shselasky	struct mlx5_manage_pages_outbox *out;
322290650Shselasky	int num_claimed;
323290650Shselasky	int outlen;
324290650Shselasky	u64 addr;
325290650Shselasky	int err;
326290650Shselasky	int i;
327290650Shselasky
328290650Shselasky	if (nclaimed)
329290650Shselasky		*nclaimed = 0;
330290650Shselasky
331290650Shselasky	memset(&in, 0, sizeof(in));
332290650Shselasky	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
333290650Shselasky	out = mlx5_vzalloc(outlen);
334290650Shselasky	if (!out)
335290650Shselasky		return -ENOMEM;
336290650Shselasky
337290650Shselasky	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
338290650Shselasky	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
339290650Shselasky	in.func_id = cpu_to_be16(func_id);
340290650Shselasky	in.num_entries = cpu_to_be32(npages);
341290650Shselasky	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
342290650Shselasky	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
343290650Shselasky	if (err) {
344290650Shselasky		mlx5_core_err(dev, "failed reclaiming pages\n");
345290650Shselasky		goto out_free;
346290650Shselasky	}
347290650Shselasky
348290650Shselasky	if (out->hdr.status) {
349290650Shselasky		err = mlx5_cmd_status_to_err(&out->hdr);
350290650Shselasky		goto out_free;
351290650Shselasky	}
352290650Shselasky
353290650Shselasky	num_claimed = be32_to_cpu(out->num_entries);
354290650Shselasky	if (nclaimed)
355290650Shselasky		*nclaimed = num_claimed;
356290650Shselasky
357290650Shselasky	dev->priv.fw_pages -= num_claimed;
358290650Shselasky
359290650Shselasky	for (i = 0; i < num_claimed; i++) {
360290650Shselasky		addr = be64_to_cpu(out->pas[i]);
361290650Shselasky		free_4k(dev, addr);
362290650Shselasky	}
363290650Shselasky
364290650Shselaskyout_free:
365290650Shselasky	kvfree(out);
366290650Shselasky	return err;
367290650Shselasky}
368290650Shselasky
369290650Shselaskystatic void pages_work_handler(struct work_struct *work)
370290650Shselasky{
371290650Shselasky	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
372290650Shselasky	struct mlx5_core_dev *dev = req->dev;
373290650Shselasky	int err = 0;
374290650Shselasky
375290650Shselasky	if (req->npages < 0)
376290650Shselasky		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
377290650Shselasky	else if (req->npages > 0)
378290650Shselasky		err = give_pages(dev, req->func_id, req->npages, 1);
379290650Shselasky
380290650Shselasky	if (err)
381290650Shselasky		mlx5_core_warn(dev, "%s fail %d\n",
382290650Shselasky			       req->npages < 0 ? "reclaim" : "give", err);
383290650Shselasky
384290650Shselasky	kfree(req);
385290650Shselasky}
386290650Shselasky
387290650Shselaskyvoid mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
388290650Shselasky				 s32 npages)
389290650Shselasky{
390290650Shselasky	struct mlx5_pages_req *req;
391290650Shselasky
392290650Shselasky	req = kzalloc(sizeof(*req), GFP_ATOMIC);
393290650Shselasky	if (!req) {
394290650Shselasky		mlx5_core_warn(dev, "failed to allocate pages request\n");
395290650Shselasky		return;
396290650Shselasky	}
397290650Shselasky
398290650Shselasky	req->dev = dev;
399290650Shselasky	req->func_id = func_id;
400290650Shselasky	req->npages = npages;
401290650Shselasky	INIT_WORK(&req->work, pages_work_handler);
402290650Shselasky	if (!queue_work(dev->priv.pg_wq, &req->work))
403290650Shselasky		mlx5_core_warn(dev, "failed to queue pages handler work\n");
404290650Shselasky}
405290650Shselasky
406290650Shselaskyint mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
407290650Shselasky{
408290650Shselasky	u16 uninitialized_var(func_id);
409290650Shselasky	s32 uninitialized_var(npages);
410290650Shselasky	int err;
411290650Shselasky
412290650Shselasky	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
413290650Shselasky	if (err)
414290650Shselasky		return err;
415290650Shselasky
416290650Shselasky	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
417290650Shselasky		      npages, boot ? "boot" : "init", func_id);
418290650Shselasky
419290650Shselasky	return give_pages(dev, func_id, npages, 0);
420290650Shselasky}
421290650Shselasky
enum {
	/* number of command mailbox blocks used when sizing a reclaim batch
	 * in optimal_reclaimed_pages() */
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};
425290650Shselasky
426290650Shselaskystatic int optimal_reclaimed_pages(void)
427290650Shselasky{
428290650Shselasky	struct mlx5_cmd_prot_block *block;
429290650Shselasky	struct mlx5_cmd_layout *lay;
430290650Shselasky	int ret;
431290650Shselasky
432290650Shselasky	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
433290650Shselasky	       sizeof(struct mlx5_manage_pages_outbox)) /
434290650Shselasky	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
435290650Shselasky
436290650Shselasky	return ret;
437290650Shselasky}
438290650Shselasky
439290650Shselaskyint mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
440290650Shselasky{
441290650Shselasky	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
442290650Shselasky	struct fw_page *fwp;
443290650Shselasky	struct rb_node *p;
444290650Shselasky	int nclaimed = 0;
445290650Shselasky	int err;
446290650Shselasky
447290650Shselasky	do {
448290650Shselasky		p = rb_first(&dev->priv.page_root);
449290650Shselasky		if (p) {
450290650Shselasky			fwp = rb_entry(p, struct fw_page, rb_node);
451290650Shselasky			err = reclaim_pages(dev, fwp->func_id,
452290650Shselasky					    optimal_reclaimed_pages(),
453290650Shselasky					    &nclaimed);
454290650Shselasky			if (err) {
455290650Shselasky				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
456290650Shselasky					       err);
457290650Shselasky				return err;
458290650Shselasky			}
459290650Shselasky			if (nclaimed)
460290650Shselasky				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
461290650Shselasky		}
462290650Shselasky		if (time_after(jiffies, end)) {
463290650Shselasky			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
464290650Shselasky			break;
465290650Shselasky		}
466290650Shselasky	} while (p);
467290650Shselasky
468290650Shselasky	return 0;
469290650Shselasky}
470290650Shselasky
/* Initialize the page-tracking state: an empty rb-tree of tracked
 * pages and an empty free list. */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}
476290650Shselasky
/* Counterpart of mlx5_pagealloc_init(); currently there is no state
 * to tear down here. */
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}
481290650Shselasky
482290650Shselaskyint mlx5_pagealloc_start(struct mlx5_core_dev *dev)
483290650Shselasky{
484290650Shselasky	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
485290650Shselasky	if (!dev->priv.pg_wq)
486290650Shselasky		return -ENOMEM;
487290650Shselasky
488290650Shselasky	return 0;
489290650Shselasky}
490290650Shselasky
/* Destroy the page-request workqueue created by
 * mlx5_pagealloc_start(). */
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}
495