/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c 322148 2017-08-07 12:44:18Z hselasky $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"

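/*
 * Deferred page give/reclaim request, queued by
 * mlx5_core_req_pages_handler() and serviced by pages_work_handler()
 * on the page allocator workqueue. A positive npages asks us to give
 * pages to firmware; a negative value asks us to reclaim them.
 */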
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

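/*
 * One system page handed to firmware. The page is carved into
 * MLX5_NUM_4K_IN_PAGE 4K chunks; "bitmask" tracks which chunks are
 * still free and "free_count" caches the number of set bits. Pages
 * with at least one free chunk sit on dev->priv.free_list, and all
 * pages are indexed by DMA address in the dev->priv.page_root rb-tree.
 */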
struct mlx5_fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

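/*
 * Wire layouts for the MANAGE_PAGES command mailbox. The trailing
 * pas[] array of 64-bit physical addresses is sized at runtime from
 * num_entries.
 */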
struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;
	u8			rsvd[4];
	__be64			pas[0];
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

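/*
 * Track a freshly mapped system page in the rb-tree, keyed by its DMA
 * address, and put it on the free list with all 4K chunks marked free.
 * Returns -EEXIST if the address is already tracked.
 */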
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct mlx5_fw_page *nfp;
	struct mlx5_fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct mlx5_fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

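/*
 * Look up the tracked system page with DMA address "addr" in the
 * dev->priv.page_root rb-tree; returns NULL when not found. The walk
 * uses the same ordering as insert_page(), so the two stay consistent.
 */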
static struct mlx5_fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct mlx5_fw_page *result = NULL;
	struct mlx5_fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

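/*
 * Ask firmware how many pages it wants (QUERY_PAGES). "boot" selects
 * the boot-time page count, otherwise the init-time count is queried.
 * On success *npages and *func_id are filled from the reply.
 */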
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)];
	u32 out[MLX5_ST_SZ_DW(query_pages_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod,
		 boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

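/*
 * Carve one free 4K chunk out of the first system page on the free
 * list and return its DMA address. A page whose last chunk is handed
 * out is dropped from the free list. Returns -ENOMEM when the free
 * list is empty, so the caller can map a new system page.
 */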
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct mlx5_fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct mlx5_fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

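/*
 * Return a 4K chunk to its system page. The chunk index within the
 * page is recovered from the low bits of the DMA address (e.g. with
 * 8K system pages, addr & ~PAGE_MASK yields chunk 0 or 1). When the
 * last chunk comes back the whole page is unmapped and freed; when
 * the first chunk comes back the page rejoins the free list.
 */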
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct mlx5_fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

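/*
 * Allocate one system page, DMA map it towards the device and start
 * tracking it in the rb-tree/free list so alloc_4k() can carve it up.
 */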
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;

	page = alloc_page(GFP_HIGHUSER);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);
	return err;
}
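
/*
 * Hand "npages" 4K pages to firmware for the given function. Chunks
 * come from alloc_4k(), backed by new system pages on demand. On any
 * failure the chunks gathered so far are freed again and, when
 * notify_fail is set, firmware is told we cannot give pages
 * (MLX5_PAGES_CANT_GIVE).
 */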
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct mlx5_manage_pages_inbox *nin;
	int inlen;
	u64 addr;
	int err;
	int i = 0;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		err = -ENOMEM;
		goto out_alloc;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_alloc;

			goto retry;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;
	dev->priv.pages_per_func[func_id] += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
				       func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
		if (nin) {
			memset(&out, 0, sizeof(out));
			nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
			nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
			nin->func_id = cpu_to_be16(func_id);
			if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
				mlx5_core_warn(dev, "page notify failed\n");
			kfree(nin);
		} else {
			mlx5_core_warn(dev, "failed to allocate page notify request\n");
		}
	}
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
	kvfree(in);
	return err;
}

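/*
 * Ask firmware to give back up to "npages" pages for the given
 * function (MLX5_PAGES_TAKE). The addresses returned in the outbox
 * are released through free_4k(), and the number actually reclaimed
 * is reported through *nclaimed when the caller asks for it.
 */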
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox   in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	dev->priv.pages_per_func[func_id] -= num_claimed;
	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

out_free:
	kvfree(out);
	return err;
}

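/*
 * Workqueue handler for deferred page requests: a negative npages
 * means firmware wants pages back, a positive one means it needs
 * more. give_pages() is told to notify firmware on failure.
 */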
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

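/*
 * Entry point for the firmware page-request event. The caller may not
 * sleep (hence GFP_ATOMIC), so the actual give/reclaim work is queued
 * on the page allocator workqueue.
 */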
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	if (!queue_work(dev->priv.pg_wq, &req->work))
		mlx5_core_warn(dev, "failed to queue pages handler work\n");
}

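/*
 * Query how many pages firmware wants at boot or init time and hand
 * them over. notify_fail is 0 here: on failure the error is returned
 * to the caller rather than reported to firmware.
 */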
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

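/*
 * Wait for firmware to return all pages owned by VFs (everything
 * except pages_per_func[0], our own function). The deadline is pushed
 * out by 100ms whenever progress is seen. Returns the number of pages
 * still outstanding, negated.
 */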
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	s64 prevpages = 0;
	s64 npages = 0;

	while (!time_after(jiffies, end)) {
		/* exclude own function, VFs only */
		npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
		if (!npages)
			break;

		if (npages != prevpages)
			end = end + msecs_to_jiffies(100);

		prevpages = npages;
		msleep(1);
	}

	if (npages)
		mlx5_core_warn(dev, "FW did not return all VF pages; this will leak memory\n");

	return -npages;
}

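/*
 * Compute how many page addresses fit in a reclaim reply spanning the
 * inline command output plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox
 * blocks, so each MLX5_PAGES_TAKE command reclaims as many pages as
 * one command cycle can carry.
 */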
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}

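/*
 * Reclaim every page still held by firmware during teardown. If the
 * device is in internal error state the commands would fail, so the
 * pages are simply freed locally instead. The deadline is reset after
 * each batch that makes progress; if firmware stops returning pages
 * we give up after MAX_RECLAIM_TIME_MSECS.
 */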
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct mlx5_fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
			if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
				--dev->priv.fw_pages;
				free_4k(dev, fwp->addr);
				nclaimed = 1;
			} else {
				err = reclaim_pages(dev, fwp->func_id,
						    optimal_reclaimed_pages(),
						    &nclaimed);
				if (err) {
					mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
						       err);
					return err;
				}
			}

			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}

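/*
 * Lifecycle hooks: init/cleanup set up the tracking structures, while
 * start/stop manage the single-threaded workqueue that services
 * deferred page requests.
 */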
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}