/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

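/* Ask firmware for a UAR index via the ALLOC_UAR command; on success the
 * allocated index is returned through @uarn.
 */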
static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
	if (err)
		return err;

	*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return 0;
}

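/* Return a UAR index to firmware via the DEALLOC_UAR command. */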
static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}

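/* Number of 4K adapter UARs that share one system page. When the device
 * supports 4K UARs and the system page size is larger than 4K (e.g. 64K
 * pages on some architectures), several UARs fit in a single system page;
 * otherwise the mapping is one to one.
 */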
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;
}

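/* Translate a UAR index to the PFN of the system page containing it. With
 * 4K UARs, several consecutive indices fall within one system page, hence
 * the index is scaled down by (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT).
 */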
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
	u32 system_page_index;

	if (MLX5_CAP_GEN(mdev, uar_4k))
		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
	else
		system_page_index = index;

	return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
}

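/* kref release callback for a UARs page. Called with the owning list lock
 * held, since it unlinks the page from that list before unmapping it and
 * returning the UAR index to firmware.
 */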
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}

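/* Allocate one system page worth of UARs: obtain a UAR index from firmware,
 * map the page (write-combining or not, per @map_wc) and set up two bitmaps
 * tracking the free regular and fast-path bfregs on that page.
 */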
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					      bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int node;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	node = mdev->priv.numa_node;
	up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
	if (!up->fp_bitmap)
		goto error1;

	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}

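/* Return a reference-counted, non-write-combining UARs page shared by
 * kernel users, allocating one on first use. Must be balanced by a call
 * to mlx5_put_uars_page().
 */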
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

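/* Drop a reference taken by mlx5_get_uars_page(); the last put unmaps the
 * page and frees its UAR index.
 */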
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

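/* Worked example, assuming the common constant values (MLX5_BFREGS_PER_UAR
 * of 4, MLX5_ADAPTER_PAGE_SIZE of 4096, MLX5_BF_OFFSET of 0x800) and a
 * 512-byte blue flame register (log_bf_reg_size of 9): dbi 5 sits in the
 * second 4K UAR of the system page (5 / 4 = 1) as bfreg 1 (5 % 4), giving
 * an offset of 1 * 4096 + 1 * 512 + 0x800 = 6656 bytes.
 */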
static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
	/* return the offset in bytes from the start of the page to the
	 * blue flame area of the UAR
	 */
	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
	       (dbi % MLX5_BFREGS_PER_UAR) *
	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

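/* Carve a bfreg out of the first UARs page on the relevant list (WC or
 * regular), allocating a fresh page when the list is empty. A page whose
 * last available bfreg of the requested kind is taken gets removed from
 * the list, so later allocations skip it.
 */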
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock;  /* pointer to right mutex */
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}

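/* Allocate a blue flame register. If a write-combining mapping was
 * requested but ioremap_wc() failed (-EAGAIN from alloc_uars_page()),
 * fall back to a non-write-combining mapping.
 */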
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path)
{
	int err;

	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
	if (!err)
		return 0;

	if (err == -EAGAIN && map_wc)
		return alloc_bfreg(mdev, bfreg, false, fast_path);

	return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

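/* Recover the bfreg index within the system page from the mapped address:
 * which 4K UAR the address falls in, and which blue flame register within
 * that UAR.
 */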
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
					   struct mlx5_uars_page *up,
					   struct mlx5_sq_bfreg *bfreg)
{
	unsigned int uar_idx;
	unsigned int bfreg_idx;
	unsigned int bf_reg_size;

	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

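/* Release a bfreg: mark its bit free again, put the owning page back on
 * the proper list if it had been full, and drop the page reference.
 */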
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock; /* pointer to right mutex */
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);