/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

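/*
 * A doorbell record page directory: one DMA-coherent page (db_page/db_dma)
 * carved into cache-line-sized doorbell records, with a bitmap tracking
 * which records are still free.
 */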
struct mlx5_db_pgdir {
	struct list_head	list;
	unsigned long	       *bitmap;
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

/* Handling for queue buffers -- we allocate the buffer memory in
 * page-sized fragments and register it in a memory region at HCA
 * virtual address 0.
 */

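/*
 * Allocate zeroed DMA-coherent memory, preferring the requested NUMA node.
 * The device node is overridden for the duration of the allocation, so the
 * override is serialized with alloc_mutex.
 */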
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct device *device = mlx5_core_dma_dev(dev);
	struct mlx5_priv *priv = &dev->priv;
	int original_node;
	void *cpu_handle;

	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(device);
	set_dev_node(device, node);
	cpu_handle = dma_alloc_coherent(device, size, dma_handle,
					GFP_KERNEL);
	set_dev_node(device, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}

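/*
 * Allocate @size bytes of DMA memory as an array of page-sized fragments on
 * @node. Every fragment must come back page aligned; on any failure, the
 * fragments allocated so far are freed and -ENOMEM is returned.
 */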
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node)
{
	int i;

	buf->size = size;
	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
			     GFP_KERNEL);
	if (!buf->frags)
		goto err_out;

	for (i = 0; i < buf->npages; i++) {
		struct mlx5_buf_list *frag = &buf->frags[i];
		int frag_sz = min_t(int, size, PAGE_SIZE);

		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
							  &frag->map, node);
		if (!frag->buf)
			goto err_free_buf;
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
			goto err_free_buf;
		}
		size -= frag_sz;
	}

	return 0;

err_free_buf:
	while (i--)
		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
	kfree(buf->frags);
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);

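/* Free every fragment of a buffer allocated with mlx5_frag_buf_alloc_node(). */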
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
	int size = buf->size;
	int i;

	for (i = 0; i < buf->npages; i++) {
		int frag_sz = min_t(int, size, PAGE_SIZE);

		dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
				  buf->frags[i].map);
		size -= frag_sz;
	}
	kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);

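/*
 * Allocate a new doorbell page directory on @node: a coherent DMA page plus
 * a bitmap with all doorbell records marked free.
 */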
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
	if (!pgdir)
		return NULL;

	pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
	if (!pgdir->bitmap) {
		kfree(pgdir);
		return NULL;
	}

	bitmap_fill(pgdir->bitmap, db_per_page);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		bitmap_free(pgdir->bitmap);
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

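/*
 * Take a free doorbell record from @pgdir, zero it and fill in @db.
 * Returns -ENOMEM when the page has no free records left.
 */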
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, db_per_page);
	if (i >= db_per_page)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	offset = db->index * cache_line_size();
	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma     = pgdir->db_dma  + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

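/*
 * Allocate a doorbell record, reusing an existing page directory with free
 * records when possible and creating a new one otherwise. Serialized by
 * pgdir_mutex.
 */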
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

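/*
 * Return a doorbell record to its page directory; the directory is freed
 * once all of its records are unused again.
 */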
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();

	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		bitmap_free(db->u.pgdir->bitmap);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

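/*
 * Write the buffer fragments' DMA addresses into the @pas array as
 * big-endian entries, OR-ing @perm into the low bits of each address; only
 * the two lowest bits may carry permission flags.
 */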
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
	int i;

	WARN_ON(perm & 0xfc);
	for (i = 0; i < buf->npages; i++)
		pas[i] = cpu_to_be64(buf->frags[i].map | perm);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);

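/* Like mlx5_fill_page_frag_array_perm(), but with no permission bits set. */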
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
	mlx5_fill_page_frag_array_perm(buf, pas, 0);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);