/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  In this
 * port the allocation is always split into PAGE_SIZE segments, so we
 * don't require too much contiguous memory; the max_direct argument
 * is retained for compatibility but is not consulted.
 */
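
/*
 * Usage sketch (illustrative only; "dev" and the 16KB size below are
 * hypothetical, not taken from a real caller):
 *
 *	struct mlx5_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, 16384, PAGE_SIZE, &buf);
 *	if (err != 0)
 *		return (err);
 *	(... expose the buffer's pages to firmware, for example
 *	 through mlx5_fill_page_array(), then ...)
 *	mlx5_buf_free(dev, &buf);
 */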
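/*
 * Busdma load callback.  bus_dmamap_load() may invoke this either
 * synchronously from mlx5_buf_alloc(), with the DMA lock already held
 * (the MLX5_DMA_OWNED() case), or later from another context once
 * resources become available.  On success it records the bus address
 * of every PAGE_SIZE segment and then wakes up the sleeping allocator
 * through the load_done state and MLX5_DMA_DONE().
 */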
static void
mlx5_buf_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mlx5_buf *buf;
	uint8_t owned;
	int x;

	buf = (struct mlx5_buf *)arg;
	owned = MLX5_DMA_OWNED(buf->dev);

	if (!owned)
		MLX5_DMA_LOCK(buf->dev);

	if (error == 0) {
		for (x = 0; x != nseg; x++) {
			buf->page_list[x] = segs[x].ds_addr;
			KASSERT(segs[x].ds_len == PAGE_SIZE, ("Invalid segment size"));
		}
		buf->load_done = MLX5_LOAD_ST_SUCCESS;
	} else {
		buf->load_done = MLX5_LOAD_ST_FAILURE;
	}
	MLX5_DMA_DONE(buf->dev);

	if (!owned)
		MLX5_DMA_UNLOCK(buf->dev);
}

int
mlx5_buf_alloc(struct mlx5_core_dev *dev, int size,
    int max_direct, struct mlx5_buf *buf)
{
	int err;

	buf->npages = howmany(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->load_done = MLX5_LOAD_ST_NONE;
	buf->dev = dev;
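	/* under LinuxKPI a GFP_KERNEL allocation sleeps and does not fail */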
	buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list),
	    GFP_KERNEL);

	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    PAGE_SIZE,		/* alignment */
	    0,			/* no boundary */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    PAGE_SIZE * buf->npages,	/* maxsize */
	    buf->npages,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    0,			/* flags */
	    NULL, NULL,		/* lockfunc, lockfuncarg */
	    &buf->dma_tag);

	if (err != 0)
		goto err_dma_tag;

	/* allocate memory */
	err = -bus_dmamem_alloc(buf->dma_tag, &buf->direct.buf,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &buf->dma_map);
	if (err != 0)
		goto err_dma_alloc;

	/* load memory into DMA */
	MLX5_DMA_LOCK(dev);
	err = bus_dmamap_load(
	    buf->dma_tag, buf->dma_map, buf->direct.buf,
	    PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb,
	    buf, BUS_DMA_WAITOK | BUS_DMA_COHERENT);

	while (buf->load_done == MLX5_LOAD_ST_NONE)
		MLX5_DMA_WAIT(dev);
	MLX5_DMA_UNLOCK(dev);

	/* check for error */
	if (buf->load_done != MLX5_LOAD_ST_SUCCESS) {
		err = -ENOMEM;
		goto err_dma_load;
	}

	/* clean memory */
	memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages);

	/* flush memory to RAM */
	bus_dmamap_sync(buf->dma_tag, buf->dma_map, BUS_DMASYNC_PREWRITE);
	return (0);

err_dma_load:
	bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
err_dma_alloc:
	bus_dma_tag_destroy(buf->dma_tag);
err_dma_tag:
	kfree(buf->page_list);
	return (err);
}

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{

	bus_dmamap_unload(buf->dma_tag, buf->dma_map);
	bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
	bus_dma_tag_destroy(buf->dma_tag);
	kfree(buf->page_list);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

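/*
 * Doorbell management.  Doorbell records are carved out of dedicated
 * firmware pages: each pgdir wraps a single MLX5_ADAPTER_PAGE_SIZE
 * page plus a bitmap with one bit per L1_CACHE_BYTES-sized record,
 * where a set bit marks a free record.
 */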
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev)
{
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);

	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);

	pgdir->fw_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (pgdir->fw_page != NULL) {
		pgdir->db_page = pgdir->fw_page->virt_addr;
		pgdir->db_dma = pgdir->fw_page->dma_addr;

		/* clean allocated memory */
		memset(pgdir->db_page, 0, MLX5_ADAPTER_PAGE_SIZE);

		/* flush memory to RAM */
		mlx5_fwp_flush(pgdir->fw_page);
	}
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

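/*
 * Claim the first free record in a pgdir.  Each record holds the two
 * 32-bit doorbell values (db->db[0] and db->db[1]) at a cache line
 * aligned offset within the page.
 */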
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
	if (i >= MLX5_DB_PER_PAGE)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	offset = db->index * L1_CACHE_BYTES;
	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma     = pgdir->db_dma  + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

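/*
 * First-fit allocation: scan the existing page directories for a free
 * record and allocate a fresh firmware page only when all of them are
 * full.  pgdir_mutex serializes the list walk as well as all bitmap
 * updates.
 */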
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

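/*
 * Return a record to its pgdir; once every record in the page is free
 * again, the firmware page itself is released and the pgdir is
 * dropped from the list.
 */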
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		mlx5_fwp_free(db->u.pgdir->fw_page);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

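/*
 * Export the bus address of every page in big-endian form, the layout
 * expected by the PAS (physical address) array of firmware commands
 * such as queue creation.
 */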
void
mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	int i;

	for (i = 0; i != buf->npages; i++)
		pas[i] = cpu_to_be64(buf->page_list[i]);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
