/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
//#include <linux/export.h>  /* XXX SK probably not needed in FreeBSD XXX */
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	/* Search round-robin, starting from the last allocation point. */
	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		/* Wrap around, advancing the "top" prefix that gets ORed
		 * into returned object numbers. */
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	mlx4_bitmap_free_range(bitmap, obj, 1);
}
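
/*
 * Illustrative sketch only, not part of the driver: a typical
 * allocate/check/free round trip against an already-initialized
 * bitmap.  mlx4_bitmap_alloc() returns (u32)-1 on exhaustion, so the
 * result must be checked before use.
 */
static inline int mlx4_bitmap_example_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj = mlx4_bitmap_alloc(bitmap);

	if (obj == (u32)-1)
		return -ENOMEM;

	/* ... use obj as the hardware object number ... */

	mlx4_bitmap_free(bitmap, obj);
	return 0;
}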

/*
 * Find a run of "len" free bits starting at a multiple of "align",
 * where no position in the run has low bits matching "skip_mask".
 * Returns the first bit of the run, or -1 if no such run exists.
 */
static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align, u32 skip_mask)
{
	unsigned long end, i;

again:
	start = ALIGN(start, align);

	while ((start < nbits) && (test_bit(start, bitmap) ||
				   (start & skip_mask)))
		start += align;

	if (start >= nbits)
		return -1;

	end = start + len;
	if (end > nbits)
		return -1;

	for (i = start + 1; i < end; i++) {
		if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
			start = i + 1;
			goto again;
		}
	}

	return start;
}

u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask)
{
	u32 obj;

	/* Fast path: a single unaligned object is the common case. */
	if (likely(cnt == 1 && align == 1 && !skip_mask))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = find_aligned_range(bitmap->table, bitmap->last,
				bitmap->max, cnt, align, skip_mask);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
						cnt, align, skip_mask);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}
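
/*
 * Illustrative sketch only, not part of the driver: reserving a
 * naturally aligned block of eight consecutive objects.  The count
 * must be passed again on free, since the bitmap does not remember
 * range sizes.
 */
static inline int mlx4_bitmap_example_range(struct mlx4_bitmap *bitmap)
{
	u32 base = mlx4_bitmap_alloc_range(bitmap, 8, 8, 0);

	if (base == (u32)-1)
		return -ENOMEM;

	/* ... objects base .. base + 7 now belong to the caller ... */

	mlx4_bitmap_free_range(bitmap, base, 8);
	return 0;
}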

u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
	/* Strip the "top" prefix that was ORed in at allocation time. */
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	/* sanity check: num must leave room for the reserved ranges
	 * (the u64 cast avoids u32 overflow in the sum) */
	if (num <= (u64)reserved_top + reserved_bot)
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof(long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	/* Mark the bottom reserved entries as permanently in use. */
	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}
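
/*
 * Illustrative sketch only, not part of the driver: the full
 * lifecycle of a bitmap covering 256 objects (num must be a power of
 * two), with the bottom 4 entries reserved, e.g. for firmware-owned
 * objects.  The mask caps the high "top" bits ORed into returned
 * object numbers.
 */
static inline int mlx4_bitmap_example_lifecycle(void)
{
	struct mlx4_bitmap bitmap;
	int err;

	err = mlx4_bitmap_init(&bitmap, 256, 256 - 1, 4, 0);
	if (err)
		return err;

	/* ... mlx4_bitmap_alloc() / mlx4_bitmap_free() cycles ... */

	mlx4_bitmap_cleanup(&bitmap);
	return 0;
}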

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		/* Shrink page_shift until the DMA address is aligned to
		 * it, doubling the page count to keep the size covered. */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		/* On 64-bit, also provide a virtually contiguous mapping
		 * so the CPU can address the buffer directly. */
		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1) {
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	} else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
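
/*
 * Illustrative sketch only, not part of the driver: allocating a
 * queue buffer that is kept physically contiguous up to max_direct
 * bytes and split into page-sized chunks beyond that.  The same size
 * must be passed to mlx4_buf_free().
 */
static inline int mlx4_buf_example(struct mlx4_dev *dev)
{
	struct mlx4_buf buf;
	int err;

	err = mlx4_buf_alloc(dev, 16384, PAGE_SIZE, &buf);
	if (err)
		return err;

	/* ... post work queue entries into the buffer ... */

	mlx4_buf_free(dev, 16384, &buf);
	return 0;
}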

static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	/* kzalloc() left order0 all zero (no free singles); mark every
	 * order-1 pair as free. */
	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	/* Buddy-style search: try the requested order first, then the
	 * next order up. */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	/* If we split a higher-order block, free the buddy half. */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* If our order-0 buddy is also free, merge back into an
	 * order-1 block. */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* Release the page once every doorbell in it is free again. */
	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
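
/*
 * Illustrative sketch only, not part of the driver: reserving a
 * doorbell record and clearing it before handing its DMA address to
 * the HCA.  Order 0 asks for a single doorbell; order 1 for a
 * naturally aligned pair.
 */
static inline int mlx4_db_example(struct mlx4_dev *dev)
{
	struct mlx4_db db;
	int err;

	err = mlx4_db_alloc(dev, &db, 0);
	if (err)
		return err;

	*db.db = 0;	/* doorbell records start out zeroed */

	/* ... pass db.dma to the hardware context ... */

	mlx4_db_free(dev, &db);
	return 0;
}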

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
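
/*
 * Illustrative sketch only, not part of the driver: the usual work
 * queue pattern -- one call sets up doorbell, buffer, and MTT
 * translation together, and the matching free tears all three down.
 */
static inline int mlx4_hwq_example(struct mlx4_dev *dev)
{
	struct mlx4_hwq_resources wqres;
	int err;

	err = mlx4_alloc_hwq_res(dev, &wqres, 16384, 2 * PAGE_SIZE);
	if (err)
		return err;

	/* ... wqres.mtt and wqres.db go into the QP/CQ context ... */

	mlx4_free_hwq_res(dev, &wqres, 16384);
	return 0;
}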