/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
32 */ 33 34#include <linux/errno.h> 35#include <linux/slab.h> 36#include <linux/mm.h> 37#include <linux/bitmap.h> 38#include <linux/dma-mapping.h> 39#include <linux/vmalloc.h> 40 41#include "mlx4.h" 42 43u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) 44{ 45 u32 obj; 46 47 spin_lock(&bitmap->lock); 48 49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); 50 if (obj >= bitmap->max) { 51 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 52 & bitmap->mask; 53 obj = find_first_zero_bit(bitmap->table, bitmap->max); 54 } 55 56 if (obj < bitmap->max) { 57 set_bit(obj, bitmap->table); 58 bitmap->last = (obj + 1); 59 if (bitmap->last == bitmap->max) 60 bitmap->last = 0; 61 obj |= bitmap->top; 62 } else 63 obj = -1; 64 65 spin_unlock(&bitmap->lock); 66 67 return obj; 68} 69 70void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) 71{ 72 mlx4_bitmap_free_range(bitmap, obj, 1); 73} 74 75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) 76{ 77 u32 obj, i; 78 79 if (likely(cnt == 1 && align == 1)) 80 return mlx4_bitmap_alloc(bitmap); 81 82 spin_lock(&bitmap->lock); 83 84 obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 85 bitmap->last, cnt, align - 1); 86 if (obj >= bitmap->max) { 87 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 88 & bitmap->mask; 89 obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 90 0, cnt, align - 1); 91 } 92 93 if (obj < bitmap->max) { 94 for (i = 0; i < cnt; i++) 95 set_bit(obj + i, bitmap->table); 96 if (obj == bitmap->last) { 97 bitmap->last = (obj + cnt); 98 if (bitmap->last >= bitmap->max) 99 bitmap->last = 0; 100 } 101 obj |= bitmap->top; 102 } else 103 obj = -1; 104 105 spin_unlock(&bitmap->lock); 106 107 return obj; 108} 109 110void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) 111{ 112 u32 i; 113 114 obj &= bitmap->max + bitmap->reserved_top - 1; 115 116 spin_lock(&bitmap->lock); 117 for (i = 0; i < cnt; i++) 118 
clear_bit(obj + i, bitmap->table); 119 bitmap->last = min(bitmap->last, obj); 120 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 121 & bitmap->mask; 122 spin_unlock(&bitmap->lock); 123} 124 125int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 126 u32 reserved_bot, u32 reserved_top) 127{ 128 int i; 129 130 /* num must be a power of 2 */ 131 if (num != roundup_pow_of_two(num)) 132 return -EINVAL; 133 134 bitmap->last = 0; 135 bitmap->top = 0; 136 bitmap->max = num - reserved_top; 137 bitmap->mask = mask; 138 bitmap->reserved_top = reserved_top; 139 spin_lock_init(&bitmap->lock); 140 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * 141 sizeof (long), GFP_KERNEL); 142 if (!bitmap->table) 143 return -ENOMEM; 144 145 for (i = 0; i < reserved_bot; ++i) 146 set_bit(i, bitmap->table); 147 148 return 0; 149} 150 151void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) 152{ 153 kfree(bitmap->table); 154} 155 156/* 157 * Handling for queue buffers -- we allocate a bunch of memory and 158 * register it in a memory region at HCA virtual address 0. If the 159 * requested size is > max_direct, we split the allocation into 160 * multiple pages, so we don't require too much contiguous memory. 
161 */ 162 163int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 164 struct mlx4_buf *buf) 165{ 166 dma_addr_t t; 167 168 if (size <= max_direct) { 169 buf->nbufs = 1; 170 buf->npages = 1; 171 buf->page_shift = get_order(size) + PAGE_SHIFT; 172 buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, 173 size, &t, GFP_KERNEL); 174 if (!buf->direct.buf) 175 return -ENOMEM; 176 177 buf->direct.map = t; 178 179 while (t & ((1 << buf->page_shift) - 1)) { 180 --buf->page_shift; 181 buf->npages *= 2; 182 } 183 184 memset(buf->direct.buf, 0, size); 185 } else { 186 int i; 187 188 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 189 buf->npages = buf->nbufs; 190 buf->page_shift = PAGE_SHIFT; 191 buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, 192 GFP_KERNEL); 193 if (!buf->page_list) 194 return -ENOMEM; 195 196 for (i = 0; i < buf->nbufs; ++i) { 197 buf->page_list[i].buf = 198 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, 199 &t, GFP_KERNEL); 200 if (!buf->page_list[i].buf) 201 goto err_free; 202 203 buf->page_list[i].map = t; 204 205 memset(buf->page_list[i].buf, 0, PAGE_SIZE); 206 } 207 208 if (BITS_PER_LONG == 64) { 209 struct page **pages; 210 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); 211 if (!pages) 212 goto err_free; 213 for (i = 0; i < buf->nbufs; ++i) 214 pages[i] = virt_to_page(buf->page_list[i].buf); 215 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); 216 kfree(pages); 217 if (!buf->direct.buf) 218 goto err_free; 219 } 220 } 221 222 return 0; 223 224err_free: 225 mlx4_buf_free(dev, size, buf); 226 227 return -ENOMEM; 228} 229EXPORT_SYMBOL_GPL(mlx4_buf_alloc); 230 231void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) 232{ 233 int i; 234 235 if (buf->nbufs == 1) 236 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 237 buf->direct.map); 238 else { 239 if (BITS_PER_LONG == 64) 240 vunmap(buf->direct.buf); 241 242 for (i = 0; i < buf->nbufs; ++i) 243 if (buf->page_list[i].buf) 
244 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 245 buf->page_list[i].buf, 246 buf->page_list[i].map); 247 kfree(buf->page_list); 248 } 249} 250EXPORT_SYMBOL_GPL(mlx4_buf_free); 251 252static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) 253{ 254 struct mlx4_db_pgdir *pgdir; 255 256 pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); 257 if (!pgdir) 258 return NULL; 259 260 bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); 261 pgdir->bits[0] = pgdir->order0; 262 pgdir->bits[1] = pgdir->order1; 263 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, 264 &pgdir->db_dma, GFP_KERNEL); 265 if (!pgdir->db_page) { 266 kfree(pgdir); 267 return NULL; 268 } 269 270 return pgdir; 271} 272 273static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, 274 struct mlx4_db *db, int order) 275{ 276 int o; 277 int i; 278 279 for (o = order; o <= 1; ++o) { 280 i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); 281 if (i < MLX4_DB_PER_PAGE >> o) 282 goto found; 283 } 284 285 return -ENOMEM; 286 287found: 288 clear_bit(i, pgdir->bits[o]); 289 290 i <<= o; 291 292 if (o > order) 293 set_bit(i ^ 1, pgdir->bits[order]); 294 295 db->u.pgdir = pgdir; 296 db->index = i; 297 db->db = pgdir->db_page + db->index; 298 db->dma = pgdir->db_dma + db->index * 4; 299 db->order = order; 300 301 return 0; 302} 303 304int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) 305{ 306 struct mlx4_priv *priv = mlx4_priv(dev); 307 struct mlx4_db_pgdir *pgdir; 308 int ret = 0; 309 310 mutex_lock(&priv->pgdir_mutex); 311 312 list_for_each_entry(pgdir, &priv->pgdir_list, list) 313 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 314 goto out; 315 316 pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); 317 if (!pgdir) { 318 ret = -ENOMEM; 319 goto out; 320 } 321 322 list_add(&pgdir->list, &priv->pgdir_list); 323 324 /* This should never fail -- we just allocated an empty page: */ 325 WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); 326 327out: 328 
mutex_unlock(&priv->pgdir_mutex); 329 330 return ret; 331} 332EXPORT_SYMBOL_GPL(mlx4_db_alloc); 333 334void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) 335{ 336 struct mlx4_priv *priv = mlx4_priv(dev); 337 int o; 338 int i; 339 340 mutex_lock(&priv->pgdir_mutex); 341 342 o = db->order; 343 i = db->index; 344 345 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { 346 clear_bit(i ^ 1, db->u.pgdir->order0); 347 ++o; 348 } 349 i >>= o; 350 set_bit(i, db->u.pgdir->bits[o]); 351 352 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { 353 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 354 db->u.pgdir->db_page, db->u.pgdir->db_dma); 355 list_del(&db->u.pgdir->list); 356 kfree(db->u.pgdir); 357 } 358 359 mutex_unlock(&priv->pgdir_mutex); 360} 361EXPORT_SYMBOL_GPL(mlx4_db_free); 362 363int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 364 int size, int max_direct) 365{ 366 int err; 367 368 err = mlx4_db_alloc(dev, &wqres->db, 1); 369 if (err) 370 return err; 371 372 *wqres->db.db = 0; 373 374 err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); 375 if (err) 376 goto err_db; 377 378 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, 379 &wqres->mtt); 380 if (err) 381 goto err_buf; 382 383 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); 384 if (err) 385 goto err_mtt; 386 387 return 0; 388 389err_mtt: 390 mlx4_mtt_cleanup(dev, &wqres->mtt); 391err_buf: 392 mlx4_buf_free(dev, size, &wqres->buf); 393err_db: 394 mlx4_db_free(dev, &wqres->db); 395 396 return err; 397} 398EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); 399 400void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 401 int size) 402{ 403 mlx4_mtt_cleanup(dev, &wqres->mtt); 404 mlx4_buf_free(dev, size, &wqres->buf); 405 mlx4_db_free(dev, &wqres->db); 406} 407EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); 408