/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18
};
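
/*
 * Free a chunk that was built from alloc_pages(): undo the streaming
 * DMA mapping if the chunk was ever mapped, then release each page
 * run recorded in the scatterlist.
 */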
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_pages(gfp_mask, order);
        if (!page)
                return -ENOMEM;

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                   int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return NULL;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask);

                if (ret) {
                        /* Fall back to a smaller allocation order and retry. */
                        if (--cur_order < 0)
                                goto fail;
                        else
                                continue;
                }

                ++chunk->npages;

                if (coherent)
                        ++chunk->nsg;
                else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                        chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                chunk->npages,
                                                PCI_DMA_BIDIRECTIONAL);

                        if (chunk->nsg <= 0)
                                goto fail;
                }

                if (chunk->npages == MLX4_ICM_CHUNK_LEN)
                        chunk = NULL;

                npages -= 1 << cur_order;
        }

        /* Map the pages of the final, partially filled chunk. */
        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}
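
/*
 * MAP_ICM hands the firmware a list of pages to back a range of the
 * device's ICM virtual space; UNMAP_ICM releases page_count ICM pages
 * starting at virt.
 */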
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
}

int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
        struct mlx4_cmd_mailbox *mailbox;
        __be64 *inbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        inbox[0] = cpu_to_be64(virt);
        inbox[1] = cpu_to_be64(dma_addr);

        err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev, mailbox);

        if (!err)
                mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
                         (unsigned long long) dma_addr, (unsigned long long) virt);

        return err;
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}

/*
 * Make sure the chunk of @table that holds @obj is mapped: take a
 * reference if it already is, otherwise allocate the chunk and map
 * it into the device's ICM space.
 */
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}
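
/*
 * Drop one reference on the chunk covering @obj; the last put unmaps
 * the chunk from the device and frees its memory.
 */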
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
        int idx, offset, dma_offset, i;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}
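
/*
 * Take a reference on every chunk covering objects start..end,
 * stepping one chunk's worth of objects at a time; on error, put
 * back the references taken so far.
 */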
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         int start, int end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          int start, int end)
{
        int i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}

/*
 * Set up the table bookkeeping and pre-map the chunks that hold the
 * first 'reserved' objects, which belong to the firmware.
 */
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, int nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = PAGE_ALIGN(nobj * obj_size -
                                                i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        return -ENOMEM;
}

void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table->icm);
}