/*
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/math64.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
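/*
 * Illustration of the sizing below, assuming the common 4 KB
 * PAGE_SIZE (not guaranteed on every architecture): 1 << 18 bytes is
 * 256 KB, i.e. get_order(1 << 18) == 6, so the largest single page
 * allocation attempted is an order-6 block of 64 pages, and each
 * mapped table chunk covers 64 ICM pages.
 */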
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}
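/*
 * Illustration of the fallback strategy in mlx4_alloc_icm() below;
 * the request size is hypothetical and 4 KB pages are assumed.  For
 * npages = 100: cur_order starts at get_order(MLX4_ICM_ALLOC_SIZE) = 6
 * (64 pages), which fits, leaving 36 pages; the inner while loop then
 * drops to order 5 (32 pages), leaving 4; then to order 2 (4 pages),
 * leaving 0.  Whenever an allocation itself fails, cur_order is
 * decremented and the loop retries with smaller blocks until order 0
 * fails, at which point the whole allocation is abandoned.
 */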
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc_node(sizeof *chunk,
					     gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kmalloc(sizeof *chunk,
						gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask,
						   dev->numa_node);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
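/*
 * Illustrative sketch (not a caller in this file) of wiring the
 * helpers above together to back 256 KB of device virtual address
 * space at 'virt' with non-coherent, lowmem ICM; this is exactly the
 * pattern mlx4_table_get() below follows:
 *
 *	struct mlx4_icm *icm;
 *
 *	icm = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
 *			     GFP_KERNEL | __GFP_NOWARN, 0);
 *	if (!icm)
 *		return -ENOMEM;
 *	if (mlx4_MAP_ICM(dev, icm, virt)) {
 *		mlx4_free_icm(dev, icm, 0);
 *		return -ENOMEM;
 *	}
 */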
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i = (obj & (table->num_obj - 1)) /
		(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;

		if (!mlx4_UNMAP_ICM(dev, table->virt + offset,
				    MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
		} else {
			pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
		}
	}

	mutex_unlock(&table->mutex);
}

void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
		      dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}
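/*
 * Worked example of the index math in mlx4_table_get() and
 * mlx4_table_find() above (the numbers are hypothetical): with
 * obj_size = 64 bytes, one 256 KB chunk holds 262144 / 64 = 4096
 * objects.  Object obj = 5000 therefore lives in chunk
 * i = 5000 / 4096 = 1, and its byte index idx = 5000 * 64 = 320000
 * falls at offset idx % 262144 = 57856 within that chunk, which the
 * scatterlist walk above then resolves to a page and DMA address.
 */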
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u64 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = div_u64(nobj + obj_per_chunk - 1, obj_per_chunk);

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
						i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			if (!mlx4_UNMAP_ICM(dev,
					    virt + i * MLX4_TABLE_CHUNK_SIZE,
					    MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE)) {
				mlx4_free_icm(dev, table->icm[i], use_coherent);
			} else {
				pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
				return -ENOMEM;
			}
		}
	kfree(table->icm);

	return -ENOMEM;
}
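/*
 * Sizing illustration for mlx4_init_icm_table() above (numbers are
 * hypothetical): a table of nobj = 1 << 20 objects of
 * obj_size = 128 bytes spans 128 MB of ICM, so obj_per_chunk = 2048
 * and num_icm = 512 slots are allocated in table->icm[].  Only the
 * chunks covering the first 'reserved' objects are allocated and
 * mapped eagerly (and pinned by the extra refcount); the rest are
 * populated on demand by mlx4_table_get().
 */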
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i, err = 0;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			err = mlx4_UNMAP_ICM(dev,
					     table->virt + i * MLX4_TABLE_CHUNK_SIZE,
					     MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			if (!err) {
				mlx4_free_icm(dev, table->icm[i],
					      table->coherent);
			} else {
				pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
				break;
			}
		}

	if (!err)
		kfree(table->icm);
}
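/*
 * Illustrative lifecycle of the table API (not a caller from this
 * file; 'virt', 'nobj', 'reserved' and the 256-byte object size are
 * hypothetical):
 *
 *	struct mlx4_icm_table table;
 *
 *	if (mlx4_init_icm_table(dev, &table, virt, 256, nobj, reserved,
 *				1, 0))
 *		return -ENOMEM;
 *
 *	if (mlx4_table_get(dev, &table, obj))	   // maps the chunk holding 'obj'
 *		goto cleanup;
 *	// ... look up the entry with mlx4_table_find(&table, obj, &dma) ...
 *	mlx4_table_put(dev, &table, obj);	   // unmaps when refcount hits 0
 *
 * cleanup:
 *	mlx4_cleanup_icm_table(dev, &table);
 */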