/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
33219820Sjeff */ 34219820Sjeff 35219820Sjeff#include <linux/init.h> 36219820Sjeff#include <linux/errno.h> 37219820Sjeff 38219820Sjeff#include <linux/mlx4/cmd.h> 39219820Sjeff 40219820Sjeff#include "mlx4.h" 41219820Sjeff#include "icm.h" 42219820Sjeff 43219820Sjeff/* 44219820Sjeff * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 45219820Sjeff */ 46219820Sjeffstruct mlx4_mpt_entry { 47219820Sjeff __be32 flags; 48219820Sjeff __be32 qpn; 49219820Sjeff __be32 key; 50219820Sjeff __be32 pd_flags; 51219820Sjeff __be64 start; 52219820Sjeff __be64 length; 53219820Sjeff __be32 lkey; 54219820Sjeff __be32 win_cnt; 55219820Sjeff u8 reserved1; 56219820Sjeff u8 flags2; 57219820Sjeff u8 reserved2; 58219820Sjeff u8 mtt_rep; 59219820Sjeff __be64 mtt_seg; 60219820Sjeff __be32 mtt_sz; 61219820Sjeff __be32 entity_size; 62219820Sjeff __be32 first_byte_offset; 63219820Sjeff} __attribute__((packed)); 64219820Sjeff 65219820Sjeff#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) 66219820Sjeff#define MLX4_MPT_FLAG_FREE (0x3UL << 28) 67219820Sjeff#define MLX4_MPT_FLAG_MIO (1 << 17) 68219820Sjeff#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) 69219820Sjeff#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) 70219820Sjeff#define MLX4_MPT_FLAG_REGION (1 << 8) 71219820Sjeff 72219820Sjeff#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) 73219820Sjeff#define MLX4_MPT_PD_FLAG_RAE (1 << 28) 74219820Sjeff#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) 75219820Sjeff 76219820Sjeff#define MLX4_MPT_FLAG2_FBO_EN (1 << 7) 77219820Sjeff 78219820Sjeff#define MLX4_MPT_STATUS_SW 0xF0 79219820Sjeff#define MLX4_MPT_STATUS_HW 0x00 80219820Sjeff 81219820Sjeffstatic u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) 82219820Sjeff{ 83219820Sjeff int o; 84219820Sjeff int m; 85219820Sjeff u32 seg; 86219820Sjeff 87219820Sjeff spin_lock(&buddy->lock); 88219820Sjeff 89219820Sjeff for (o = order; o <= buddy->max_order; ++o) 90219820Sjeff if (buddy->num_free[o]) { 91219820Sjeff m = 1 << (buddy->max_order - o); 92219820Sjeff seg = 
find_first_bit(buddy->bits[o], m); 93219820Sjeff if (seg < m) 94219820Sjeff goto found; 95219820Sjeff } 96219820Sjeff 97219820Sjeff spin_unlock(&buddy->lock); 98219820Sjeff return -1; 99219820Sjeff 100219820Sjeff found: 101219820Sjeff clear_bit(seg, buddy->bits[o]); 102219820Sjeff --buddy->num_free[o]; 103219820Sjeff 104219820Sjeff while (o > order) { 105219820Sjeff --o; 106219820Sjeff seg <<= 1; 107219820Sjeff set_bit(seg ^ 1, buddy->bits[o]); 108219820Sjeff ++buddy->num_free[o]; 109219820Sjeff } 110219820Sjeff 111219820Sjeff spin_unlock(&buddy->lock); 112219820Sjeff 113219820Sjeff seg <<= order; 114219820Sjeff 115219820Sjeff return seg; 116219820Sjeff} 117219820Sjeff 118219820Sjeffstatic void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) 119219820Sjeff{ 120219820Sjeff seg >>= order; 121219820Sjeff 122219820Sjeff spin_lock(&buddy->lock); 123219820Sjeff 124219820Sjeff while (test_bit(seg ^ 1, buddy->bits[order])) { 125219820Sjeff clear_bit(seg ^ 1, buddy->bits[order]); 126219820Sjeff --buddy->num_free[order]; 127219820Sjeff seg >>= 1; 128219820Sjeff ++order; 129219820Sjeff } 130219820Sjeff 131219820Sjeff set_bit(seg, buddy->bits[order]); 132219820Sjeff ++buddy->num_free[order]; 133219820Sjeff 134219820Sjeff spin_unlock(&buddy->lock); 135219820Sjeff} 136219820Sjeff 137219820Sjeffstatic int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) 138219820Sjeff{ 139219820Sjeff int i, s; 140219820Sjeff 141219820Sjeff buddy->max_order = max_order; 142219820Sjeff spin_lock_init(&buddy->lock); 143219820Sjeff 144219820Sjeff buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), 145219820Sjeff GFP_KERNEL); 146219820Sjeff buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), 147219820Sjeff GFP_KERNEL); 148219820Sjeff if (!buddy->bits || !buddy->num_free) 149219820Sjeff goto err_out; 150219820Sjeff 151219820Sjeff for (i = 0; i <= buddy->max_order; ++i) { 152219820Sjeff s = BITS_TO_LONGS(1 << (buddy->max_order - i)); 153219820Sjeff 
buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); 154219820Sjeff if (!buddy->bits[i]) 155219820Sjeff goto err_out_free; 156219820Sjeff bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); 157219820Sjeff } 158219820Sjeff 159219820Sjeff set_bit(0, buddy->bits[buddy->max_order]); 160219820Sjeff buddy->num_free[buddy->max_order] = 1; 161219820Sjeff 162219820Sjeff return 0; 163219820Sjeff 164219820Sjefferr_out_free: 165219820Sjeff for (i = 0; i <= buddy->max_order; ++i) 166219820Sjeff kfree(buddy->bits[i]); 167219820Sjeff 168219820Sjefferr_out: 169219820Sjeff kfree(buddy->bits); 170219820Sjeff kfree(buddy->num_free); 171219820Sjeff 172219820Sjeff return -ENOMEM; 173219820Sjeff} 174219820Sjeff 175219820Sjeffstatic void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) 176219820Sjeff{ 177219820Sjeff int i; 178219820Sjeff 179219820Sjeff for (i = 0; i <= buddy->max_order; ++i) 180219820Sjeff kfree(buddy->bits[i]); 181219820Sjeff 182219820Sjeff kfree(buddy->bits); 183219820Sjeff kfree(buddy->num_free); 184219820Sjeff} 185219820Sjeff 186219820Sjeffstatic u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 187219820Sjeff{ 188219820Sjeff struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 189219820Sjeff u32 seg; 190219820Sjeff 191219820Sjeff seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); 192219820Sjeff if (seg == -1) 193219820Sjeff return -1; 194219820Sjeff 195219820Sjeff if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, 196219820Sjeff seg + (1 << order) - 1)) { 197219820Sjeff mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); 198219820Sjeff return -1; 199219820Sjeff } 200219820Sjeff 201219820Sjeff return seg; 202219820Sjeff} 203219820Sjeff 204219820Sjeffint mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, 205219820Sjeff struct mlx4_mtt *mtt) 206219820Sjeff{ 207219820Sjeff int i; 208219820Sjeff 209219820Sjeff if (!npages) { 210219820Sjeff mtt->order = -1; 211219820Sjeff mtt->page_shift = MLX4_ICM_PAGE_SHIFT; 212219820Sjeff return 
0; 213219820Sjeff } else 214219820Sjeff mtt->page_shift = page_shift; 215219820Sjeff 216219820Sjeff for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) 217219820Sjeff ++mtt->order; 218219820Sjeff 219219820Sjeff mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); 220219820Sjeff if (mtt->first_seg == -1) 221219820Sjeff return -ENOMEM; 222219820Sjeff 223219820Sjeff return 0; 224219820Sjeff} 225219820SjeffEXPORT_SYMBOL_GPL(mlx4_mtt_init); 226219820Sjeff 227219820Sjeffvoid mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) 228219820Sjeff{ 229219820Sjeff struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 230219820Sjeff 231219820Sjeff if (mtt->order < 0) 232219820Sjeff return; 233219820Sjeff 234219820Sjeff mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); 235219820Sjeff mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg, 236219820Sjeff mtt->first_seg + (1 << mtt->order) - 1); 237219820Sjeff} 238219820SjeffEXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); 239219820Sjeff 240219820Sjeffu64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) 241219820Sjeff{ 242219820Sjeff return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; 243219820Sjeff} 244219820SjeffEXPORT_SYMBOL_GPL(mlx4_mtt_addr); 245219820Sjeff 246219820Sjeffstatic u32 hw_index_to_key(u32 ind) 247219820Sjeff{ 248219820Sjeff return (ind >> 24) | (ind << 8); 249219820Sjeff} 250219820Sjeff 251219820Sjeffstatic u32 key_to_hw_index(u32 key) 252219820Sjeff{ 253219820Sjeff return (key << 24) | (key >> 8); 254219820Sjeff} 255219820Sjeff 256219820Sjeffstatic int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 257219820Sjeff int mpt_index) 258219820Sjeff{ 259219820Sjeff return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, 260219820Sjeff MLX4_CMD_TIME_CLASS_B); 261219820Sjeff} 262219820Sjeff 263219820Sjeffstatic int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 264219820Sjeff int mpt_index) 
265219820Sjeff{ 266219820Sjeff return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, 267219820Sjeff !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B); 268219820Sjeff} 269219820Sjeff 270219820Sjeffint mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align, u32 *base_mridx) 271219820Sjeff{ 272219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 273219820Sjeff u32 mridx; 274219820Sjeff 275219820Sjeff mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align); 276219820Sjeff if (mridx == -1) 277219820Sjeff return -ENOMEM; 278219820Sjeff 279219820Sjeff *base_mridx = mridx; 280219820Sjeff return 0; 281219820Sjeff 282219820Sjeff} 283219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_reserve_range); 284219820Sjeff 285219820Sjeffvoid mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt) 286219820Sjeff{ 287219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 288219820Sjeff mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt); 289219820Sjeff} 290219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_release_range); 291219820Sjeff 292219820Sjeffint mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, 293219820Sjeff u64 iova, u64 size, u32 access, int npages, 294219820Sjeff int page_shift, struct mlx4_mr *mr) 295219820Sjeff{ 296219820Sjeff mr->iova = iova; 297219820Sjeff mr->size = size; 298219820Sjeff mr->pd = pd; 299219820Sjeff mr->access = access; 300219820Sjeff mr->enabled = 0; 301219820Sjeff mr->key = hw_index_to_key(mridx); 302219820Sjeff 303219820Sjeff return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); 304219820Sjeff} 305219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved); 306219820Sjeff 307219820Sjeffint mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, 308219820Sjeff int npages, int page_shift, struct mlx4_mr *mr) 309219820Sjeff{ 310219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 311219820Sjeff u32 index; 312219820Sjeff int err; 313219820Sjeff 314219820Sjeff index = 
mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); 315219820Sjeff if (index == -1) 316219820Sjeff return -ENOMEM; 317219820Sjeff 318219820Sjeff err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, 319219820Sjeff access, npages, page_shift, mr); 320219820Sjeff if (err) 321219820Sjeff mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); 322219820Sjeff 323219820Sjeff return err; 324219820Sjeff} 325219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_alloc); 326219820Sjeff 327219820Sjeffvoid mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) 328219820Sjeff{ 329219820Sjeff int err; 330219820Sjeff 331219820Sjeff if (mr->enabled) { 332219820Sjeff err = mlx4_HW2SW_MPT(dev, NULL, 333219820Sjeff key_to_hw_index(mr->key) & 334219820Sjeff (dev->caps.num_mpts - 1)); 335219820Sjeff if (err) 336219820Sjeff mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err); 337219820Sjeff } 338219820Sjeff 339219820Sjeff mlx4_mtt_cleanup(dev, &mr->mtt); 340219820Sjeff} 341219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_free_reserved); 342219820Sjeff 343219820Sjeffvoid mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) 344219820Sjeff{ 345219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 346219820Sjeff mlx4_mr_free_reserved(dev, mr); 347219820Sjeff mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key)); 348219820Sjeff} 349219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_free); 350219820Sjeff 351219820Sjeffint mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) 352219820Sjeff{ 353219820Sjeff struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 354219820Sjeff struct mlx4_cmd_mailbox *mailbox; 355219820Sjeff struct mlx4_mpt_entry *mpt_entry; 356219820Sjeff int err; 357219820Sjeff 358219820Sjeff err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); 359219820Sjeff if (err) 360219820Sjeff return err; 361219820Sjeff 362219820Sjeff mailbox = mlx4_alloc_cmd_mailbox(dev); 363219820Sjeff if (IS_ERR(mailbox)) { 364219820Sjeff err = PTR_ERR(mailbox); 365219820Sjeff goto err_table; 
366219820Sjeff } 367219820Sjeff mpt_entry = mailbox->buf; 368219820Sjeff 369219820Sjeff memset(mpt_entry, 0, sizeof *mpt_entry); 370219820Sjeff 371219820Sjeff mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | 372219820Sjeff MLX4_MPT_FLAG_REGION | 373219820Sjeff mr->access); 374219820Sjeff 375219820Sjeff mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); 376219820Sjeff mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); 377219820Sjeff mpt_entry->start = cpu_to_be64(mr->iova); 378219820Sjeff mpt_entry->length = cpu_to_be64(mr->size); 379219820Sjeff mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 380219820Sjeff 381219820Sjeff if (mr->mtt.order < 0) { 382219820Sjeff mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); 383219820Sjeff mpt_entry->mtt_seg = 0; 384219820Sjeff } else { 385219820Sjeff mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); 386219820Sjeff } 387219820Sjeff 388219820Sjeff if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { 389219820Sjeff /* fast register MR in free state */ 390219820Sjeff mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); 391219820Sjeff mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | 392219820Sjeff MLX4_MPT_PD_FLAG_RAE); 393219820Sjeff mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * 394219820Sjeff dev->caps.mtts_per_seg); 395219820Sjeff } else { 396219820Sjeff mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); 397219820Sjeff } 398219820Sjeff 399219820Sjeff err = mlx4_SW2HW_MPT(dev, mailbox, 400219820Sjeff key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); 401219820Sjeff if (err) { 402219820Sjeff mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); 403219820Sjeff goto err_cmd; 404219820Sjeff } 405219820Sjeff 406219820Sjeff mr->enabled = 1; 407219820Sjeff 408219820Sjeff mlx4_free_cmd_mailbox(dev, mailbox); 409219820Sjeff 410219820Sjeff return 0; 411219820Sjeff 412219820Sjefferr_cmd: 413219820Sjeff mlx4_free_cmd_mailbox(dev, mailbox); 414219820Sjeff 
415219820Sjefferr_table: 416219820Sjeff mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); 417219820Sjeff return err; 418219820Sjeff} 419219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_enable); 420219820Sjeff 421219820Sjeffstatic int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 422219820Sjeff int start_index, int npages, u64 *page_list) 423219820Sjeff{ 424219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 425219820Sjeff __be64 *mtts; 426219820Sjeff dma_addr_t dma_handle; 427219820Sjeff int i; 428219820Sjeff int s = start_index * sizeof (u64); 429219820Sjeff 430219820Sjeff /* All MTTs must fit in the same page */ 431219820Sjeff if (start_index / (PAGE_SIZE / sizeof (u64)) != 432219820Sjeff (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) 433219820Sjeff return -EINVAL; 434219820Sjeff 435219820Sjeff if (start_index & (dev->caps.mtts_per_seg - 1)) 436219820Sjeff return -EINVAL; 437219820Sjeff 438219820Sjeff mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + 439219820Sjeff s / dev->caps.mtt_entry_sz, &dma_handle); 440219820Sjeff if (!mtts) 441219820Sjeff return -ENOMEM; 442219820Sjeff 443219820Sjeff for (i = 0; i < npages; ++i) 444219820Sjeff mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 445219820Sjeff 446219820Sjeff dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); 447219820Sjeff 448219820Sjeff return 0; 449219820Sjeff} 450219820Sjeff 451219820Sjeffint mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 452219820Sjeff int start_index, int npages, u64 *page_list) 453219820Sjeff{ 454219820Sjeff int chunk; 455219820Sjeff int err; 456219820Sjeff 457219820Sjeff if (mtt->order < 0) 458219820Sjeff return -EINVAL; 459219820Sjeff 460219820Sjeff while (npages > 0) { 461219820Sjeff chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); 462219820Sjeff err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); 463219820Sjeff if (err) 464219820Sjeff return err; 
465219820Sjeff 466219820Sjeff npages -= chunk; 467219820Sjeff start_index += chunk; 468219820Sjeff page_list += chunk; 469219820Sjeff } 470219820Sjeff 471219820Sjeff return 0; 472219820Sjeff} 473219820SjeffEXPORT_SYMBOL_GPL(mlx4_write_mtt); 474219820Sjeff 475219820Sjeffint mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 476219820Sjeff struct mlx4_buf *buf) 477219820Sjeff{ 478219820Sjeff u64 *page_list; 479219820Sjeff int err; 480219820Sjeff int i; 481219820Sjeff 482219820Sjeff page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); 483219820Sjeff if (!page_list) 484219820Sjeff return -ENOMEM; 485219820Sjeff 486219820Sjeff for (i = 0; i < buf->npages; ++i) 487219820Sjeff if (buf->direct.map) 488219820Sjeff page_list[i] = buf->direct.map + (i << buf->page_shift); 489219820Sjeff else 490219820Sjeff page_list[i] = buf->page_list[i].map; 491219820Sjeff 492219820Sjeff err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); 493219820Sjeff 494219820Sjeff kfree(page_list); 495219820Sjeff return err; 496219820Sjeff} 497219820SjeffEXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); 498219820Sjeff 499219820Sjeffint mlx4_init_mr_table(struct mlx4_dev *dev) 500219820Sjeff{ 501219820Sjeff struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 502219820Sjeff int err; 503219820Sjeff 504219820Sjeff if (!is_power_of_2(dev->caps.num_mpts)) 505219820Sjeff return -EINVAL; 506219820Sjeff 507219820Sjeff err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 508219820Sjeff ~0, dev->caps.reserved_mrws, 0); 509219820Sjeff if (err) 510219820Sjeff return err; 511219820Sjeff 512219820Sjeff err = mlx4_buddy_init(&mr_table->mtt_buddy, 513219820Sjeff ilog2(dev->caps.num_mtt_segs)); 514219820Sjeff if (err) 515219820Sjeff goto err_buddy; 516219820Sjeff 517219820Sjeff if (dev->caps.reserved_mtts) { 518219820Sjeff if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) { 519219820Sjeff mlx4_warn(dev, "MTT table of order %d is too small.\n", 520219820Sjeff 
mr_table->mtt_buddy.max_order); 521219820Sjeff err = -ENOMEM; 522219820Sjeff goto err_reserve_mtts; 523219820Sjeff } 524219820Sjeff } 525219820Sjeff 526219820Sjeff return 0; 527219820Sjeff 528219820Sjefferr_reserve_mtts: 529219820Sjeff mlx4_buddy_cleanup(&mr_table->mtt_buddy); 530219820Sjeff 531219820Sjefferr_buddy: 532219820Sjeff mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); 533219820Sjeff 534219820Sjeff return err; 535219820Sjeff} 536219820Sjeff 537219820Sjeffvoid mlx4_cleanup_mr_table(struct mlx4_dev *dev) 538219820Sjeff{ 539219820Sjeff struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 540219820Sjeff 541219820Sjeff mlx4_buddy_cleanup(&mr_table->mtt_buddy); 542219820Sjeff mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); 543219820Sjeff} 544219820Sjeff 545219820Sjeffstatic inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, 546219820Sjeff int npages, u64 iova) 547219820Sjeff{ 548219820Sjeff int i, page_mask; 549219820Sjeff 550219820Sjeff if (npages > fmr->max_pages) 551219820Sjeff return -EINVAL; 552219820Sjeff 553219820Sjeff page_mask = (1 << fmr->page_shift) - 1; 554219820Sjeff 555219820Sjeff /* We are getting page lists, so va must be page aligned. 
*/ 556219820Sjeff if (iova & page_mask) 557219820Sjeff return -EINVAL; 558219820Sjeff 559219820Sjeff /* Trust the user not to pass misaligned data in page_list */ 560219820Sjeff if (0) 561219820Sjeff for (i = 0; i < npages; ++i) { 562219820Sjeff if (page_list[i] & ~page_mask) 563219820Sjeff return -EINVAL; 564219820Sjeff } 565219820Sjeff 566219820Sjeff if (fmr->maps >= fmr->max_maps) 567219820Sjeff return -EINVAL; 568219820Sjeff 569219820Sjeff return 0; 570219820Sjeff} 571219820Sjeff 572219820Sjeffint mlx4_map_phys_fmr_fbo(struct mlx4_dev *dev, struct mlx4_fmr *fmr, 573219820Sjeff u64 *page_list, int npages, u64 iova, u32 fbo, 574219820Sjeff u32 len, u32 *lkey, u32 *rkey, int same_key) 575219820Sjeff{ 576219820Sjeff u32 key; 577219820Sjeff int i, err; 578219820Sjeff 579219820Sjeff err = mlx4_check_fmr(fmr, page_list, npages, iova); 580219820Sjeff if (err) 581219820Sjeff return err; 582219820Sjeff 583219820Sjeff ++fmr->maps; 584219820Sjeff 585219820Sjeff key = key_to_hw_index(fmr->mr.key); 586219820Sjeff if (!same_key) 587219820Sjeff key += dev->caps.num_mpts; 588219820Sjeff *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); 589219820Sjeff 590219820Sjeff *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; 591219820Sjeff 592219820Sjeff /* Make sure MPT status is visible before writing MTT entries */ 593219820Sjeff wmb(); 594219820Sjeff 595219820Sjeff for (i = 0; i < npages; ++i) 596219820Sjeff fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 597219820Sjeff 598219820Sjeff dma_sync_single(&dev->pdev->dev, fmr->dma_handle, 599219820Sjeff npages * sizeof(u64), DMA_TO_DEVICE); 600219820Sjeff 601219820Sjeff fmr->mpt->key = cpu_to_be32(key); 602219820Sjeff fmr->mpt->lkey = cpu_to_be32(key); 603219820Sjeff fmr->mpt->length = cpu_to_be64(len); 604219820Sjeff fmr->mpt->start = cpu_to_be64(iova); 605219820Sjeff fmr->mpt->first_byte_offset = cpu_to_be32(fbo & 0x001fffff); 606219820Sjeff fmr->mpt->flags2 = (fbo ? 
MLX4_MPT_FLAG2_FBO_EN : 0); 607219820Sjeff 608219820Sjeff /* Make MTT entries are visible before setting MPT status */ 609219820Sjeff wmb(); 610219820Sjeff 611219820Sjeff *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; 612219820Sjeff 613219820Sjeff /* Make sure MPT status is visible before consumer can use FMR */ 614219820Sjeff wmb(); 615219820Sjeff 616219820Sjeff return 0; 617219820Sjeff} 618219820SjeffEXPORT_SYMBOL_GPL(mlx4_map_phys_fmr_fbo); 619219820Sjeff 620219820Sjeffint mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, 621219820Sjeff int npages, u64 iova, u32 *lkey, u32 *rkey) 622219820Sjeff{ 623219820Sjeff u32 len = npages * (1ull << fmr->page_shift); 624219820Sjeff 625219820Sjeff return mlx4_map_phys_fmr_fbo(dev, fmr, page_list, npages, iova, 0, 626219820Sjeff len, lkey, rkey, 0); 627219820Sjeff} 628219820SjeffEXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); 629219820Sjeff 630219820Sjeffint mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, 631219820Sjeff int max_maps, u8 page_shift, struct mlx4_fmr *fmr) 632219820Sjeff{ 633219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 634219820Sjeff u64 mtt_seg; 635219820Sjeff int err = -ENOMEM; 636219820Sjeff 637219820Sjeff if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) 638219820Sjeff return -EINVAL; 639219820Sjeff 640219820Sjeff /* All MTTs must fit in the same page */ 641219820Sjeff if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) 642219820Sjeff return -EINVAL; 643219820Sjeff 644219820Sjeff fmr->page_shift = page_shift; 645219820Sjeff fmr->max_pages = max_pages; 646219820Sjeff fmr->max_maps = max_maps; 647219820Sjeff fmr->maps = 0; 648219820Sjeff 649219820Sjeff err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, 650219820Sjeff page_shift, &fmr->mr); 651219820Sjeff if (err) 652219820Sjeff return err; 653219820Sjeff 654219820Sjeff mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; 655219820Sjeff 656219820Sjeff fmr->mtts = 
mlx4_table_find(&priv->mr_table.mtt_table, 657219820Sjeff fmr->mr.mtt.first_seg, 658219820Sjeff &fmr->dma_handle); 659219820Sjeff if (!fmr->mtts) { 660219820Sjeff err = -ENOMEM; 661219820Sjeff goto err_free; 662219820Sjeff } 663219820Sjeff 664219820Sjeff return 0; 665219820Sjeff 666219820Sjefferr_free: 667219820Sjeff mlx4_mr_free(dev, &fmr->mr); 668219820Sjeff return err; 669219820Sjeff} 670219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_alloc); 671219820Sjeff 672219820Sjeffint mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, 673219820Sjeff u32 pd, u32 access, int max_pages, 674219820Sjeff int max_maps, u8 page_shift, struct mlx4_fmr *fmr) 675219820Sjeff{ 676219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 677219820Sjeff u64 mtt_seg; 678219820Sjeff int err = -ENOMEM; 679219820Sjeff 680219820Sjeff if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) 681219820Sjeff return -EINVAL; 682219820Sjeff 683219820Sjeff /* All MTTs must fit in the same page */ 684219820Sjeff if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) 685219820Sjeff return -EINVAL; 686219820Sjeff 687219820Sjeff fmr->page_shift = page_shift; 688219820Sjeff fmr->max_pages = max_pages; 689219820Sjeff fmr->max_maps = max_maps; 690219820Sjeff fmr->maps = 0; 691219820Sjeff 692219820Sjeff err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages, 693219820Sjeff page_shift, &fmr->mr); 694219820Sjeff if (err) 695219820Sjeff return err; 696219820Sjeff 697219820Sjeff mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; 698219820Sjeff 699219820Sjeff fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, 700219820Sjeff fmr->mr.mtt.first_seg, 701219820Sjeff &fmr->dma_handle); 702219820Sjeff if (!fmr->mtts) { 703219820Sjeff err = -ENOMEM; 704219820Sjeff goto err_free; 705219820Sjeff } 706219820Sjeff 707219820Sjeff return 0; 708219820Sjeff 709219820Sjefferr_free: 710219820Sjeff mlx4_mr_free_reserved(dev, &fmr->mr); 711219820Sjeff return err; 712219820Sjeff} 
713219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved); 714219820Sjeff 715219820Sjeffint mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 716219820Sjeff{ 717219820Sjeff struct mlx4_priv *priv = mlx4_priv(dev); 718219820Sjeff int err; 719219820Sjeff 720219820Sjeff err = mlx4_mr_enable(dev, &fmr->mr); 721219820Sjeff if (err) 722219820Sjeff return err; 723219820Sjeff 724219820Sjeff fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, 725219820Sjeff key_to_hw_index(fmr->mr.key), NULL); 726219820Sjeff if (!fmr->mpt) 727219820Sjeff return -ENOMEM; 728219820Sjeff 729219820Sjeff return 0; 730219820Sjeff} 731219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_enable); 732219820Sjeff 733219820Sjeffvoid mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, 734219820Sjeff u32 *lkey, u32 *rkey) 735219820Sjeff{ 736219820Sjeff if (!fmr->maps) 737219820Sjeff return; 738219820Sjeff 739219820Sjeff fmr->maps = 0; 740219820Sjeff 741219820Sjeff *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; 742219820Sjeff} 743219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_unmap); 744219820Sjeff 745219820Sjeffint mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 746219820Sjeff{ 747219820Sjeff if (fmr->maps) 748219820Sjeff return -EBUSY; 749219820Sjeff 750219820Sjeff fmr->mr.enabled = 0; 751219820Sjeff mlx4_mr_free(dev, &fmr->mr); 752219820Sjeff 753219820Sjeff return 0; 754219820Sjeff} 755219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_free); 756219820Sjeff 757219820Sjeffint mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 758219820Sjeff{ 759219820Sjeff if (fmr->maps) 760219820Sjeff return -EBUSY; 761219820Sjeff 762219820Sjeff fmr->mr.enabled = 0; 763219820Sjeff mlx4_mr_free_reserved(dev, &fmr->mr); 764219820Sjeff 765219820Sjeff return 0; 766219820Sjeff} 767219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved); 768219820Sjeff 769219820Sjeffint mlx4_SYNC_TPT(struct mlx4_dev *dev) 770219820Sjeff{ 771219820Sjeff return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000); 772219820Sjeff} 
773219820SjeffEXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 774