/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: iser_memory.c,v 1.1.1.1 2007/08/03 18:52:32 Exp $
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

/**
 * iser_regd_buff_release - decrements the reference count of a
 * registered buffer and releases it when the count drops to zero
 *
 * returns 0 if released, 1 if deferred
 */
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
	struct ib_device *dev;

	if ((atomic_read(&regd_buf->ref_count) == 0) ||
	    atomic_dec_and_test(&regd_buf->ref_count)) {
		/* if we used the dma mr, unreg is just NOP */
		if (regd_buf->reg.is_fmr)
			iser_unreg_mem(&regd_buf->reg);

		if (regd_buf->dma_addr) {
			dev = regd_buf->device->ib_device;
			ib_dma_unmap_single(dev,
					    regd_buf->dma_addr,
					    regd_buf->data_size,
					    regd_buf->direction);
		}
		/* else this regd buf is associated with a task which we
		 * dma_unmap_single/sg later */
		return 0;
	} else {
		iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
		return 1;
	}
}

/**
 * iser_reg_single - fills a registered buffer descriptor with
 * registration information
 */
void iser_reg_single(struct iser_device *device,
		     struct iser_regd_buf *regd_buf,
		     enum dma_data_direction direction)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(device->ib_device,
				     regd_buf->virt_addr,
				     regd_buf->data_size, direction);
	BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));

	regd_buf->reg.lkey = device->mr->lkey;
	regd_buf->reg.len  = regd_buf->data_size;
	regd_buf->reg.va   = dma_addr;
	regd_buf->reg.is_fmr = 0;

	regd_buf->dma_addr  = dma_addr;
	regd_buf->direction = direction;
}
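
/*
 * Illustrative sketch (compiled out): how a caller might pair
 * iser_reg_single() with iser_regd_buff_release().  The helper name and
 * the hdr/hdr_len parameters are hypothetical; the field assignments
 * follow the iser_regd_buf fields used above.
 */
#if 0
static void example_reg_single_usage(struct iser_device *device,
				     struct iser_regd_buf *regd_buf,
				     void *hdr, unsigned int hdr_len)
{
	regd_buf->device    = device;
	regd_buf->virt_addr = hdr;
	regd_buf->data_size = hdr_len;

	/* dma-map the buffer and describe it via the global DMA MR lkey */
	iser_reg_single(device, regd_buf, DMA_TO_DEVICE);

	/* ... post a send work request referencing regd_buf->reg ... */

	/* drops the mapping once the last reference is gone; a non-zero
	 * return means the release was deferred to a later put */
	if (iser_regd_buff_release(regd_buf))
		iser_dbg("release deferred\n");
}
#endif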

/**
 * iser_start_rdma_unaligned_sg - copies the unaligned SG into a
 * contiguous bounce buffer which is then used for the RDMA
 */
int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				 enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_NOIO,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_NOIO);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sg = (struct scatterlist *)data->buf;
		int i;
		char *p, *from;

		for (p = mem, i = 0; i < data->size; i++) {
			from = kmap_atomic(sg[i].page, KM_USER0);
			memcpy(p,
			       from + sg[i].offset,
			       sg[i].length);
			kunmap_atomic(from, KM_USER0);
			p += sg[i].length;
		}
	}

	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_ctask->data_copy[cmd_dir].buf =
		&iser_ctask->data_copy[cmd_dir].sg_single;
	iser_ctask->data_copy[cmd_dir].size = 1;

	iser_ctask->data_copy[cmd_dir].copy_buf = mem;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_ctask->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - for reads, copies the bounce buffer
 * back into the original unaligned SG, then frees the bounce buffer
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long cmd_data_len;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_ctask->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = mem_copy->copy_buf;

		sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
		sg_size = iser_ctask->data[ISER_DIR_IN].size;

		for (p = mem, i = 0; i < sg_size; i++) {
			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
			memcpy(to + sg[i].offset,
			       p,
			       sg[i].length);
			kunmap_atomic(to, KM_SOFTIRQ0);
			p += sg[i].length;
		}
	}

	cmd_data_len = iser_ctask->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}
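
/*
 * Illustrative sketch (compiled out): the bounce buffer order arithmetic
 * used by the two routines above.  For example, with PAGE_SHIFT == 12 a
 * 200K command is rounded up to 256K, giving get/free_pages order
 * ilog2(262144) - 12 = 18 - 12 = 6, i.e. 64 pages.  The helper name is
 * hypothetical.
 */
#if 0
static unsigned int example_bounce_order(unsigned long cmd_data_len)
{
	/* only lengths above ISER_KMALLOC_THRESHOLD (128K) take this path */
	return ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT;
}
#endif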

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements.  Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages.  The code also supports the weird case
 * where a few fragments of the same page are present in the SG as
 * consecutive elements, and it handles a one-entry SG.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg = (struct scatterlist *)data->buf;
	u64 first_addr, last_addr, page;
	int end_aligned;
	unsigned int cur_page = 0;
	unsigned long total_sz = 0;
	int i;

	/* compute the offset of first element */
	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;

	for (i = 0; i < data->dma_nents; i++) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);

		total_sz += dma_len;

		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
		last_addr  = first_addr + dma_len;

		end_aligned = !(last_addr & ~MASK_4K);

		/* continue to collect page fragments till aligned or SG ends */
		while (!end_aligned && (i + 1 < data->dma_nents)) {
			i++;
			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
			total_sz += dma_len;
			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
			end_aligned = !(last_addr & ~MASK_4K);
		}

		/* handle the 1st page in the 1st DMA element */
		if (cur_page == 0) {
			page = first_addr & MASK_4K;
			page_vec->pages[cur_page] = page;
			cur_page++;
			page += SIZE_4K;
		} else
			page = first_addr;

		for (; page < last_addr; page += SIZE_4K) {
			page_vec->pages[cur_page] = page;
			cur_page++;
		}

	}
	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 page_vec->data_size, cur_page);
	return cur_page;
}

#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
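
/*
 * Worked example for iser_sg_to_page_vec(), assuming a hypothetical,
 * already dma-mapped SG with offset 0: two elements covering
 * [0x10000, 0x12000) and [0x12000, 0x13000) compact into
 * pages[] = { 0x10000, 0x11000, 0x12000 }, offset = 0, data_size = 0x3000,
 * and the function returns cur_page == 3.  An element ending unaligned
 * mid-list instead keeps the while loop collecting fragments until the
 * running end address is 4K aligned again.
 */
#if 0
/* the two mask operations the walk above relies on, given the
 * SIZE_4K/MASK_4K definitions (MASK_4K == ~(SIZE_4K - 1)); the helper
 * names are hypothetical */
static inline u64 example_page_of(u64 addr)
{
	return addr & MASK_4K;		/* round down to the 4K page */
}
static inline u64 example_off_in_page(u64 addr)
{
	return addr & ~MASK_4K;		/* offset within the 4K page */
}
#endif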

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers which is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly.  Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
					      struct ib_device *ibdev)
{
	struct scatterlist *sg;
	u64 end_addr, next_addr;
	int i, cnt;
	unsigned int ret_len = 0;

	sg = (struct scatterlist *)data->buf;

	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
		   "offset: %ld sz: %ld\n", i,
		   (unsigned long)page_to_phys(sg[i].page),
		   (unsigned long)sg[i].offset,
		   (unsigned long)sg[i].length); */
		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
			   ib_sg_dma_len(ibdev, &sg[i]);
		/* iser_dbg("Checking sg iobuf end address "
		   "0x%08lX\n", end_addr); */
		if (i + 1 < data->dma_nents) {
			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
			/* are i, i+1 fragments of the same page? */
			if (end_addr == next_addr)
				continue;
			else if (!IS_4K_ALIGNED(end_addr)) {
				ret_len = cnt + 1;
				break;
			}
		}
	}
	if (i == data->dma_nents)
		ret_len = cnt;	/* loop ended */
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg = (struct scatterlist *)data->buf;
	int i;

	for (i = 0; i < data->dma_nents; i++)
		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
			 sg[i].page, sg[i].offset,
			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_ctask->dir[iser_dir] = 1;
	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;

	if (iser_ctask->dir[ISER_DIR_IN]) {
		data = &iser_ctask->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_ctask->dir[ISER_DIR_OUT]) {
		data = &iser_ctask->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}
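
/*
 * Illustrative sketch (compiled out): the order in which the helpers in
 * this file are typically driven for a write command.  The caller is
 * hypothetical and error handling is heavily condensed.
 */
#if 0
static int example_task_data_flow(struct iscsi_iser_cmd_task *iser_ctask)
{
	struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
	int err;

	/* 1. dma-map the command's scatterlist */
	err = iser_dma_map_task_data(iser_ctask, buf_out,
				     ISER_DIR_OUT, DMA_TO_DEVICE);
	if (err)
		return err;

	/* 2. register for RDMA; falls back to a bounce buffer when the
	 *    mapped SG is not 4K aligned (see iser_reg_rdma_mem below) */
	err = iser_reg_rdma_mem(iser_ctask, ISER_DIR_OUT);
	if (err)
		return err;

	/* ... issue the command, wait for the scsi response ... */

	/* 3. drop the registration reference, then undo the mapping */
	iser_regd_buff_release(&iser_ctask->rdma_regd[ISER_DIR_OUT]);
	iser_dma_unmap_task_data(iser_ctask);
	return 0;
}
#endif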

/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
		      enum iser_data_dir cmd_dir)
{
	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
	struct iser_device   *device = ib_conn->device;
	struct ib_device     *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_ctask->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		iser_err("rdma alignment violation %d/%d aligned\n",
			 aligned_len, mem->dma_nents);
		iser_data_buf_dump(mem, ibdev);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_ctask);

		/* allocate copy buf, if we are writing, copy the
		 * unaligned scatterlist, dma map the copy */
		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_ctask->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
		if (err) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i = 0; i < ib_conn->page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->page_vec->pages[i]);
			return err;
		}
	}

	/* take a reference on this regd buf such that it will not be released
	 * (eg in send dto completion) before we get the scsi response */
	atomic_inc(&regd_buf->ref_count);
	return 0;
}
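
/*
 * Illustrative sketch (compiled out): the registration decision made by
 * iser_reg_rdma_mem() above, written out as a predicate.  The helper name
 * is hypothetical.  Condensed decision flow for reference:
 *
 *   aligned_len < dma_nents  -> unmap, copy to bounce buffer, remap
 *   dma_nents == 1           -> describe directly via device->mr (no FMR)
 *   dma_nents  > 1           -> build 4K page vector, map through an FMR
 */
#if 0
static int example_needs_fmr(struct iser_data_buf *mem)
{
	/* a single dma entry is covered by the global DMA MR's lkey/rkey;
	 * anything longer goes through an FMR built from the page vector */
	return mem->dma_nents > 1;
}
#endif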