1/* 2 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the BSD-type 8 * license below: 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 17 * Redistributions in binary form must reproduce the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer in the documentation and/or other materials provided 20 * with the distribution. 21 * 22 * Neither the name of the Network Appliance, Inc. nor the names of 23 * its contributors may be used to endorse or promote products 24 * derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 * 39 * Author: Tom Tucker <tom@opengridcomputing.com> 40 */ 41 42#include <linux/sunrpc/debug.h> 43#include <linux/sunrpc/rpc_rdma.h> 44#include <linux/spinlock.h> 45#include <asm/unaligned.h> 46#include <rdma/ib_verbs.h> 47#include <rdma/rdma_cm.h> 48#include <linux/sunrpc/svc_rdma.h> 49 50#define RPCDBG_FACILITY RPCDBG_SVCXPRT 51 52/* Encode an XDR as an array of IB SGE 53 * 54 * Assumptions: 55 * - head[0] is physically contiguous. 56 * - tail[0] is physically contiguous. 57 * - pages[] is not physically or virtually contiguous and consists of 58 * PAGE_SIZE elements. 59 * 60 * Output: 61 * SGE[0] reserved for RCPRDMA header 62 * SGE[1] data from xdr->head[] 63 * SGE[2..sge_count-2] data from xdr->pages[] 64 * SGE[sge_count-1] data from xdr->tail. 65 * 66 * The max SGE we need is the length of the XDR / pagesize + one for 67 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES 68 * reserves a page for both the request and the reply header, and this 69 * array is only concerned with the reply we are assured that we have 70 * on extra page for the RPCRMDA header. 71 */ 72static int fast_reg_xdr(struct svcxprt_rdma *xprt, 73 struct xdr_buf *xdr, 74 struct svc_rdma_req_map *vec) 75{ 76 int sge_no; 77 u32 sge_bytes; 78 u32 page_bytes; 79 u32 page_off; 80 int page_no = 0; 81 u8 *frva; 82 struct svc_rdma_fastreg_mr *frmr; 83 84 frmr = svc_rdma_get_frmr(xprt); 85 if (IS_ERR(frmr)) 86 return -ENOMEM; 87 vec->frmr = frmr; 88 89 /* Skip the RPCRDMA header */ 90 sge_no = 1; 91 92 /* Map the head. */ 93 frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK); 94 vec->sge[sge_no].iov_base = xdr->head[0].iov_base; 95 vec->sge[sge_no].iov_len = xdr->head[0].iov_len; 96 vec->count = 2; 97 sge_no++; 98 99 /* Build the FRMR */ 100 frmr->kva = frva; 101 frmr->direction = DMA_TO_DEVICE; 102 frmr->access_flags = 0; 103 frmr->map_len = PAGE_SIZE; 104 frmr->page_list_len = 1; 105 frmr->page_list->page_list[page_no] = 106 ib_dma_map_single(xprt->sc_cm_id->device, 107 (void *)xdr->head[0].iov_base, 108 PAGE_SIZE, DMA_TO_DEVICE); 109 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 110 frmr->page_list->page_list[page_no])) 111 goto fatal_err; 112 atomic_inc(&xprt->sc_dma_used); 113 114 page_off = xdr->page_base; 115 page_bytes = xdr->page_len + page_off; 116 if (!page_bytes) 117 goto encode_tail; 118 119 /* Map the pages */ 120 vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off; 121 vec->sge[sge_no].iov_len = page_bytes; 122 sge_no++; 123 while (page_bytes) { 124 struct page *page; 125 126 page = xdr->pages[page_no++]; 127 sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off)); 128 page_bytes -= sge_bytes; 129 130 frmr->page_list->page_list[page_no] = 131 ib_dma_map_single(xprt->sc_cm_id->device, 132 page_address(page), 133 PAGE_SIZE, DMA_TO_DEVICE); 134 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 135 frmr->page_list->page_list[page_no])) 136 goto fatal_err; 137 138 atomic_inc(&xprt->sc_dma_used); 139 page_off = 0; /* reset for next time through loop */ 140 frmr->map_len += PAGE_SIZE; 141 frmr->page_list_len++; 142 } 143 vec->count++; 144 145 encode_tail: 146 /* Map tail */ 147 if (0 == xdr->tail[0].iov_len) 148 goto done; 149 150 vec->count++; 151 vec->sge[sge_no].iov_len = xdr->tail[0].iov_len; 152 153 if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) == 154 ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) { 155 /* 156 * If head and tail use the same page, we don't need 157 * to map it again. 158 */ 159 vec->sge[sge_no].iov_base = xdr->tail[0].iov_base; 160 } else { 161 void *va; 162 163 /* Map another page for the tail */ 164 page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK; 165 va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK); 166 vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off; 167 168 frmr->page_list->page_list[page_no] = 169 ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE, 170 DMA_TO_DEVICE); 171 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 172 frmr->page_list->page_list[page_no])) 173 goto fatal_err; 174 atomic_inc(&xprt->sc_dma_used); 175 frmr->map_len += PAGE_SIZE; 176 frmr->page_list_len++; 177 } 178 179 done: 180 if (svc_rdma_fastreg(xprt, frmr)) 181 goto fatal_err; 182 183 return 0; 184 185 fatal_err: 186 printk("svcrdma: Error fast registering memory for xprt %p\n", xprt); 187 vec->frmr = NULL; 188 svc_rdma_put_frmr(xprt, frmr); 189 return -EIO; 190} 191 192static int map_xdr(struct svcxprt_rdma *xprt, 193 struct xdr_buf *xdr, 194 struct svc_rdma_req_map *vec) 195{ 196 int sge_no; 197 u32 sge_bytes; 198 u32 page_bytes; 199 u32 page_off; 200 int page_no; 201 202 BUG_ON(xdr->len != 203 (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)); 204 205 if (xprt->sc_frmr_pg_list_len) 206 return fast_reg_xdr(xprt, xdr, vec); 207 208 /* Skip the first sge, this is for the RPCRDMA header */ 209 sge_no = 1; 210 211 /* Head SGE */ 212 vec->sge[sge_no].iov_base = xdr->head[0].iov_base; 213 vec->sge[sge_no].iov_len = xdr->head[0].iov_len; 214 sge_no++; 215 216 /* pages SGE */ 217 page_no = 0; 218 page_bytes = xdr->page_len; 219 page_off = xdr->page_base; 220 while (page_bytes) { 221 vec->sge[sge_no].iov_base = 222 page_address(xdr->pages[page_no]) + page_off; 223 sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off)); 224 page_bytes -= sge_bytes; 225 vec->sge[sge_no].iov_len = sge_bytes; 226 227 sge_no++; 228 page_no++; 229 page_off = 0; /* reset for next time through loop */ 230 } 231 232 /* Tail SGE */ 233 if (xdr->tail[0].iov_len) { 234 vec->sge[sge_no].iov_base = xdr->tail[0].iov_base; 235 vec->sge[sge_no].iov_len = xdr->tail[0].iov_len; 236 sge_no++; 237 } 238 239 dprintk("svcrdma: map_xdr: sge_no %d page_no %d " 240 "page_base %u page_len %u head_len %zu tail_len %zu\n", 241 sge_no, page_no, xdr->page_base, xdr->page_len, 242 xdr->head[0].iov_len, xdr->tail[0].iov_len); 243 244 vec->count = sge_no; 245 return 0; 246} 247 248/* Assumptions: 249 * - We are using FRMR 250 * - or - 251 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE 252 */ 253static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, 254 u32 rmr, u64 to, 255 u32 xdr_off, int write_len, 256 struct svc_rdma_req_map *vec) 257{ 258 struct ib_send_wr write_wr; 259 struct ib_sge *sge; 260 int xdr_sge_no; 261 int sge_no; 262 int sge_bytes; 263 int sge_off; 264 int bc; 265 struct svc_rdma_op_ctxt *ctxt; 266 267 BUG_ON(vec->count > RPCSVC_MAXPAGES); 268 dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, " 269 "write_len=%d, vec->sge=%p, vec->count=%lu\n", 270 rmr, (unsigned long long)to, xdr_off, 271 write_len, vec->sge, vec->count); 272 273 ctxt = svc_rdma_get_context(xprt); 274 ctxt->direction = DMA_TO_DEVICE; 275 sge = ctxt->sge; 276 277 /* Find the SGE associated with xdr_off */ 278 for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count; 279 xdr_sge_no++) { 280 if (vec->sge[xdr_sge_no].iov_len > bc) 281 break; 282 bc -= vec->sge[xdr_sge_no].iov_len; 283 } 284 285 sge_off = bc; 286 bc = write_len; 287 sge_no = 0; 288 289 /* Copy the remaining SGE */ 290 while (bc != 0) { 291 sge_bytes = min_t(size_t, 292 bc, vec->sge[xdr_sge_no].iov_len-sge_off); 293 sge[sge_no].length = sge_bytes; 294 if (!vec->frmr) { 295 sge[sge_no].addr = 296 ib_dma_map_single(xprt->sc_cm_id->device, 297 (void *) 298 vec->sge[xdr_sge_no].iov_base + sge_off, 299 sge_bytes, DMA_TO_DEVICE); 300 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 301 sge[sge_no].addr)) 302 goto err; 303 atomic_inc(&xprt->sc_dma_used); 304 sge[sge_no].lkey = xprt->sc_dma_lkey; 305 } else { 306 sge[sge_no].addr = (unsigned long) 307 vec->sge[xdr_sge_no].iov_base + sge_off; 308 sge[sge_no].lkey = vec->frmr->mr->lkey; 309 } 310 ctxt->count++; 311 ctxt->frmr = vec->frmr; 312 sge_off = 0; 313 sge_no++; 314 xdr_sge_no++; 315 BUG_ON(xdr_sge_no > vec->count); 316 bc -= sge_bytes; 317 } 318 319 /* Prepare WRITE WR */ 320 memset(&write_wr, 0, sizeof write_wr); 321 ctxt->wr_op = IB_WR_RDMA_WRITE; 322 write_wr.wr_id = (unsigned long)ctxt; 323 write_wr.sg_list = &sge[0]; 324 write_wr.num_sge = sge_no; 325 write_wr.opcode = IB_WR_RDMA_WRITE; 326 write_wr.send_flags = IB_SEND_SIGNALED; 327 write_wr.wr.rdma.rkey = rmr; 328 write_wr.wr.rdma.remote_addr = to; 329 330 /* Post It */ 331 atomic_inc(&rdma_stat_write); 332 if (svc_rdma_send(xprt, &write_wr)) 333 goto err; 334 return 0; 335 err: 336 svc_rdma_put_context(ctxt, 0); 337 /* Fatal error, close transport */ 338 return -EIO; 339} 340 341static int send_write_chunks(struct svcxprt_rdma *xprt, 342 struct rpcrdma_msg *rdma_argp, 343 struct rpcrdma_msg *rdma_resp, 344 struct svc_rqst *rqstp, 345 struct svc_rdma_req_map *vec) 346{ 347 u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; 348 int write_len; 349 int max_write; 350 u32 xdr_off; 351 int chunk_off; 352 int chunk_no; 353 struct rpcrdma_write_array *arg_ary; 354 struct rpcrdma_write_array *res_ary; 355 int ret; 356 357 arg_ary = svc_rdma_get_write_array(rdma_argp); 358 if (!arg_ary) 359 return 0; 360 res_ary = (struct rpcrdma_write_array *) 361 &rdma_resp->rm_body.rm_chunks[1]; 362 363 if (vec->frmr) 364 max_write = vec->frmr->map_len; 365 else 366 max_write = xprt->sc_max_sge * PAGE_SIZE; 367 368 /* Write chunks start at the pagelist */ 369 for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; 370 xfer_len && chunk_no < arg_ary->wc_nchunks; 371 chunk_no++) { 372 struct rpcrdma_segment *arg_ch; 373 u64 rs_offset; 374 375 arg_ch = &arg_ary->wc_array[chunk_no].wc_target; 376 write_len = min(xfer_len, arg_ch->rs_length); 377 378 /* Prepare the response chunk given the length actually 379 * written */ 380 rs_offset = get_unaligned(&(arg_ch->rs_offset)); 381 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, 382 arg_ch->rs_handle, 383 rs_offset, 384 write_len); 385 chunk_off = 0; 386 while (write_len) { 387 int this_write; 388 this_write = min(write_len, max_write); 389 ret = send_write(xprt, rqstp, 390 arg_ch->rs_handle, 391 rs_offset + chunk_off, 392 xdr_off, 393 this_write, 394 vec); 395 if (ret) { 396 dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", 397 ret); 398 return -EIO; 399 } 400 chunk_off += this_write; 401 xdr_off += this_write; 402 xfer_len -= this_write; 403 write_len -= this_write; 404 } 405 } 406 /* Update the req with the number of chunks actually used */ 407 svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no); 408 409 return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; 410} 411 412static int send_reply_chunks(struct svcxprt_rdma *xprt, 413 struct rpcrdma_msg *rdma_argp, 414 struct rpcrdma_msg *rdma_resp, 415 struct svc_rqst *rqstp, 416 struct svc_rdma_req_map *vec) 417{ 418 u32 xfer_len = rqstp->rq_res.len; 419 int write_len; 420 int max_write; 421 u32 xdr_off; 422 int chunk_no; 423 int chunk_off; 424 struct rpcrdma_segment *ch; 425 struct rpcrdma_write_array *arg_ary; 426 struct rpcrdma_write_array *res_ary; 427 int ret; 428 429 arg_ary = svc_rdma_get_reply_array(rdma_argp); 430 if (!arg_ary) 431 return 0; 432 res_ary = (struct rpcrdma_write_array *) 433 &rdma_resp->rm_body.rm_chunks[2]; 434 435 if (vec->frmr) 436 max_write = vec->frmr->map_len; 437 else 438 max_write = xprt->sc_max_sge * PAGE_SIZE; 439 440 /* xdr offset starts at RPC message */ 441 for (xdr_off = 0, chunk_no = 0; 442 xfer_len && chunk_no < arg_ary->wc_nchunks; 443 chunk_no++) { 444 u64 rs_offset; 445 ch = &arg_ary->wc_array[chunk_no].wc_target; 446 write_len = min(xfer_len, ch->rs_length); 447 448 /* Prepare the reply chunk given the length actually 449 * written */ 450 rs_offset = get_unaligned(&(ch->rs_offset)); 451 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, 452 ch->rs_handle, rs_offset, 453 write_len); 454 chunk_off = 0; 455 while (write_len) { 456 int this_write; 457 458 this_write = min(write_len, max_write); 459 ret = send_write(xprt, rqstp, 460 ch->rs_handle, 461 rs_offset + chunk_off, 462 xdr_off, 463 this_write, 464 vec); 465 if (ret) { 466 dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", 467 ret); 468 return -EIO; 469 } 470 chunk_off += this_write; 471 xdr_off += this_write; 472 xfer_len -= this_write; 473 write_len -= this_write; 474 } 475 } 476 /* Update the req with the number of chunks actually used */ 477 svc_rdma_xdr_encode_reply_array(res_ary, chunk_no); 478 479 return rqstp->rq_res.len; 480} 481 482/* This function prepares the portion of the RPCRDMA message to be 483 * sent in the RDMA_SEND. This function is called after data sent via 484 * RDMA has already been transmitted. There are three cases: 485 * - The RPCRDMA header, RPC header, and payload are all sent in a 486 * single RDMA_SEND. This is the "inline" case. 487 * - The RPCRDMA header and some portion of the RPC header and data 488 * are sent via this RDMA_SEND and another portion of the data is 489 * sent via RDMA. 490 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC 491 * header and data are all transmitted via RDMA. 492 * In all three cases, this function prepares the RPCRDMA header in 493 * sge[0], the 'type' parameter indicates the type to place in the 494 * RPCRDMA header, and the 'byte_count' field indicates how much of 495 * the XDR to include in this RDMA_SEND. 496 */ 497static int send_reply(struct svcxprt_rdma *rdma, 498 struct svc_rqst *rqstp, 499 struct page *page, 500 struct rpcrdma_msg *rdma_resp, 501 struct svc_rdma_op_ctxt *ctxt, 502 struct svc_rdma_req_map *vec, 503 int byte_count) 504{ 505 struct ib_send_wr send_wr; 506 struct ib_send_wr inv_wr; 507 int sge_no; 508 int sge_bytes; 509 int page_no; 510 int ret; 511 512 /* Post a recv buffer to handle another request. */ 513 ret = svc_rdma_post_recv(rdma); 514 if (ret) { 515 printk(KERN_INFO 516 "svcrdma: could not post a receive buffer, err=%d." 517 "Closing transport %p.\n", ret, rdma); 518 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 519 svc_rdma_put_frmr(rdma, vec->frmr); 520 svc_rdma_put_context(ctxt, 0); 521 return -ENOTCONN; 522 } 523 524 /* Prepare the context */ 525 ctxt->pages[0] = page; 526 ctxt->count = 1; 527 ctxt->frmr = vec->frmr; 528 if (vec->frmr) 529 set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); 530 else 531 clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); 532 533 /* Prepare the SGE for the RPCRDMA Header */ 534 ctxt->sge[0].lkey = rdma->sc_dma_lkey; 535 ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); 536 ctxt->sge[0].addr = 537 ib_dma_map_single(rdma->sc_cm_id->device, page_address(page), 538 ctxt->sge[0].length, DMA_TO_DEVICE); 539 if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) 540 goto err; 541 atomic_inc(&rdma->sc_dma_used); 542 543 ctxt->direction = DMA_TO_DEVICE; 544 545 /* Determine how many of our SGE are to be transmitted */ 546 for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { 547 sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); 548 byte_count -= sge_bytes; 549 if (!vec->frmr) { 550 ctxt->sge[sge_no].addr = 551 ib_dma_map_single(rdma->sc_cm_id->device, 552 vec->sge[sge_no].iov_base, 553 sge_bytes, DMA_TO_DEVICE); 554 if (ib_dma_mapping_error(rdma->sc_cm_id->device, 555 ctxt->sge[sge_no].addr)) 556 goto err; 557 atomic_inc(&rdma->sc_dma_used); 558 ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey; 559 } else { 560 ctxt->sge[sge_no].addr = (unsigned long) 561 vec->sge[sge_no].iov_base; 562 ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey; 563 } 564 ctxt->sge[sge_no].length = sge_bytes; 565 } 566 BUG_ON(byte_count != 0); 567 568 /* Save all respages in the ctxt and remove them from the 569 * respages array. They are our pages until the I/O 570 * completes. 571 */ 572 for (page_no = 0; page_no < rqstp->rq_resused; page_no++) { 573 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; 574 ctxt->count++; 575 rqstp->rq_respages[page_no] = NULL; 576 /* 577 * If there are more pages than SGE, terminate SGE 578 * list so that svc_rdma_unmap_dma doesn't attempt to 579 * unmap garbage. 580 */ 581 if (page_no+1 >= sge_no) 582 ctxt->sge[page_no+1].length = 0; 583 } 584 BUG_ON(sge_no > rdma->sc_max_sge); 585 memset(&send_wr, 0, sizeof send_wr); 586 ctxt->wr_op = IB_WR_SEND; 587 send_wr.wr_id = (unsigned long)ctxt; 588 send_wr.sg_list = ctxt->sge; 589 send_wr.num_sge = sge_no; 590 send_wr.opcode = IB_WR_SEND; 591 send_wr.send_flags = IB_SEND_SIGNALED; 592 if (vec->frmr) { 593 /* Prepare INVALIDATE WR */ 594 memset(&inv_wr, 0, sizeof inv_wr); 595 inv_wr.opcode = IB_WR_LOCAL_INV; 596 inv_wr.send_flags = IB_SEND_SIGNALED; 597 inv_wr.ex.invalidate_rkey = 598 vec->frmr->mr->lkey; 599 send_wr.next = &inv_wr; 600 } 601 602 ret = svc_rdma_send(rdma, &send_wr); 603 if (ret) 604 goto err; 605 606 return 0; 607 608 err: 609 svc_rdma_unmap_dma(ctxt); 610 svc_rdma_put_frmr(rdma, vec->frmr); 611 svc_rdma_put_context(ctxt, 1); 612 return -EIO; 613} 614 615void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp) 616{ 617} 618 619/* 620 * Return the start of an xdr buffer. 621 */ 622static void *xdr_start(struct xdr_buf *xdr) 623{ 624 return xdr->head[0].iov_base - 625 (xdr->len - 626 xdr->page_len - 627 xdr->tail[0].iov_len - 628 xdr->head[0].iov_len); 629} 630 631int svc_rdma_sendto(struct svc_rqst *rqstp) 632{ 633 struct svc_xprt *xprt = rqstp->rq_xprt; 634 struct svcxprt_rdma *rdma = 635 container_of(xprt, struct svcxprt_rdma, sc_xprt); 636 struct rpcrdma_msg *rdma_argp; 637 struct rpcrdma_msg *rdma_resp; 638 struct rpcrdma_write_array *reply_ary; 639 enum rpcrdma_proc reply_type; 640 int ret; 641 int inline_bytes; 642 struct page *res_page; 643 struct svc_rdma_op_ctxt *ctxt; 644 struct svc_rdma_req_map *vec; 645 646 dprintk("svcrdma: sending response for rqstp=%p\n", rqstp); 647 648 /* Get the RDMA request header. */ 649 rdma_argp = xdr_start(&rqstp->rq_arg); 650 651 /* Build an req vec for the XDR */ 652 ctxt = svc_rdma_get_context(rdma); 653 ctxt->direction = DMA_TO_DEVICE; 654 vec = svc_rdma_get_req_map(); 655 ret = map_xdr(rdma, &rqstp->rq_res, vec); 656 if (ret) 657 goto err0; 658 inline_bytes = rqstp->rq_res.len; 659 660 /* Create the RDMA response header */ 661 res_page = svc_rdma_get_page(); 662 rdma_resp = page_address(res_page); 663 reply_ary = svc_rdma_get_reply_array(rdma_argp); 664 if (reply_ary) 665 reply_type = RDMA_NOMSG; 666 else 667 reply_type = RDMA_MSG; 668 svc_rdma_xdr_encode_reply_header(rdma, rdma_argp, 669 rdma_resp, reply_type); 670 671 /* Send any write-chunk data and build resp write-list */ 672 ret = send_write_chunks(rdma, rdma_argp, rdma_resp, 673 rqstp, vec); 674 if (ret < 0) { 675 printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n", 676 ret); 677 goto err1; 678 } 679 inline_bytes -= ret; 680 681 /* Send any reply-list data and update resp reply-list */ 682 ret = send_reply_chunks(rdma, rdma_argp, rdma_resp, 683 rqstp, vec); 684 if (ret < 0) { 685 printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n", 686 ret); 687 goto err1; 688 } 689 inline_bytes -= ret; 690 691 ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec, 692 inline_bytes); 693 svc_rdma_put_req_map(vec); 694 dprintk("svcrdma: send_reply returns %d\n", ret); 695 return ret; 696 697 err1: 698 put_page(res_page); 699 err0: 700 svc_rdma_put_req_map(vec); 701 svc_rdma_put_context(ctxt, 0); 702 return ret; 703} 704