/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "iw.h"

static struct kmem_cache *rds_iw_incoming_slab;
static struct kmem_cache *rds_iw_frag_slab;
static atomic_t rds_iw_allocation = ATOMIC_INIT(0);

static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	__free_page(frag->f_page);
	frag->f_page = NULL;
}

static void rds_iw_frag_free(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	BUG_ON(frag->f_page != NULL);
	kmem_cache_free(rds_iw_frag_slab, frag);
}

/*
 * We map a page at a time.  Its fragments are posted in order.  This
 * is called in fragment order as the fragments get receive completion events.
 * Only the last frag in the page performs the unmapping.
 *
 * It's OK for ring cleanup to call this in whatever order it likes because
 * DMA is not in flight and so we can unmap while other ring entries still
 * hold page references in their frags.
 */
static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic,
				   struct rds_iw_recv_work *recv)
{
	struct rds_page_frag *frag = recv->r_frag;

	rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
	if (frag->f_mapped)
		ib_dma_unmap_page(ic->i_cm_id->device,
				  frag->f_mapped,
				  RDS_FRAG_SIZE, DMA_FROM_DEVICE);
	frag->f_mapped = 0;
}

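/*
 * Set up each receive work entry once at connection setup: point its work
 * request at its scatter/gather list, aim the header SGE at this slot's
 * slice of the DMA-mapped i_recv_hdrs array, and leave the data SGE empty
 * until a fragment is actually posted by the refill path.
 */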
void rds_iw_recv_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_iwinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IW_RECV_SGE;

		sge = rds_iw_data_sge(ic, recv->r_sge);
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = 0;

		sge = rds_iw_header_sge(ic, recv->r_sge);
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;
	}
}

static void rds_iw_recv_clear_one(struct rds_iw_connection *ic,
				  struct rds_iw_recv_work *recv)
{
	if (recv->r_iwinc) {
		rds_inc_put(&recv->r_iwinc->ii_inc);
		recv->r_iwinc = NULL;
	}
	if (recv->r_frag) {
		rds_iw_recv_unmap_page(ic, recv);
		if (recv->r_frag->f_page)
			rds_iw_frag_drop_page(recv->r_frag);
		rds_iw_frag_free(recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_iw_recv_clear_ring(struct rds_iw_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_iw_recv_clear_one(ic, &ic->i_recvs[i]);

	if (ic->i_frag.f_page)
		rds_iw_frag_drop_page(&ic->i_frag);
}

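/*
 * Prepare one ring entry for posting: make sure it owns an rds_iw_incoming,
 * a fragment, and a DMA-mapped RDS_FRAG_SIZE slice of the connection's
 * current receive page.  ic->i_frag hands out successive slices of that
 * page; each slice takes a page reference, and the page itself is released
 * once the last slice (RDS_PAGE_LAST_OFF) has been handed out.  Returns 0
 * on success or -ENOMEM if an allocation or the DMA mapping fails.
 */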
static int rds_iw_recv_refill_one(struct rds_connection *conn,
				  struct rds_iw_recv_work *recv,
				  gfp_t kptr_gfp, gfp_t page_gfp)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	int ret = -ENOMEM;

	if (recv->r_iwinc == NULL) {
		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
			rds_iw_stats_inc(s_iw_rx_alloc_limit);
			goto out;
		}
		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
						 kptr_gfp);
		if (recv->r_iwinc == NULL) {
			atomic_dec(&rds_iw_allocation);
			goto out;
		}
		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
	}

	if (recv->r_frag == NULL) {
		recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
		if (recv->r_frag == NULL)
			goto out;
		INIT_LIST_HEAD(&recv->r_frag->f_item);
		recv->r_frag->f_page = NULL;
	}

	if (ic->i_frag.f_page == NULL) {
		ic->i_frag.f_page = alloc_page(page_gfp);
		if (ic->i_frag.f_page == NULL)
			goto out;
		ic->i_frag.f_offset = 0;
	}

	dma_addr = ib_dma_map_page(ic->i_cm_id->device,
				   ic->i_frag.f_page,
				   ic->i_frag.f_offset,
				   RDS_FRAG_SIZE,
				   DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
		goto out;

	/*
	 * Once we get the RDS_PAGE_LAST_OFF frag then rds_iw_recv_unmap_page()
	 * must be called on this recv.  This happens as completions hit
	 * in order or on connection shutdown.
	 */
	recv->r_frag->f_page = ic->i_frag.f_page;
	recv->r_frag->f_offset = ic->i_frag.f_offset;
	recv->r_frag->f_mapped = dma_addr;

	sge = rds_iw_data_sge(ic, recv->r_sge);
	sge->addr = dma_addr;
	sge->length = RDS_FRAG_SIZE;

	sge = rds_iw_header_sge(ic, recv->r_sge);
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	get_page(recv->r_frag->f_page);

	if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
		ic->i_frag.f_offset += RDS_FRAG_SIZE;
	} else {
		put_page(ic->i_frag.f_page);
		ic->i_frag.f_page = NULL;
		ic->i_frag.f_offset = 0;
	}

	ret = 0;
out:
	return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			ret = -EINVAL;
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_iw_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
		if (ret) {
			ret = -1;
			break;
		}

		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p iwinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_iwinc, recv->r_frag->f_page,
			 (long) recv->r_frag->f_mapped, ret);
		if (ret) {
			rds_iw_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			ret = -1;
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_iw_advertise_credits(conn, posted);

	if (ret)
		rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}

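/*
 * Drop the page reference held by every fragment still queued on this
 * incoming message and return the fragments to their slab cache.
 */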
void rds_iw_inc_purge(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	rdsdebug("purging iwinc %p inc %p\n", iwinc, inc);

	list_for_each_entry_safe(frag, pos, &iwinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_iw_frag_drop_page(frag);
		rds_iw_frag_free(frag);
	}
}

void rds_iw_inc_free(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);

	rds_iw_inc_purge(inc);
	rdsdebug("freeing iwinc %p inc %p\n", iwinc, inc);
	BUG_ON(!list_empty(&iwinc->ii_frags));
	kmem_cache_free(rds_iw_incoming_slab, iwinc);
	atomic_dec(&rds_iw_allocation);
	BUG_ON(atomic_read(&rds_iw_allocation) < 0);
}

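/*
 * Copy the message payload to userspace, walking the fragment list and the
 * caller's iovec in lockstep.  At most min(size, h_len) bytes are copied;
 * the return value is the number of bytes copied, or a negative errno if
 * copying to the user address faults.
 */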
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %lu] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 frag->f_page, frag->f_offset, frag_off);

		ret = rds_page_copy_to_user(frag->f_page,
					    frag->f_offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IW_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif

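/*
 * Post the single long-lived ACK work request.  The frame carries the most
 * recently received sequence number and any credits we want to advertise.
 * If the post fails we clear IB_ACK_IN_FLIGHT and re-set IB_ACK_REQUESTED
 * so that a later attempt will try again.
 */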
static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_iw_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_iw_stats_inc(s_iw_ack_send_failure);

		rds_iw_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_iw_stats_inc(s_iw_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_iw_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_iw_attempt_ack(struct rds_iw_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_iw_stats_inc(s_iw_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_iw_stats_inc(s_iw_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_iw_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_iw_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_iw_stats_inc(s_iw_ack_send_piggybacked);
	return rds_iw_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
 * them. But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient. By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_iw_cong_recv(struct rds_connection *conn,
			     struct rds_iw_incoming *iwinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(iwinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr, KM_SOFTIRQ0);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_iw_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

static void rds_iw_process_recv(struct rds_connection *conn,
				struct rds_iw_recv_work *recv, u32 byte_len,
				struct rds_iw_ack_state *state)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_incoming *iwinc = ic->i_iwinc;
	struct rds_header *ihdr, *hdr;

	rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
		 byte_len);

	if (byte_len < sizeof(struct rds_header)) {
		rds_iw_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	byte_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_iw_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_iw_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_iw_stats_inc(s_iw_ack_received);

		rds_iw_frag_drop_page(recv->r_frag);
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.  Copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (iwinc == NULL) {
		iwinc = recv->r_iwinc;
		recv->r_iwinc = NULL;
		ic->i_iwinc = iwinc;

		hdr = &iwinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &iwinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_iw_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &iwinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_iwinc = NULL;

		if (iwinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_iw_cong_recv(conn, iwinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &iwinc->ii_inc, GFP_ATOMIC,
					  KM_SOFTIRQ0);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&iwinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_iw_stats_inc(s_iw_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

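/*
 * Drain the receive completion queue.  Each completion unmaps its fragment;
 * successful completions on a live (or still connecting) connection are fed
 * to rds_iw_process_recv(), and the ring entry is then returned to the pool.
 */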
static inline void rds_poll_cq(struct rds_iw_connection *ic,
			       struct rds_iw_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_iw_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_rx_cq_event);

		recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];

		rds_iw_recv_unmap_page(ic, recv);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_iw_process_recv(conn, recv, wc.byte_len, state);
			} else {
				rds_iw_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_iw_ring_free(&ic->i_recv_ring, 1);
	}
}

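/*
 * Receive tasklet: poll the CQ, re-arm the completion notification, then
 * poll again to catch completions that raced with the re-arm.  Afterwards
 * apply the accumulated ACK state, ack the peer if needed, and kick the
 * worker thread if the receive ring is running low.
 */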
void rds_iw_recv_tasklet_fn(unsigned long data)
{
	struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_iw_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_iw_ring_empty(&ic->i_recv_ring))
		rds_iw_stats_inc(s_iw_rx_ring_empty);

	/*
	 * If the ring is running low, then schedule the thread to refill.
	 */
	if (rds_iw_ring_low(&ic->i_recv_ring))
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}

int rds_iw_recv(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);

	/*
	 * If we get a temporary posting failure in this context then
	 * we're really low and we want the caller to back off for a bit.
	 */
	mutex_lock(&ic->i_recv_mutex);
	if (rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
		ret = -ENOMEM;
	else
		rds_iw_stats_inc(s_iw_rx_refill_from_thread);
	mutex_unlock(&ic->i_recv_mutex);

	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	return ret;
}

int __init rds_iw_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_iw_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
					sizeof(struct rds_iw_incoming),
					0, 0, NULL);
	if (rds_iw_incoming_slab == NULL)
		goto out;

	rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (rds_iw_frag_slab == NULL)
		kmem_cache_destroy(rds_iw_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}

void rds_iw_recv_exit(void)
{
	kmem_cache_destroy(rds_iw_incoming_slab);
	kmem_cache_destroy(rds_iw_frag_slab);
}