1/* 2 * Copyright (c) 2006 Oracle. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33#include <linux/kernel.h> 34#include <linux/slab.h> 35#include <net/sock.h> 36#include <linux/in.h> 37 38#include "rds.h" 39#include "rdma.h" 40 41void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, 42 __be32 saddr) 43{ 44 atomic_set(&inc->i_refcount, 1); 45 INIT_LIST_HEAD(&inc->i_item); 46 inc->i_conn = conn; 47 inc->i_saddr = saddr; 48 inc->i_rdma_cookie = 0; 49} 50EXPORT_SYMBOL_GPL(rds_inc_init); 51 52void rds_inc_addref(struct rds_incoming *inc) 53{ 54 rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount)); 55 atomic_inc(&inc->i_refcount); 56} 57EXPORT_SYMBOL_GPL(rds_inc_addref); 58 59void rds_inc_put(struct rds_incoming *inc) 60{ 61 rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount)); 62 if (atomic_dec_and_test(&inc->i_refcount)) { 63 BUG_ON(!list_empty(&inc->i_item)); 64 65 inc->i_conn->c_trans->inc_free(inc); 66 } 67} 68EXPORT_SYMBOL_GPL(rds_inc_put); 69 70static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, 71 struct rds_cong_map *map, 72 int delta, __be16 port) 73{ 74 int now_congested; 75 76 if (delta == 0) 77 return; 78 79 rs->rs_rcv_bytes += delta; 80 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); 81 82 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " 83 "now_cong %d delta %d\n", 84 rs, &rs->rs_bound_addr, 85 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes, 86 rds_sk_rcvbuf(rs), now_congested, delta); 87 88 /* wasn't -> am congested */ 89 if (!rs->rs_congested && now_congested) { 90 rs->rs_congested = 1; 91 rds_cong_set_bit(map, port); 92 rds_cong_queue_updates(map); 93 } 94 /* was -> aren't congested */ 95 /* Require more free space before reporting uncongested to prevent 96 bouncing cong/uncong state too often */ 97 else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) { 98 rs->rs_congested = 0; 99 rds_cong_clear_bit(map, port); 100 rds_cong_queue_updates(map); 101 } 102 103 /* do nothing if no change in cong state */ 104} 105 106/* 107 * Process all extension headers that come with this message. 108 */ 109static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs) 110{ 111 struct rds_header *hdr = &inc->i_hdr; 112 unsigned int pos = 0, type, len; 113 union { 114 struct rds_ext_header_version version; 115 struct rds_ext_header_rdma rdma; 116 struct rds_ext_header_rdma_dest rdma_dest; 117 } buffer; 118 119 while (1) { 120 len = sizeof(buffer); 121 type = rds_message_next_extension(hdr, &pos, &buffer, &len); 122 if (type == RDS_EXTHDR_NONE) 123 break; 124 /* Process extension header here */ 125 switch (type) { 126 case RDS_EXTHDR_RDMA: 127 rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0); 128 break; 129 130 case RDS_EXTHDR_RDMA_DEST: 131 /* We ignore the size for now. We could stash it 132 * somewhere and use it for error checking. */ 133 inc->i_rdma_cookie = rds_rdma_make_cookie( 134 be32_to_cpu(buffer.rdma_dest.h_rdma_rkey), 135 be32_to_cpu(buffer.rdma_dest.h_rdma_offset)); 136 137 break; 138 } 139 } 140} 141 142/* 143 * The transport must make sure that this is serialized against other 144 * rx and conn reset on this specific conn. 145 * 146 * We currently assert that only one fragmented message will be sent 147 * down a connection at a time. This lets us reassemble in the conn 148 * instead of per-flow which means that we don't have to go digging through 149 * flows to tear down partial reassembly progress on conn failure and 150 * we save flow lookup and locking for each frag arrival. It does mean 151 * that small messages will wait behind large ones. Fragmenting at all 152 * is only to reduce the memory consumption of pre-posted buffers. 153 * 154 * The caller passes in saddr and daddr instead of us getting it from the 155 * conn. This lets loopback, who only has one conn for both directions, 156 * tell us which roles the addrs in the conn are playing for this message. 157 */ 158void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, 159 struct rds_incoming *inc, gfp_t gfp, enum km_type km) 160{ 161 struct rds_sock *rs = NULL; 162 struct sock *sk; 163 unsigned long flags; 164 165 inc->i_conn = conn; 166 inc->i_rx_jiffies = jiffies; 167 168 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u " 169 "flags 0x%x rx_jiffies %lu\n", conn, 170 (unsigned long long)conn->c_next_rx_seq, 171 inc, 172 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence), 173 be32_to_cpu(inc->i_hdr.h_len), 174 be16_to_cpu(inc->i_hdr.h_sport), 175 be16_to_cpu(inc->i_hdr.h_dport), 176 inc->i_hdr.h_flags, 177 inc->i_rx_jiffies); 178 179 if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq && 180 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) { 181 rds_stats_inc(s_recv_drop_old_seq); 182 goto out; 183 } 184 conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1; 185 186 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) { 187 rds_stats_inc(s_recv_ping); 188 rds_send_pong(conn, inc->i_hdr.h_sport); 189 goto out; 190 } 191 192 rs = rds_find_bound(daddr, inc->i_hdr.h_dport); 193 if (rs == NULL) { 194 rds_stats_inc(s_recv_drop_no_sock); 195 goto out; 196 } 197 198 /* Process extension headers */ 199 rds_recv_incoming_exthdrs(inc, rs); 200 201 /* We can be racing with rds_release() which marks the socket dead. */ 202 sk = rds_rs_to_sk(rs); 203 204 /* serialize with rds_release -> sock_orphan */ 205 write_lock_irqsave(&rs->rs_recv_lock, flags); 206 if (!sock_flag(sk, SOCK_DEAD)) { 207 rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs); 208 rds_stats_inc(s_recv_queued); 209 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 210 be32_to_cpu(inc->i_hdr.h_len), 211 inc->i_hdr.h_dport); 212 rds_inc_addref(inc); 213 list_add_tail(&inc->i_item, &rs->rs_recv_queue); 214 __rds_wake_sk_sleep(sk); 215 } else { 216 rds_stats_inc(s_recv_drop_dead_sock); 217 } 218 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 219 220out: 221 if (rs) 222 rds_sock_put(rs); 223} 224EXPORT_SYMBOL_GPL(rds_recv_incoming); 225 226/* 227 * be very careful here. This is being called as the condition in 228 * wait_event_*() needs to cope with being called many times. 229 */ 230static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc) 231{ 232 unsigned long flags; 233 234 if (*inc == NULL) { 235 read_lock_irqsave(&rs->rs_recv_lock, flags); 236 if (!list_empty(&rs->rs_recv_queue)) { 237 *inc = list_entry(rs->rs_recv_queue.next, 238 struct rds_incoming, 239 i_item); 240 rds_inc_addref(*inc); 241 } 242 read_unlock_irqrestore(&rs->rs_recv_lock, flags); 243 } 244 245 return *inc != NULL; 246} 247 248static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, 249 int drop) 250{ 251 struct sock *sk = rds_rs_to_sk(rs); 252 int ret = 0; 253 unsigned long flags; 254 255 write_lock_irqsave(&rs->rs_recv_lock, flags); 256 if (!list_empty(&inc->i_item)) { 257 ret = 1; 258 if (drop) { 259 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 260 -be32_to_cpu(inc->i_hdr.h_len), 261 inc->i_hdr.h_dport); 262 list_del_init(&inc->i_item); 263 rds_inc_put(inc); 264 } 265 } 266 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 267 268 rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop); 269 return ret; 270} 271 272/* 273 * Pull errors off the error queue. 274 * If msghdr is NULL, we will just purge the error queue. 275 */ 276int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) 277{ 278 struct rds_notifier *notifier; 279 struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ 280 unsigned int count = 0, max_messages = ~0U; 281 unsigned long flags; 282 LIST_HEAD(copy); 283 int err = 0; 284 285 286 /* put_cmsg copies to user space and thus may sleep. We can't do this 287 * with rs_lock held, so first grab as many notifications as we can stuff 288 * in the user provided cmsg buffer. We don't try to copy more, to avoid 289 * losing notifications - except when the buffer is so small that it wouldn't 290 * even hold a single notification. Then we give him as much of this single 291 * msg as we can squeeze in, and set MSG_CTRUNC. 292 */ 293 if (msghdr) { 294 max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg)); 295 if (!max_messages) 296 max_messages = 1; 297 } 298 299 spin_lock_irqsave(&rs->rs_lock, flags); 300 while (!list_empty(&rs->rs_notify_queue) && count < max_messages) { 301 notifier = list_entry(rs->rs_notify_queue.next, 302 struct rds_notifier, n_list); 303 list_move(¬ifier->n_list, ©); 304 count++; 305 } 306 spin_unlock_irqrestore(&rs->rs_lock, flags); 307 308 if (!count) 309 return 0; 310 311 while (!list_empty(©)) { 312 notifier = list_entry(copy.next, struct rds_notifier, n_list); 313 314 if (msghdr) { 315 cmsg.user_token = notifier->n_user_token; 316 cmsg.status = notifier->n_status; 317 318 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS, 319 sizeof(cmsg), &cmsg); 320 if (err) 321 break; 322 } 323 324 list_del_init(¬ifier->n_list); 325 kfree(notifier); 326 } 327 328 /* If we bailed out because of an error in put_cmsg, 329 * we may be left with one or more notifications that we 330 * didn't process. Return them to the head of the list. */ 331 if (!list_empty(©)) { 332 spin_lock_irqsave(&rs->rs_lock, flags); 333 list_splice(©, &rs->rs_notify_queue); 334 spin_unlock_irqrestore(&rs->rs_lock, flags); 335 } 336 337 return err; 338} 339 340/* 341 * Queue a congestion notification 342 */ 343static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr) 344{ 345 uint64_t notify = rs->rs_cong_notify; 346 unsigned long flags; 347 int err; 348 349 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE, 350 sizeof(notify), ¬ify); 351 if (err) 352 return err; 353 354 spin_lock_irqsave(&rs->rs_lock, flags); 355 rs->rs_cong_notify &= ~notify; 356 spin_unlock_irqrestore(&rs->rs_lock, flags); 357 358 return 0; 359} 360 361/* 362 * Receive any control messages. 363 */ 364static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg) 365{ 366 int ret = 0; 367 368 if (inc->i_rdma_cookie) { 369 ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST, 370 sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie); 371 if (ret) 372 return ret; 373 } 374 375 return 0; 376} 377 378int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 379 size_t size, int msg_flags) 380{ 381 struct sock *sk = sock->sk; 382 struct rds_sock *rs = rds_sk_to_rs(sk); 383 long timeo; 384 int ret = 0, nonblock = msg_flags & MSG_DONTWAIT; 385 struct sockaddr_in *sin; 386 struct rds_incoming *inc = NULL; 387 388 /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */ 389 timeo = sock_rcvtimeo(sk, nonblock); 390 391 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo); 392 393 if (msg_flags & MSG_OOB) 394 goto out; 395 396 while (1) { 397 /* If there are pending notifications, do those - and nothing else */ 398 if (!list_empty(&rs->rs_notify_queue)) { 399 ret = rds_notify_queue_get(rs, msg); 400 break; 401 } 402 403 if (rs->rs_cong_notify) { 404 ret = rds_notify_cong(rs, msg); 405 break; 406 } 407 408 if (!rds_next_incoming(rs, &inc)) { 409 if (nonblock) { 410 ret = -EAGAIN; 411 break; 412 } 413 414 timeo = wait_event_interruptible_timeout(*sk_sleep(sk), 415 (!list_empty(&rs->rs_notify_queue) || 416 rs->rs_cong_notify || 417 rds_next_incoming(rs, &inc)), timeo); 418 rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, 419 timeo); 420 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) 421 continue; 422 423 ret = timeo; 424 if (ret == 0) 425 ret = -ETIMEDOUT; 426 break; 427 } 428 429 rdsdebug("copying inc %p from %pI4:%u to user\n", inc, 430 &inc->i_conn->c_faddr, 431 ntohs(inc->i_hdr.h_sport)); 432 ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov, 433 size); 434 if (ret < 0) 435 break; 436 437 /* 438 * if the message we just copied isn't at the head of the 439 * recv queue then someone else raced us to return it, try 440 * to get the next message. 441 */ 442 if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) { 443 rds_inc_put(inc); 444 inc = NULL; 445 rds_stats_inc(s_recv_deliver_raced); 446 continue; 447 } 448 449 if (ret < be32_to_cpu(inc->i_hdr.h_len)) { 450 if (msg_flags & MSG_TRUNC) 451 ret = be32_to_cpu(inc->i_hdr.h_len); 452 msg->msg_flags |= MSG_TRUNC; 453 } 454 455 if (rds_cmsg_recv(inc, msg)) { 456 ret = -EFAULT; 457 goto out; 458 } 459 460 rds_stats_inc(s_recv_delivered); 461 462 sin = (struct sockaddr_in *)msg->msg_name; 463 if (sin) { 464 sin->sin_family = AF_INET; 465 sin->sin_port = inc->i_hdr.h_sport; 466 sin->sin_addr.s_addr = inc->i_saddr; 467 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 468 } 469 break; 470 } 471 472 if (inc) 473 rds_inc_put(inc); 474 475out: 476 return ret; 477} 478 479/* 480 * The socket is being shut down and we're asked to drop messages that were 481 * queued for recvmsg. The caller has unbound the socket so the receive path 482 * won't queue any more incoming fragments or messages on the socket. 483 */ 484void rds_clear_recv_queue(struct rds_sock *rs) 485{ 486 struct sock *sk = rds_rs_to_sk(rs); 487 struct rds_incoming *inc, *tmp; 488 unsigned long flags; 489 490 write_lock_irqsave(&rs->rs_recv_lock, flags); 491 list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) { 492 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 493 -be32_to_cpu(inc->i_hdr.h_len), 494 inc->i_hdr.h_dport); 495 list_del_init(&inc->i_item); 496 rds_inc_put(inc); 497 } 498 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 499} 500 501/* 502 * inc->i_saddr isn't used here because it is only set in the receive 503 * path. 504 */ 505void rds_inc_info_copy(struct rds_incoming *inc, 506 struct rds_info_iterator *iter, 507 __be32 saddr, __be32 daddr, int flip) 508{ 509 struct rds_info_message minfo; 510 511 minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence); 512 minfo.len = be32_to_cpu(inc->i_hdr.h_len); 513 514 if (flip) { 515 minfo.laddr = daddr; 516 minfo.faddr = saddr; 517 minfo.lport = inc->i_hdr.h_dport; 518 minfo.fport = inc->i_hdr.h_sport; 519 } else { 520 minfo.laddr = saddr; 521 minfo.faddr = daddr; 522 minfo.lport = inc->i_hdr.h_sport; 523 minfo.fport = inc->i_hdr.h_dport; 524 } 525 526 rds_info_copy(iter, &minfo, sizeof(minfo)); 527} 528