/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
#include "rdma.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft lockup
 * watchdog will kick our shin.
 * Also, it seems fairer not to let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
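
/*
 * Note: with 0444 permissions the parameter is read-only through sysfs,
 * so it can only be set at load time, e.g. "modprobe rds send_batch_count=0"
 * (assuming RDS is built modular as the rds module).
 */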

/*
 * Reset the send state. Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}
	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_rdma_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
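
/*
 * Illustrative sketch only (not code in this file): a connection teardown
 * path would serialize with rds_send_xmit() via c_send_lock before
 * resetting the send state, roughly:
 *
 *	mutex_lock(&conn->c_send_lock);
 *	rds_send_reset(conn);
 *	mutex_unlock(&conn->c_send_lock);
 */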

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The sem holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the sem.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {
		/*
		 * See if we need to send a congestion map update if we're
		 * between sending messages.  The send_sem protects our sole
		 * use of c_map_offset and _bytes.
		 * Note this is used only by transports that define a special
		 * xmit_cong_map function. For all others, we allocate
		 * a cong_map message and treat it just like any other send.
		 */
		if (conn->c_map_bytes) {
			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
						conn->c_map_offset);
			if (ret <= 0)
				break;

			conn->c_map_offset += ret;
			conn->c_map_bytes -= ret;
			if (conn->c_map_bytes)
				continue;
		}

		/* If we're done sending the current message, clear the
		 * offset and S/G temporaries.
		 */
		rm = conn->c_xmit_rm;
		if (rm != NULL &&
		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
		    conn->c_xmit_sg == rm->m_nents) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;

			/* Release the reference to the previous message. */
			rds_message_put(rm);
			rm = NULL;
		}

		/* If we're asked to send a cong map update, do so.
		 */
		if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map != NULL) {
				conn->c_map_offset = 0;
				conn->c_map_bytes = sizeof(struct rds_header) +
					RDS_CONG_MAP_BYTES;
				continue;
			}

			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}

			conn->c_xmit_rm = rm;
		}

		/*
		 * Grab the next message from the send queue, if there is one.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (rm == NULL) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (rm == NULL) {
				was_empty = 1;
				break;
			}

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->m_rdma_op &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/*
		 * Try and send an rdma message.  Let's see if we can
		 * keep this simple and require that the transport either
		 * send the whole rdma or none of it.
		 */
		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
		    conn->c_xmit_sg < rm->m_nents) {
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			/* Walk the transmitted byte count forward through the
			 * scatterlist, advancing to the next entry whenever
			 * the current one has been fully consumed. */
			sg = &rm->m_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->m_nents);
				}
			}
		}
	}

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock.  If we check
	 * for queued messages after dropping the sem then either we'll
	 * see the queued message or the queuer will get the sem.  If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty.  It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);

	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
		/* We exhausted the send quota, but there's work left to
		 * do. Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}

	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
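
/*
 * A minimal sketch of the expected caller, assuming the usual worker
 * wiring (rds_wq and c_send_w, as used elsewhere in this file): on
 * -EAGAIN the worker requeues itself so another batch is attempted.
 *
 *	static void example_send_worker(struct work_struct *work)
 *	{
 *		struct rds_connection *conn = container_of(work,
 *				struct rds_connection, c_send_w.work);
 *
 *		if (rds_send_xmit(conn) == -EAGAIN)
 *			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 *	}
 */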

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
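
/*
 * Transports with their own notion of acking supply an is_acked_func; the
 * callback decides whether 'ack' covers 'rm'.  A hypothetical sketch for a
 * transport whose acks are byte counts rather than sequence numbers:
 *
 *	static int example_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		u64 seq = be64_to_cpu(rm->m_inc.i_hdr.h_sequence);
 *
 *		return example_seq_to_bytes(seq) <= ack;
 *	}
 *
 * where example_seq_to_bytes() is an invented helper for illustration.
 * Passing NULL instead falls back on the sequence comparison above.
 */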

/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number less than the given one; in other words,
 * everything up to (but not including) that sequence number has been acked.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	/* Both lists are ordered by sequence number, so examining the first
	 * entry of each is enough; hence the unconditional break. */
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_rdma_op *ro;
	struct rds_notifier *notifier;

	spin_lock(&rm->m_rs_lock);

	ro = rm->m_rdma_op;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro && ro->r_notify && ro->r_notifier) {
		notifier = ro->r_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->r_notifier = NULL;
	}

	spin_unlock(&rm->m_rs_lock);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
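
/*
 * Sketch of the expected call site, for illustration: a transport's send
 * completion handler, once it knows the final status of the RDMA op, does
 * something like
 *
 *	rds_rdma_send_complete(rm, err ? RDS_RDMA_OTHER_ERROR
 *				       : RDS_RDMA_SUCCESS);
 *
 * (with 'err' standing in for the transport's completion status) before
 * dropping its reference on the message.
 */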

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rds_rdma_op *ro;

	ro = rm->m_rdma_op;
	if (ro && ro->r_notify && ro->r_notifier) {
		ro->r_notifier->n_status = status;
		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
		ro->r_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rds_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
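
/*
 * Illustrative use (a sketch, not lifted from a transport): on a remote
 * access error the completion handler only has the failed rds_rdma_op, and
 * recovers the owning message through this lookup:
 *
 *	rm = rds_send_get_message(conn, op);
 *	if (rm) {
 *		rds_rdma_send_complete(rm, RDS_RDMA_REMOTE_ERROR);
 *		rds_message_put(rm);
 *	}
 */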

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rds_rdma_op *ro = rm->m_rdma_op;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro && ro->r_notifier && (status || ro->r_notify)) {
				notifier = ro->r_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				ro->r_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
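
/*
 * A sketch of the intended caller (names hypothetical): a transport's ack
 * processing path would look roughly like
 *
 *	rds_send_drop_acked(conn, latest_ack, example_is_acked);
 *
 * passing NULL as the callback to use the plain header sequence comparison
 * in rds_send_is_acked().
 */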

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags, flags2;
	LIST_HEAD(list);
	int wake = 0;

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		wake = 1;
		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	if (wake)
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	conn = NULL;

	/* now remove the messages from the conn list as needed */
	list_for_each_entry(rm, &list, m_sock_item) {
		/* We do this here rather than in the loop above, so that
		 * we don't have to nest m_rs_lock under rs->rs_lock */
		spin_lock_irqsave(&rm->m_rs_lock, flags2);
		/* If this is an RDMA operation, notify the app. */
		spin_lock(&rs->rs_lock);
		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);
		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags2);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the conn.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 */
		if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
			continue;

		if (conn != rm->m_inc.i_conn) {
			if (conn)
				spin_unlock_irqrestore(&conn->c_lock, flags);
			conn = rm->m_inc.i_conn;
			spin_lock_irqsave(&conn->c_lock, flags);
		}

		if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			list_del_init(&rm->m_conn_item);
			rds_message_put(rm);
		}
	}

	if (conn)
		spin_unlock_irqrestore(&conn->c_lock, flags);

	if (wake)
		rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
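
/*
 * For reference, userspace reaches rds_send_drop_to() through the
 * RDS_CANCEL_SENT_TO socket option; a sketch (hypothetical destination,
 * error handling omitted):
 *
 *	struct sockaddr_in dest = { .sin_family = AF_INET };
 *	setsockopt(fd, SOL_RDS, RDS_CANCEL_SENT_TO, &dest, sizeof(dest));
 */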

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If we queued only when there was room for the whole message,
	 * userspace could get -EAGAIN while poll() still reported send
	 * room, and spin if snd_bytes isn't freed up by incoming acks.
	 * So we test the *old* value of rs_snd_bytes instead: the last
	 * message may overshoot the buffer, but once it has, poll() knows
	 * that no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->m_rdma_cookie and rm->m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
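
/*
 * Userspace sketch of attaching one of these control messages to sendmsg()
 * (illustrative only; 'args' and its type depend on the cmsg, and error
 * handling is omitted):
 *
 *	char buf[CMSG_SPACE(sizeof(args))];
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = buf;
 *	msg.msg_controllen = sizeof(buf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type = RDS_CMSG_RDMA_MAP;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(args));
 *	memcpy(CMSG_DATA(cmsg), &args, sizeof(args));
 *	sendmsg(fd, &msg, 0);
 */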

int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's mirroring of BSD error message compatibility:
	 * refuse flags we don't support */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN;
		goto out;
	}

	rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
	if (IS_ERR(rm)) {
		ret = PTR_ERR(rm);
		rm = NULL;
		goto out;
	}

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
	    conn->c_trans->xmit_rdma == NULL) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				rm->m_rdma_op, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
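
/*
 * Minimal userspace sketch of the send path above (hypothetical addresses,
 * error handling omitted).  RDS sockets must be bound before sending:
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *
 *	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&faddr, sizeof(faddr));
 */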

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (rm == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}