/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

static struct kmem_cache *rds_tcp_incoming_slab;

void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * Copy the skbs queued on this incoming message into the receiver's iovec.
 * This is pretty lame, but, whatever.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			     size_t size)
{
	struct rds_tcp_incoming *tinc;
	struct iovec *iov, tmp;
	struct sk_buff *skb;
	unsigned long to_copy, skb_off;
	int ret = 0;

	if (size == 0)
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	iov = first_iov;
	tmp = *iov;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			while (tmp.iov_len == 0) {
				iov++;
				tmp = *iov;
			}

			to_copy = min(tmp.iov_len, size);
			to_copy = min(to_copy, skb->len - skb_off);

			rdsdebug("ret %d size %zu skb %p skb_off %lu "
				 "skblen %d iov_base %p iov_len %zu cpy %lu\n",
				 ret, size, skb, skb_off, skb->len,
				 tmp.iov_base, tmp.iov_len, to_copy);

			/* modifies tmp as it copies */
			if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
						    to_copy)) {
				ret = -EFAULT;
				goto out;
			}

			rds_stats_add(s_copy_to_user, to_copy);
			size -= to_copy;
			ret += to_copy;
			skb_off += to_copy;
			if (size == 0)
				goto out;
		}
	}
out:
	return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */

static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

	rds_cong_map_updated(map, ~(u64) 0);
}

struct rds_tcp_desc_arg {
	struct rds_connection *conn;
	gfp_t gfp;
	enum km_type km;
};

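/*
 * Called by tcp_read_sock() for each chunk of in-order data on the socket.
 * We accumulate bytes into the connection's current rds_tcp_incoming: first
 * the rds_header, then clones of the data skbs until the length given in the
 * header has arrived.  The return value tells tcp_read_sock() how many bytes
 * we consumed.
 */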
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_connection *conn = arg->conn;
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (tinc == NULL) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (tinc == NULL) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("alloced tinc %p\n", tinc);
			rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
			skb_queue_head_init(&tinc->ti_skb_list);
		}

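		/* Fill in the rds_header first; t_tinc_hdr_rem tracks how many
		 * header bytes are still missing. */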
		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
			}
		}

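		/* Queue the message payload as trimmed clones of the incoming
		 * skb rather than copying the data around. */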
		if (left && tc->t_tinc_data_rem) {
			clone = skb_clone(skb, arg->gfp);
			if (clone == NULL) {
				desc->error = -ENOMEM;
				goto out;
			}

			to_copy = min(tc->t_tinc_data_rem, left);
			pskb_pull(clone, offset);
			pskb_trim(clone, to_copy);
			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

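		/* The header and all of the data have arrived; hand the
		 * message off and reset the state for the next header. */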
		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, conn->c_faddr,
						  conn->c_laddr, &tinc->ti_inc,
						  arg->gfp, arg->km);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

/* the caller has to hold the sock lock */
int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, enum km_type km)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* It's like glib in the kernel! */
	arg.conn = conn;
	arg.gfp = gfp;
	arg.km = km;
	desc.arg.data = &arg;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble; blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0);
	release_sock(sock->sk);

	return ret;
}

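/*
 * Installed as sk->sk_data_ready on the RDS TCP socket, so this runs in
 * bottom-half context; rds_tcp_read_sock() is therefore called with
 * GFP_ATOMIC.  If that atomic allocation fails we punt to the recv worker,
 * which retries from process context where it can block.
 */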
void rds_tcp_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn == NULL) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = conn->c_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk, bytes);
}

int __init rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
					sizeof(struct rds_tcp_incoming),
					0, 0, NULL);
	if (rds_tcp_incoming_slab == NULL)
		return -ENOMEM;
	return 0;
}

void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}