// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);

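/*
 * Poke a connection: queue it on its local endpoint's attention list and wake
 * the I/O thread to deal with it.  A ref is taken on the connection if it
 * isn't already queued; that ref is passed along with the connection to the
 * I/O thread.
 */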
void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	struct rxrpc_local *local = conn->local;
	bool busy;

	if (WARN_ON_ONCE(!local))
		return;

	spin_lock_bh(&local->lock);
	busy = !list_empty(&conn->attend_link);
	if (!busy) {
		rxrpc_get_connection(conn, why);
		list_add_tail(&conn->attend_link, &local->conn_attend_q);
	}
	spin_unlock_bh(&local->lock);
	rxrpc_wake_up_io_thread(local);
}

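/*
 * Connection timer expired.  Poke the connection so that the I/O thread gives
 * it attention.
 */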
static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		mutex_init(&conn->security_lock);
		mutex_init(&conn->tx_data_alloc_lock);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their
	 * IDs are unique for this machine.
	 */
	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
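	/* A zero refcount means the connection is awaiting destruction and
	 * must not be used.
	 */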
	if (!conn || refcount_read(&conn->ref) == 0) {
		_debug("no conn");
		goto not_found;
	}

	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port ||
		    peer->srx.transport.sin.sin_addr.s_addr !=
		    srx->transport.sin.sin_addr.s_addr)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port ||
		    memcmp(&peer->srx.transport.sin6.sin6_addr,
			   &srx->transport.sin6.sin6_addr,
			   sizeof(struct in6_addr)) != 0)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}

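/* Because the lookup above takes no ref, a caller that wants to use the
 * connection outside of the RCU read lock must pin it first, e.g. along these
 * lines (illustrative sketch only):
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_client_connection_rcu(local, &srx, skb);
 *	conn = rxrpc_get_connection_maybe(conn, why);
 *	rcu_read_unlock();
 */
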
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must release the call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (chan->call == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

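		/* Remember the ID of the call that has just completed on this
		 * channel so that its final ACK or ABORT can be regenerated if
		 * the peer retransmits for it.
		 */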
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;
		chan->call = NULL;
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	rxrpc_see_call(call, rxrpc_call_see_disconnected);

	call->peer->cong_ssthresh = call->cong_ssthresh;

	if (!hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

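	/* A client call is detached from its connection bundle; a service call
	 * releases its channel directly and, if it was the last active call on
	 * the connection, arms the reap timer.
	 */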
	if (rxrpc_is_client_call(call)) {
		rxrpc_disconnect_client_call(call->bundle, call);
	} else {
		__rxrpc_disconnect_call(conn, call);
		conn->idle_timestamp = jiffies;
		if (atomic_dec_and_test(&conn->active))
			rxrpc_set_service_reap_timer(conn->rxnet,
						     jiffies + rxrpc_connection_expiry * HZ);
	}

	rxrpc_put_call(call, rxrpc_call_put_io_thread);
}

/*
 * Queue a connection's work processor if the connection is still active.  No
 * ref is passed to the work queue; the queueing is merely noted for tracing.
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	if (conn) {
		int r = refcount_read(&conn->ref);

		trace_rxrpc_conn(conn->debug_id, r, why);
	}
}

/*
 * Get a ref on a connection.
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

	__refcount_inc(&conn->ref, &r);
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
			   enum rxrpc_conn_trace why)
{
	int r;

	if (conn) {
		if (__refcount_inc_not_zero(&conn->ref, &r))
			trace_rxrpc_conn(conn->debug_id, r + 1, why);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
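	/* timer_reduce() only brings the expiry time forward, so the earliest
	 * requested reap time wins.
	 */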
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

/*
 * destroy a virtual connection
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

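	/* Let rxrpc_destroy_all_connections() know if this was the last
	 * connection in the namespace.
	 */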
	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}

/*
 * Clean up a dead connection.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	ASSERT(!conn->channels[0].call &&
	       !conn->channels[1].call &&
	       !conn->channels[2].call &&
	       !conn->channels[3].call);
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	if (conn->tx_data_alloc.va)
		__page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
					conn->tx_data_alloc.pagecnt_bias);
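	/* The actual freeing is deferred for an RCU grace period as lookups
	 * such as rxrpc_find_client_connection_rcu() may still be able to see
	 * this connection.
	 */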
	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}

/*
 * Drop a ref on a connection.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

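	/* Now dispose of the connections that were moved onto the graveyard
	 * list.
	 */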
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

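	/* Drop the initial count held on nr_conns so that the wait below can
	 * complete once the last connection has been RCU-freed.
	 */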
	atomic_dec(&rxnet->nr_conns);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}