// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *              based on prototype from Frank Blaschka
 */

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>
#include <linux/splice.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"
#include "smc_sysctl.h"

static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

static struct workqueue_struct	*smc_tcp_ls_wq;	/* wq for tcp listen work */
struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	void *hdr;

	if (cb_ctx->pos[0])
		goto out;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_DUMP_HS_LIMITATION);
	if (!hdr)
		return -ENOMEM;

	if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED,
		       sock_net(skb->sk)->smc.limit_smc_hs))
		goto err;

	genlmsg_end(skb, hdr);
	cb_ctx->pos[0] = 1;
out:
	return skb->len;
err:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
{
	sock_net(skb->sk)->smc.limit_smc_hs = true;
	return 0;
}

int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
{
	sock_net(skb->sk)->smc.limit_smc_hs = false;
	return 0;
}

static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct smc_sock *smc;
	struct sock *child;

	smc = smc_clcsock_user_data(sk);

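	/* limit incoming connections by the sum of the TCP accept backlog
	 * and the SMC handshakes still being processed, so that queued
	 * handshake work cannot grow beyond the listen backlog
	 */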
	if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
				sk->sk_max_ack_backlog)
		goto drop;

	if (sk_acceptq_is_full(&smc->sk)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	/* passthrough to original syn recv sock fct */
	child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
					       own_req);
	/* child must not inherit smc or its ops */
	if (child) {
		rcu_assign_sk_user_data(child, NULL);

		/* v4-mapped sockets don't inherit parent ops. Don't restore. */
		if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
			inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
	}
	return child;

drop:
	dst_release(dst);
	tcp_listendrop(sk);
	return NULL;
}

static bool smc_hs_congested(const struct sock *sk)
{
	const struct smc_sock *smc;

	smc = smc_clcsock_user_data(sk);

	if (!smc)
		return true;

	if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
		return true;

	return false;
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	write_unlock_bh(&h->lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

/* This is called just before the user releases the sock lock. Do the
 * work that could not be done in BH context while the user was holding
 * the sock lock.
 */
static void smc_release_cb(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	if (smc->conn.tx_in_release_sock) {
		smc_tx_pending(&smc->conn);
		smc->conn.tx_in_release_sock = false;
	}
}

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.release_cb	= smc_release_cb,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.release_cb	= smc_release_cb,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
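
/* Userspace reaches these protos through the AF_SMC address family and
 * otherwise treats an SMC socket like a TCP one, e.g. (illustrative only):
 *	sd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC); // SMCPROTO_SMC6: IPv6
 * followed by the usual bind()/listen()/connect()/accept() calls.
 */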

static void smc_fback_restore_callbacks(struct smc_sock *smc)
{
	struct sock *clcsk = smc->clcsock->sk;

	write_lock_bh(&clcsk->sk_callback_lock);
	clcsk->sk_user_data = NULL;

	smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
	smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
	smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
	smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);

	write_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
		smc_fback_restore_callbacks(smc);
	}
}

static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		smc_sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}

static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int old_state, rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	old_state = sk->sk_state;

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && old_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
	    !smc->use_fallback)
		smc_close_active_abort(smc);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;
}

static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
	WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	mutex_init(&smc->clcsock_release_lock);
	smc_init_saved_callbacks(smc);

	return sk;
}

static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	smc->clcsock->sk->sk_reuseport = sk->sk_reuseport;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}

/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))

/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */
static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
				     unsigned long mask)
{
	struct net *nnet = sock_net(nsk);

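	/* the flag mask doubles as a direction indicator: copying towards
	 * the internal clc socket (SK_FLAGS_SMC_TO_CLC) uses the TCP sysctl
	 * defaults, the opposite direction the SMC sysctl defaults
	 */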
	nsk->sk_userlocks = osk->sk_userlocks;
	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
		nsk->sk_sndbuf = osk->sk_sndbuf;
	} else {
		if (mask == SK_FLAGS_SMC_TO_CLC)
			WRITE_ONCE(nsk->sk_sndbuf,
				   READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
		else
			WRITE_ONCE(nsk->sk_sndbuf,
				   2 * READ_ONCE(nnet->smc.sysctl_wmem));
	}
	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
		nsk->sk_rcvbuf = osk->sk_rcvbuf;
	} else {
		if (mask == SK_FLAGS_SMC_TO_CLC)
			WRITE_ONCE(nsk->sk_rcvbuf,
				   READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
		else
			WRITE_ONCE(nsk->sk_rcvbuf,
				   2 * READ_ONCE(nnet->smc.sysctl_rmem));
	}
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = READ_ONCE(osk->sk_mark);
	nsk->sk_priority = READ_ONCE(osk->sk_priority);
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;

	smc_adjust_sock_bufsizes(nsk, osk, mask);
}

static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register the new vzalloced sndbuf on all links */
static int smcr_lgr_reg_sndbufs(struct smc_link *link,
				struct smc_buf_desc *snd_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	if (!snd_desc->is_vm)
		return -EINVAL;

	/* protect against parallel smcr_link_reg_buf() */
	down_write(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_buf(&lgr->lnk[i], snd_desc);
		if (rc)
			break;
	}
	up_write(&lgr->llc_conf_mutex);
	return rc;
}

/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	bool do_slow = false;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;

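	/* first scan under the read lock whether the buffer still needs to
	 * be registered on a link; only then take the write-locked slow
	 * path that performs the registration
	 */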
	down_read(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		if (!rmb_desc->is_reg_mr[link->link_idx]) {
			up_read(&lgr->llc_conf_mutex);
			goto slow_path;
		}
	}
	/* the MR is registered already, skip to the rkey exchange */
	goto fast_path;
slow_path:
	do_slow = true;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_buf()
	 */
	down_write(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_buf(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}
fast_path:
	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	rmb_desc->is_conf_rkey = true;
out:
	do_slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}

static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* Receive CONFIRM LINK request from server over RoCE fabric.
	 * Using twice the server's timeout on the client side can
	 * temporarily avoid decline messages of both sides crossing or
	 * colliding.
	 */
	qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	/* reg the sndbuf if it was vzalloced */
	if (smc->conn.sndbuf_desc->is_vm) {
		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
			return SMC_CLC_DECL_ERR_REGBUF;
	}

	/* reg the rmb */
	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGBUF;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	if (link->lgr->max_links > 1) {
		/* optional 2nd link, receive ADD LINK request from server */
		qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK);
		if (!qentry) {
			struct smc_clc_msg_decline dclc;

			rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
					      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
			if (rc == -EAGAIN)
				rc = 0; /* no DECLINE received, go with one link */
			return rc;
		}
		smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
		smc_llc_cli_add_link(link, qentry);
	}
	return 0;
}

static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}

static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
					struct smc_clc_msg_accept_confirm *clc)
{
	struct smc_clc_first_contact_ext *fce;
	int clc_v2_len;

	if (clc->hdr.version == SMC_V1 ||
	    !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
		return;

	if (smc->conn.lgr->is_smcd) {
		memcpy(smc->conn.lgr->negotiated_eid, clc->d1.eid,
		       SMC_MAX_EID_LEN);
		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, d1);
	} else {
		memcpy(smc->conn.lgr->negotiated_eid, clc->r1.eid,
		       SMC_MAX_EID_LEN);
		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, r1);
	}
	fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc) + clc_v2_len);
	smc->conn.lgr->peer_os = fce->os_type;
	smc->conn.lgr->peer_smc_release = fce->release;
	if (smc_isascii(fce->hostname))
		memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
		       SMC_MAX_HOSTNAME_LEN);
}

static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
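	/* offset of the peer's RMB element within the peer RMB area;
	 * the RMBE index is 1-based here, so index 1 maps to offset 0
	 */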
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = ntohll(clc->d0.token);
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
	smc_conn_save_peer_info_fce(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc,
				    struct smc_init_info *ini)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}

static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
				       struct smc_stats_fback *fback_arr)
{
	int cnt;

	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
			fback_arr[cnt].count++;
			break;
		}
		if (!fback_arr[cnt].fback_code) {
			fback_arr[cnt].fback_code = smc->fallback_rsn;
			fback_arr[cnt].count++;
			break;
		}
	}
}

static void smc_stat_fallback(struct smc_sock *smc)
{
	struct net *net = sock_net(&smc->sk);

	mutex_lock(&net->smc.mutex_fback_rsn);
	if (smc->listen_smc) {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
		net->smc.fback_rsn->srv_fback_cnt++;
	} else {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
		net->smc.fback_rsn->clnt_fback_cnt++;
	}
	mutex_unlock(&net->smc.mutex_fback_rsn);
}

/* must be called under rcu read lock */
static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
{
	struct socket_wq *wq;
	__poll_t flags;

	wq = rcu_dereference(smc->sk.sk_wq);
	if (!skwq_has_sleeper(wq))
		return;

	/* wake up smc sk->sk_wq */
	if (!key) {
		/* sk_state_change */
		wake_up_interruptible_all(&wq->wait);
	} else {
		flags = key_to_poll(key);
		if (flags & (EPOLLIN | EPOLLOUT))
			/* sk_data_ready or sk_write_space */
			wake_up_interruptible_sync_poll(&wq->wait, flags);
		else if (flags & EPOLLERR)
			/* sk_error_report */
			wake_up_interruptible_poll(&wq->wait, flags);
	}
}

static int smc_fback_mark_woken(wait_queue_entry_t *wait,
				unsigned int mode, int sync, void *key)
{
	struct smc_mark_woken *mark =
		container_of(wait, struct smc_mark_woken, wait_entry);

	mark->woken = true;
	mark->key = key;
	return 0;
}

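/* Temporarily hook a marker entry into the clcsock's wait queue, invoke the
 * original callback, and if that woke the marker, forward the wakeup with
 * the same poll key to the smc socket's own wait queue.
 */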
static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
				     void (*clcsock_callback)(struct sock *sk))
{
	struct smc_mark_woken mark = { .woken = false };
	struct socket_wq *wq;

	init_waitqueue_func_entry(&mark.wait_entry,
				  smc_fback_mark_woken);
	rcu_read_lock();
	wq = rcu_dereference(clcsk->sk_wq);
	if (!wq)
		goto out;
	add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
	clcsock_callback(clcsk);
	remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);

	if (mark.woken)
		smc_fback_wakeup_waitqueue(smc, mark.key);
out:
	rcu_read_unlock();
}

static void smc_fback_state_change(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_state_change);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_data_ready(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_data_ready);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_write_space(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_write_space);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_error_report(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_error_report);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_replace_callbacks(struct smc_sock *smc)
{
	struct sock *clcsk = smc->clcsock->sk;

	write_lock_bh(&clcsk->sk_callback_lock);
	clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);

	smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
			       &smc->clcsk_state_change);
	smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
			       &smc->clcsk_data_ready);
	smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
			       &smc->clcsk_write_space);
	smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
			       &smc->clcsk_error_report);

	write_unlock_bh(&clcsk->sk_callback_lock);
}

static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
	int rc = 0;

	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		rc = -EBADF;
		goto out;
	}

	smc->use_fallback = true;
	smc->fallback_rsn = reason_code;
	smc_stat_fallback(smc);
	trace_smc_switch_to_fallback(smc, reason_code);
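	/* hand the socket file and the fasync list over to the clcsock so
	 * that further I/O and signal delivery are handled by TCP directly
	 */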
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;
		smc->sk.sk_socket->wq.fasync_list = NULL;

		/* There might be some wait entries remaining
		 * in smc sk->sk_wq and they should be woken up
		 * as clcsock's wait queue is woken up.
		 */
		smc_fback_replace_callbacks(smc);
	}
out:
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	struct net *net = sock_net(&smc->sk);
	int rc = 0;

	rc = smc_switch_to_fallback(smc, reason_code);
	if (rc) { /* fallback fails */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return rc;
	}
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	struct net *net = sock_net(&smc->sk);
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	bool lgr_valid = false;

	if (smc_conn_lgr_valid(conn))
		lgr_valid = true;

	smc_conn_free(conn);
	if (local_first && lgr_valid)
		smc_lgr_cleanup_early(lgr);
}

/* check if there is an RDMA device available for this connection;
 * called for connect and listen
 */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->check_smcrv2 && !ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection;
 * called for connect and listen
 */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;
	else
		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}

/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
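	/* if no V1 ISM device was determined, slot 0 is unused and must be
	 * skipped in the comparison
	 */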
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}

/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1, entry = 1;
	bool is_emulated;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */
	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		is_emulated = __smc_ism_is_emulated(chid);
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
				/* It's the last GID-CHID entry left in the CLC
				 * Proposal SMC-Dv2 extension, but an Emulated-
				 * ISM device would take two entries. So skip
				 * it and try the next potential ISM device.
				 */
				continue;
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			ini->is_smcd = true;
			rc = 0;
			i++;
			entry = is_emulated ? entry + 2 : entry + 1;
			if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (!(ini->smcd_version & SMC_V1) ||
	    smc_find_ism_device(smc, ini) ||
	    smc_connect_ism_vlan_setup(smc, ini))
		ini->smcd_version &= ~SMC_V1;
	/* else ISM V1 is supported for this connection */

	/* check if there is an rdma device available */
	if (!(ini->smcr_version & SMC_V1) ||
	    smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V1;
	/* else RDMA is supported for this connection */

	ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
					      ini->smcr_version & SMC_V1);

	/* check if there is an ism v2 device available */
	if (!(ini->smcd_version & SMC_V2) ||
	    !smc_ism_is_v2_capable() ||
	    smc_find_ism_v2_device_clnt(smc, ini))
		ini->smcd_version &= ~SMC_V2;

	/* check if there is an rdma v2 device available */
	ini->check_smcrv2 = true;
	ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
	if (!(ini->smcr_version & SMC_V2) ||
	    smc->clcsock->sk->sk_family != AF_INET ||
	    !smc_clc_ueid_count() ||
	    smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V2;
	ini->check_smcrv2 = false;

	ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
					      ini->smcr_version & SMC_V2);

	/* if neither ISM nor RDMA are supported, fallback */
	if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm) + \
	 sizeof(struct smc_clc_first_contact_ext_v2x) + \
	 sizeof(struct smc_clc_msg_trail))

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}

void smc_fill_gid_list(struct smc_link_group *lgr,
		       struct smc_gidlist *gidlist,
		       struct smc_ib_device *known_dev, u8 *known_gid)
{
	struct smc_init_info *alt_ini = NULL;

	memset(gidlist, 0, sizeof(*gidlist));
	memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);

	alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
	if (!alt_ini)
		goto out;

	alt_ini->vlan_id = lgr->vlan_id;
	alt_ini->check_smcrv2 = true;
	alt_ini->smcrv2.saddr = lgr->saddr;
	smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);

	if (!alt_ini->smcrv2.ib_dev_v2)
		goto out;

	memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
	       SMC_GID_SIZE);

out:
	kfree(alt_ini);
}

static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
				       struct smc_clc_msg_accept_confirm *aclc,
				       struct smc_init_info *ini)
{
	struct smc_clc_first_contact_ext *fce =
		smc_get_clc_first_contact_ext(aclc, false);
	struct net *net = sock_net(&smc->sk);
	int rc;

	if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
		return 0;

	if (fce->v2_direct) {
		memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
		ini->smcrv2.uses_gateway = false;
	} else {
		if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
				      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
				      ini->smcrv2.nexthop_mac,
				      &ini->smcrv2.uses_gateway))
			return SMC_CLC_DECL_NOROUTE;
		if (!ini->smcrv2.uses_gateway) {
			/* mismatch: peer claims indirect, but it's direct */
			return SMC_CLC_DECL_NOINDIRECT;
		}
	}

	ini->release_nr = fce->release;
	rc = smc_clc_clnt_v2x_features_validate(fce, ini);
	if (rc)
		return rc;

	return 0;
}

/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;
	u8 *eid = NULL;

	ini->is_smcd = false;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
	memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
	ini->max_conns = SMC_CONN_PER_LGR_MAX;
	ini->max_links = SMC_LINKS_ADD_LNK_MAX;

	reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
	if (reason_code)
		return reason_code;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    (aclc->hdr.version > SMC_V1 ||
			     !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				     sizeof(l->peer_mac)))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc_switch_link_and_count(&smc->conn, link);
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc, ini);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		/* reg sendbufs if they were vzalloced */
		if (smc->conn.sndbuf_desc->is_vm) {
			if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) {
				reason_code = SMC_CLC_DECL_ERR_REGBUF;
				goto connect_abort;
			}
		}
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGBUF;
			goto connect_abort;
		}
	}

	if (aclc->hdr.version > SMC_V1) {
		eid = aclc->r1.eid;
		if (ini->first_contact_local)
			smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
					  link->smcibdev, link->gid);
	}

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   aclc->hdr.version, eid, ini);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}

/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine the chosen ISM device from the CHID of the received CLC ACCEPT.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm *aclc,
			       struct smc_init_info *ini)
{
	int i;

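	/* slot 0 holds the V1 ISM device (if any), slots 1..ism_offered_cnt
	 * the proposed V2 candidates, hence the inclusive upper bound
	 */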
	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	u8 *eid = NULL;
	int rc = 0;

	ini->is_smcd = true;
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	if (aclc->hdr.version == SMC_V2) {
		if (ini->first_contact_peer) {
			struct smc_clc_first_contact_ext *fce =
				smc_get_clc_first_contact_ext(aclc, true);

			ini->release_nr = fce->release;
			rc = smc_clc_clnt_v2x_features_validate(fce, ini);
			if (rc)
				return rc;
		}

		rc = smc_v2_determine_accepted_chid(aclc, ini);
		if (rc)
			return rc;

		if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
			ini->ism_peer_gid[ini->ism_selected].gid_ext =
						ntohll(aclc->d1.gid_ext);
		/* for non-Emulated-ISM devices, peer gid_ext remains 0. */
	}
	ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	rc = smc_buf_create(smc, true);
	if (rc) {
		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	if (aclc->hdr.version > SMC_V1)
		eid = aclc->d1.eid;

	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
				  aclc->hdr.version, eid, ini);
	if (rc)
		goto connect_abort;
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_server_lgr_pending);
	smc->connect_nonblock = 0;

	return rc;
}

/* check if received accept type and version matches a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if (aclc->hdr.typev1 != SMC_TYPE_R &&
	    aclc->hdr.typev1 != SMC_TYPE_D)
		return SMC_CLC_DECL_MODEUNSUPP;

	if (aclc->hdr.version >= SMC_V2) {
		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
		     !smcr_indicated(ini->smc_type_v2)) ||
		    (aclc->hdr.typev1 == SMC_TYPE_D &&
		     !smcd_indicated(ini->smc_type_v2)))
			return SMC_CLC_DECL_MODEUNSUPP;
	} else {
		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
		     !smcr_indicated(ini->smc_type_v1)) ||
		    (aclc->hdr.typev1 == SMC_TYPE_D &&
		     !smcd_indicated(ini->smc_type_v1)))
			return SMC_CLC_DECL_MODEUNSUPP;
	}

	return 0;
}

/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
	struct smc_clc_msg_accept_confirm *aclc;
	struct smc_init_info *ini = NULL;
	u8 *buf = NULL;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
						    version);

	ini->smcd_version = SMC_V1 | SMC_V2;
	ini->smcr_version = SMC_V1 | SMC_V2;
	ini->smc_type_v1 = SMC_TYPE_B;
	ini->smc_type_v2 = SMC_TYPE_B;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
		ini->smcd_version &= ~SMC_V1;
		ini->smcr_version = 0;
		ini->smc_type_v1 = SMC_TYPE_N;
		if (!ini->smcd_version) {
			rc = SMC_CLC_DECL_GETVLANERR;
			goto fallback;
		}
	}

	rc = smc_find_proposal_devices(smc, ini);
	if (rc)
		goto fallback;

	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto fallback;
	}
	aclc = (struct smc_clc_msg_accept_confirm *)buf;

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, aclc, ini);
	if (rc) {
		/* -EAGAIN on timeout, see tcp_recvmsg() */
		if (rc == -EAGAIN) {
			rc = -ETIMEDOUT;
			smc->sk.sk_err = ETIMEDOUT;
		}
		goto vlan_cleanup;
	}

	/* check if smc modes and versions of CLC proposal and accept match */
	rc = smc_connect_check_aclc(ini, aclc);
	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
	if (rc)
		goto vlan_cleanup;

	/* depending on previous steps, connect using rdma or ism */
	if (aclc->hdr.typev1 == SMC_TYPE_R) {
		ini->smcr_version = version;
		rc = smc_connect_rdma(smc, aclc, ini);
	} else if (aclc->hdr.typev1 == SMC_TYPE_D) {
		ini->smcd_version = version;
		rc = smc_connect_ism(smc, aclc, ini);
	}
	if (rc)
		goto vlan_cleanup;

	SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
	kfree(ini);
	return 0;

vlan_cleanup:
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
fallback:
	kfree(ini);
	return smc_connect_decline_fallback(smc, rc, version);
}

static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
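	/* wait until the kernel TCP connect on the clcsock has finished
	 * before starting the CLC handshake on top of it
	 */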
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (rc == -ECONNREFUSED)
			smc->sk.sk_err = ECONNREFUSED;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}

static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sock->state) {
	default:
		rc = -EINVAL;
		goto out;
	case SS_CONNECTED:
		rc = sk->sk_state == SMC_ACTIVE ? -EISCONN : -EINVAL;
		goto out;
	case SS_CONNECTING:
		if (sk->sk_state == SMC_ACTIVE)
			goto connected;
		break;
	case SS_UNCONNECTED:
		sock->state = SS_CONNECTING;
		break;
	}

	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_CLOSED:
		rc = sock_error(sk) ? : -ECONNABORTED;
		sock->state = SS_UNCONNECTED;
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	if (smc->use_fallback) {
		sock->state = rc ? SS_CONNECTING : SS_CONNECTED;
		goto out;
	}
	sock_hold(&smc->sk); /* sock put in passive closing */
	if (flags & O_NONBLOCK) {
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
		goto out;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
	}

connected:
	rc = 0;
	sock->state = SS_CONNECTED;
out:
	release_sock(sk);
out_err:
	return rc;
}

static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;
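	/* drop the listen sock lock while allocating the new sock and while
	 * possibly blocking in kernel_accept(); it is re-taken below
	 */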
	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0 && rc != -EAGAIN)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		smc_sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;

	/* if new clcsock has also inherited the fallback-specific callback
	 * functions, switch them back to the original ones.
	 */
	if (lsmc->use_fallback) {
		if (lsmc->clcsk_state_change)
			new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
		if (lsmc->clcsk_write_space)
			new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
		if (lsmc->clcsk_error_report)
			new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink() */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			new_sock->state = SS_CONNECTED;
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}

static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* reg the sndbuf if it was vzalloced */
	if (smc->conn.sndbuf_desc->is_vm) {
		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
			return SMC_CLC_DECL_ERR_REGBUF;
	}

	/* reg the rmb */
	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGBUF;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	if (link->lgr->max_links > 1) {
		down_write(&link->lgr->llc_conf_mutex);
		/* initial contact - try to establish second link */
		smc_llc_srv_add_link(link, NULL);
		up_write(&link->lgr->llc_conf_mutex);
	}
	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

1911	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
1912		atomic_dec(&lsmc->queued_smc_hs);
1913
1914	if (lsmc->sk.sk_state == SMC_LISTEN) {
1915		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1916		smc_accept_enqueue(&lsmc->sk, newsmcsk);
1917		release_sock(&lsmc->sk);
1918	} else { /* no longer listening */
1919		smc_close_non_accepted(newsmcsk);
1920	}
1921
1922	/* Wake up accept */
1923	lsmc->sk.sk_data_ready(&lsmc->sk);
1924	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1925}
1926
1927/* listen worker: finish in state connected */
1928static void smc_listen_out_connected(struct smc_sock *new_smc)
1929{
1930	struct sock *newsmcsk = &new_smc->sk;
1931
1932	if (newsmcsk->sk_state == SMC_INIT)
1933		newsmcsk->sk_state = SMC_ACTIVE;
1934
1935	smc_listen_out(new_smc);
1936}
1937
1938/* listen worker: finish in error state */
1939static void smc_listen_out_err(struct smc_sock *new_smc)
1940{
1941	struct sock *newsmcsk = &new_smc->sk;
1942	struct net *net = sock_net(newsmcsk);
1943
1944	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
1945	if (newsmcsk->sk_state == SMC_INIT)
1946		sock_put(&new_smc->sk); /* passive closing */
1947	newsmcsk->sk_state = SMC_CLOSED;
1948
1949	smc_listen_out(new_smc);
1950}
1951
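/* Convention for reason_code below: a negative value is an internal error
 * where no CLC decline can be sent and the child ends in error state; a
 * positive SMC_CLC_DECL_* value (other than SMC_CLC_DECL_PEERDECL) is sent
 * to the peer as a decline before the connection falls back to TCP.
 */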
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	smc_conn_abort(new_smc, local_first);
	if (reason_code < 0 ||
	    smc_switch_to_fallback(new_smc, reason_code)) {
		/* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

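/* The smcd_version and smcr_version fields of smc_init_info are bitmasks
 * of SMC_V1 and SMC_V2. The check below seeds them from the proposal
 * header and then clears the V2 bits one requirement at a time (v2
 * extension present, ISM v2 capability, EID availability); the proposal
 * is declined only if both masks end up empty.
 */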
/* listen worker: version checking */
static int smc_listen_v2_check(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
	struct smc_clc_v2_extension *pclc_v2_ext;
	int rc = SMC_CLC_DECL_PEERNOSMC;

	ini->smc_type_v1 = pclc->hdr.typev1;
	ini->smc_type_v2 = pclc->hdr.typev2;
	ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
	ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
	if (pclc->hdr.version > SMC_V1) {
		if (smcd_indicated(ini->smc_type_v2))
			ini->smcd_version |= SMC_V2;
		if (smcr_indicated(ini->smc_type_v2))
			ini->smcr_version |= SMC_V2;
	}
	if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
		rc = SMC_CLC_DECL_PEERNOSMC;
		goto out;
	}
	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		ini->smcr_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOV2EXT;
		goto out;
	}
	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
	if (ini->smcd_version & SMC_V2) {
		if (!smc_ism_is_v2_capable()) {
			ini->smcd_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOISM2SUPP;
		} else if (!pclc_smcd_v2_ext) {
			ini->smcd_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOV2DEXT;
		} else if (!pclc_v2_ext->hdr.eid_cnt &&
			   !pclc_v2_ext->hdr.flag.seid) {
			ini->smcd_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOUEID;
		}
	}
	if (ini->smcr_version & SMC_V2) {
		if (!pclc_v2_ext->hdr.eid_cnt) {
			ini->smcr_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOUEID;
		}
	}

	ini->release_nr = pclc_v2_ext->hdr.flag.release;
	if (pclc_v2_ext->hdr.flag.release > SMC_RELEASE)
		ini->release_nr = SMC_RELEASE;

out:
	if (!ini->smcd_version && !ini->smcr_version)
		return rc;

	return 0;
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false)) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}

static bool smc_is_already_selected(struct smcd_dev *smcd,
				    struct smc_init_info *ini,
				    int matches)
{
	int i;

	for (i = 0; i < matches; i++)
		if (smcd == ini->ism_dev[i])
			return true;

	return false;
}

/* check for ISM devices matching proposed ISM devices */
static void smc_check_ism_v2_match(struct smc_init_info *ini,
				   u16 proposed_chid,
				   struct smcd_gid *proposed_gid,
				   unsigned int *matches)
{
	struct smcd_dev *smcd;

	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away)
			continue;
		if (smc_is_already_selected(smcd, ini, *matches))
			continue;
		if (smc_ism_get_chid(smcd) == proposed_chid &&
		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
			ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
			if (__smc_ism_is_emulated(proposed_chid))
				ini->ism_peer_gid[*matches].gid_ext =
							proposed_gid->gid_ext;
			/* non-Emulated-ISM's peer gid_ext remains 0. */
			ini->ism_dev[*matches] = smcd;
			(*matches)++;
			break;
		}
	}
}

static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
{
	if (!ini->rc)
		ini->rc = rc;
}

static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_v2_extension *smc_v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	unsigned int matches = 0;
	struct smcd_gid smcd_gid;
	u8 smcd_version;
	u8 *eid = NULL;
	int i, rc;
	u16 chid;

	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
		goto not_found;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);

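	/* Layout of the proposed V2 GID-CHID array as consumed below: a
	 * native ISM device occupies one {gid, chid} entry; an Emulated-ISM
	 * device occupies two consecutive entries with the same CHID, the
	 * second entry's gid field carrying gid_ext.
	 */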
	mutex_lock(&smcd_dev_list.mutex);
	if (pclc_smcd->ism.chid) {
		/* check for ISM device matching proposed native ISM device */
		smcd_gid.gid = ntohll(pclc_smcd->ism.gid);
		smcd_gid.gid_ext = 0;
		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
				       &smcd_gid, &matches);
	}
	for (i = 0; i < smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
		smcd_gid.gid_ext = 0;
		chid = ntohs(smcd_v2_ext->gidchid[i].chid);
		if (__smc_ism_is_emulated(chid)) {
			if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
			    chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
				/* each Emulated-ISM device takes two GID-CHID
				 * entries and the CHID of the second entry
				 * repeats that of the first entry.
				 *
				 * So skip this entry if the next GID-CHID
				 * entry is missing or the two entries'
				 * CHIDs differ.
				 */
				continue;
			smcd_gid.gid_ext =
				ntohll(smcd_v2_ext->gidchid[++i].gid);
		}
		smc_check_ism_v2_match(ini, chid, &smcd_gid, &matches);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (!ini->ism_dev[0]) {
		smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
		goto not_found;
	}

	smc_ism_get_system_eid(&eid);
	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
			       smcd_v2_ext->system_eid, eid))
		goto not_found;

	/* separate - outside the smcd_dev_list.lock */
	smcd_version = ini->smcd_version;
	for (i = 0; i < matches; i++) {
		ini->smcd_version = SMC_V2;
		ini->is_smcd = true;
		ini->ism_selected = i;
		rc = smc_listen_ism_init(new_smc, ini);
		if (rc) {
			smc_find_ism_store_rc(rc, ini);
			/* try next active ISM device */
			continue;
		}
		return; /* matching and usable V2 ISM device found */
	}
	/* no V2 ISM device could be initialized */
	ini->smcd_version = smcd_version;	/* restore original value */
	ini->negotiated_eid[0] = 0;

not_found:
	ini->smcd_version &= ~SMC_V2;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}

static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
	int rc = 0;

	/* check if ISM V1 is available */
	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
		goto not_found;
	ini->is_smcd = true; /* prepare ISM check */
	ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
	ini->ism_peer_gid[0].gid_ext = 0;
	rc = smc_find_ism_device(new_smc, ini);
	if (rc)
		goto not_found;
	ini->ism_selected = 0;
	rc = smc_listen_ism_init(new_smc, ini);
	if (!rc)
		return;		/* V1 ISM device found */

not_found:
	smc_find_ism_store_rc(rc, ini);
	ini->smcd_version &= ~SMC_V1;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		/* reg sendbufs if they were vzalloced */
		if (conn->sndbuf_desc->is_vm) {
			if (smcr_lgr_reg_sndbufs(conn->lnk,
						 conn->sndbuf_desc))
				return SMC_CLC_DECL_ERR_REGBUF;
		}
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGBUF;
	}

	return 0;
}

static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
					 struct smc_clc_msg_proposal *pclc,
					 struct smc_init_info *ini)
{
	struct smc_clc_v2_extension *smc_v2_ext;
	u8 smcr_version;
	int rc;

	if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
		goto not_found;

	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
		goto not_found;

	/* prepare RDMA check */
	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
	ini->check_smcrv2 = true;
	ini->smcrv2.clc_sk = new_smc->clcsock->sk;
	ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
	ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		smc_find_ism_store_rc(rc, ini);
		goto not_found;
	}
	if (!ini->smcrv2.uses_gateway)
		memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);

	smcr_version = ini->smcr_version;
	ini->smcr_version = SMC_V2;
	rc = smc_listen_rdma_init(new_smc, ini);
	if (!rc) {
		rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
		if (rc)
			smc_conn_abort(new_smc, ini->first_contact_local);
	}
	if (!rc)
		return;
	ini->smcr_version = smcr_version;
	smc_find_ism_store_rc(rc, ini);

not_found:
	ini->smcr_version &= ~SMC_V2;
	ini->smcrv2.ib_dev_v2 = NULL;
	ini->check_smcrv2 = false;
}

static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	int rc;

	if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* prepare RDMA check */
	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		/* no RDMA device found */
		return SMC_CLC_DECL_NOSMCDEV;
	}
	rc = smc_listen_rdma_init(new_smc, ini);
	if (rc)
		return rc;
	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
}

/* determine the local device matching the proposal */
static int smc_listen_find_device(struct smc_sock *new_smc,
				  struct smc_clc_msg_proposal *pclc,
				  struct smc_init_info *ini)
{
	int prfx_rc;

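	/* Selection order: ISM V2 first, then (guarded by the V1 prefix
	 * check) ISM V1, then RDMA V2, and finally RDMA V1; the first
	 * device that initializes successfully is used.
	 */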
	/* check for ISM device matching V2 proposed device */
	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	/* check for matching IP prefix and subnet length (V1) */
	prfx_rc = smc_listen_prfx_check(new_smc, pclc);
	if (prfx_rc)
		smc_find_ism_store_rc(prfx_rc, ini);

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
		return ini->rc ?: SMC_CLC_DECL_GETVLANERR;

	/* check for ISM device matching V1 proposed device */
	if (!prfx_rc)
		smc_find_ism_v1_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (!smcr_indicated(pclc->hdr.typev1) &&
	    !smcr_indicated(pclc->hdr.typev2))
		/* skip RDMA and decline */
		return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;

	/* check if RDMA V2 is available */
	smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
	if (ini->smcrv2.ib_dev_v2)
		return 0;

	/* check if RDMA V1 is available */
	if (!prfx_rc) {
		int rc;

		rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
		smc_find_ism_store_rc(rc, ini);
		return (!rc) ? 0 : ini->rc;
	}
	return prfx_rc;
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first,
				  struct smc_init_info *ini)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc, ini);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}

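/* Overview of the server-side CLC handshake performed by the listen worker
 * below: wait for the peer's SMC Proposal, validate versions and pick a
 * local ISM or RoCE device, send an SMC Accept, then wait for the peer's
 * SMC Confirm and complete the RDMA setup (SMC-R only). Any failure along
 * the way goes through smc_listen_decline(), which falls back to TCP where
 * possible.
 */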
/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm *cclc;
	struct smc_clc_msg_proposal_area *buf;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info *ini = NULL;
	u8 proposal_version = SMC_V1;
	u8 accept_version;
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
		if (rc)
			smc_listen_out_err(new_smc);
		else
			smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}
	pclc = (struct smc_clc_msg_proposal *)buf;
	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;

	if (pclc->hdr.version > SMC_V1)
		proposal_version = SMC_V2;

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}

	/* initial version checking */
	rc = smc_listen_v2_check(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	rc = smc_clc_srv_v2x_features_validate(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* determine ISM or RoCE device used for connection */
	rc = smc_listen_find_device(new_smc, pclc, ini);
	if (rc)
		goto out_unlock;

	/* send SMC Accept CLC message */
	accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
				 accept_version, ini->negotiated_eid, ini);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ini->is_smcd)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	memset(buf, 0, sizeof(*buf));
	cclc = (struct smc_clc_msg_accept_confirm *)buf;
	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	rc = smc_clc_v2x_features_confirm_check(cclc, ini);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	/* fce smc release version is needed in smc_listen_rdma_finish,
	 * so save fce info here.
	 */
	smc_conn_save_peer_info_fce(new_smc, cclc);

	/* finish worker */
	if (!ini->is_smcd) {
		rc = smc_listen_rdma_finish(new_smc, cclc,
					    ini->first_contact_local, ini);
		if (rc)
			goto out_unlock;
		mutex_unlock(&smc_server_lgr_pending);
	}
	smc_conn_save_peer_info(new_smc, cclc);
	smc_listen_out_connected(new_smc);
	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
	goto out_free;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
			   proposal_version);
out_free:
	kfree(ini);
	kfree(buf);
}

static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc) /* clcsock accept queue empty or error */
			goto out;
		if (!new_smc)
			continue;

		if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
			atomic_inc(&lsmc->queued_smc_hs);

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
}

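/* sk_data_ready callback installed on the internal CLC socket of a listen
 * socket: it first chains to the original TCP sk_data_ready saved in
 * clcsk_data_ready, then schedules the tcp_listen worker; the extra
 * sock_hold() is dropped again if that work was already queued.
 */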
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc;

	read_lock_bh(&listen_clcsock->sk_callback_lock);
	lsmc = smc_clcsock_user_data(listen_clcsock);
	if (!lsmc)
		goto out;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
out:
	read_unlock_bh(&listen_clcsock->sk_callback_lock);
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock || sock->state != SS_UNCONNECTED)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we cannot apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
	smc->clcsock->sk->sk_user_data =
		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
			       smc_clcsock_data_ready, &smc->clcsk_data_ready);
	write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);

	/* save original ops */
	smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;

	smc->af_ops = *smc->ori_af_ops;
	smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;

	inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;

	if (smc->limit_smc_hs)
		tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc) {
		write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
		smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
				       &smc->clcsk_data_ready);
		smc->clcsock->sk->sk_user_data = NULL;
		write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
		goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;

out:
	release_sock(sk);
	return rc;
}

static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

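	/* TCP_DEFER_ACCEPT emulation: sockopt_defer_accept holds the
	 * user-supplied value in seconds (as with plain TCP). Block here
	 * until payload data is available on the accepted socket, polling
	 * the CLC socket's receive queue when running in fallback mode.
	 */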
	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	/* SMC does not support connect with fastopen */
	if (msg->msg_flags & MSG_FASTOPEN) {
		/* not connected yet, fallback */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
			if (rc)
				goto out;
		} else {
			rc = -EINVAL;
			goto out;
		}
	} else if ((sk->sk_state != SMC_ACTIVE) &&
		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
		   (sk->sk_state != SMC_INIT)) {
		rc = -EPIPE;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	} else {
		rc = smc_tx_sendmsg(smc, msg, len);
		SMC_STAT_TX_PAYLOAD(smc, len, rc);
	}
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
		SMC_STAT_RX_PAYLOAD(smc, rc, rc);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

static __poll_t smc_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work() */
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							   wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	if (sock->state == SS_CONNECTING) {
		if (sk->sk_state == SMC_ACTIVE)
			sock->state = SS_CONNECTED;
		else if (sk->sk_state == SMC_PEERCLOSEWAIT1 ||
			 sk->sk_state == SMC_PEERCLOSEWAIT2 ||
			 sk->sk_state == SMC_APPCLOSEWAIT1 ||
			 sk->sk_state == SMC_APPCLOSEWAIT2 ||
			 sk->sk_state == SMC_APPFINCLOSEWAIT)
			sock->state = SS_DISCONNECTING;
	}

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK) {
			sk->sk_state = SMC_CLOSED;
			sk->sk_socket->state = SS_UNCONNECTED;
			sock_put(sk);
		}
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
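	/* i.e. SHUT_RD (0) -> RCV_SHUTDOWN (1), SHUT_WR (1) -> SEND_SHUTDOWN (2),
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */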
	sk->sk_shutdown |= how + 1;

	if (sk->sk_state == SMC_CLOSED)
		sock->state = SS_UNCONNECTED;
	else
		sock->state = SS_DISCONNECTING;
out:
	release_sock(sk);
	return rc ? rc : rc1;
}

static int __smc_getsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;
	int val, len;

	smc = smc_sk(sock->sk);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SMC_LIMIT_HS:
		val = smc->limit_smc_hs;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int __smc_setsockopt(struct socket *sock, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	smc = smc_sk(sk);

	lock_sock(sk);
	switch (optname) {
	case SMC_LIMIT_HS:
		if (optlen < sizeof(int)) {
			rc = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(int))) {
			rc = -EFAULT;
			break;
		}

		smc->limit_smc_hs = !!val;
		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	release_sock(sk);

	return rc;
}

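/* Illustrative (hypothetical) userspace use of the SOL_SMC level handled
 * above, assuming SOL_SMC and SMC_LIMIT_HS from the uapi smc headers:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SMC, SMC_LIMIT_HS, &one, sizeof(one));
 */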
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	if (level == SOL_TCP && optname == TCP_ULP)
		return -EOPNOTSUPP;
	else if (level == SOL_SMC)
		return __smc_setsockopt(sock, level, optname, optval, optlen);

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	if (unlikely(!smc->clcsock->ops->setsockopt))
		rc = -EOPNOTSUPP;
	else
		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
						   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk_error_report(sk);
	}
	mutex_unlock(&smc->clcsock_release_lock);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val) {
				SMC_STAT_INC(smc, ndly_cnt);
				smc_tx_pending(&smc->conn);
				cancel_delayed_work(&smc->conn.tx_work);
			}
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val) {
				SMC_STAT_INC(smc, cork_cnt);
				smc_tx_pending(&smc->conn);
				cancel_delayed_work(&smc->conn.tx_work);
			}
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);

	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;
	int rc;

	if (level == SOL_SMC)
		return __smc_getsockopt(sock, level, optname, optval, optlen);

	smc = smc_sk(sock->sk);
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt)) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EOPNOTSUPP;
	}
	rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}

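/* ioctl support is limited to queue inspection: SIOCINQ/FIONREAD reports
 * bytes ready to read, SIOCOUTQ and SIOCOUTQNSD report unacknowledged or
 * unsent bytes in the send buffer, and SIOCATMARK tests for urgent data.
 * Illustrative (hypothetical) use: ioctl(fd, SIOCINQ, &n).
 */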
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not sent + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not sent only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delay consumer cursor
 * updates until each respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		SMC_STAT_INC(smc, splice_cnt);
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.splice_read	= smc_splice_read,
};

static int __smc_create(struct net *net, struct socket *sock, int protocol,
			int kern, struct socket *clcsock)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sock->state = SS_UNCONNECTED;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;

	/* default behavior from limit_smc_hs in every net namespace */
	smc->limit_smc_hs = net->smc.limit_smc_hs;

	rc = 0;
	if (!clcsock) {
		rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
				      &smc->clcsock);
		if (rc) {
			sk_common_release(sk);
			goto out;
		}

		/* smc_clcsock_release() does not wait for smc->clcsock->sk's
		 * destruction; its sk_state might not be TCP_CLOSE after
		 * smc->sk is close()d, and TCP timers can fire later, which
		 * still need the net reference.
		 */
		sk = smc->clcsock->sk;
		__netns_tracker_free(net, &sk->ns_tracker, false);
		sk->sk_net_refcnt = 1;
		get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
		sock_inuse_add(net, 1);
	} else {
		smc->clcsock = clcsock;
	}

out:
	return rc;
}

static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	return __smc_create(net, sock, protocol, kern, NULL);
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

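/* TCP ULP entry point: converts an existing, unconnected TCP socket into an
 * SMC socket in place. Illustrative (hypothetical) userspace trigger via the
 * standard TCP_ULP socket option, matching the "smc" ulp name registered
 * below:
 *
 *	setsockopt(tcp_fd, IPPROTO_TCP, TCP_ULP, "smc", sizeof("smc"));
 */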
static int smc_ulp_init(struct sock *sk)
{
	struct socket *tcp = sk->sk_socket;
	struct net *net = sock_net(sk);
	struct socket *smcsock;
	int protocol, ret;

	/* only TCP can be replaced */
	if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
		return -ESOCKTNOSUPPORT;
	/* don't handle wq now */
	if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
		return -ENOTCONN;

	if (sk->sk_family == AF_INET)
		protocol = SMCPROTO_SMC;
	else
		protocol = SMCPROTO_SMC6;

	smcsock = sock_alloc();
	if (!smcsock)
		return -ENFILE;

	smcsock->type = SOCK_STREAM;
	__module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
	ret = __smc_create(net, smcsock, protocol, 1, tcp);
	if (ret) {
		sock_release(smcsock); /* module_put() in sock_release(); ops won't be NULL */
		return ret;
	}

	/* replace tcp socket with smc */
	smcsock->file = tcp->file;
	smcsock->file->private_data = smcsock;
	smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
	smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
	tcp->file = NULL;

	return ret;
}

static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
			  const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	/* don't inherit ulp ops to children of a listening socket */
	icsk->icsk_ulp_ops = NULL;
}

static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
	.name		= "smc",
	.owner		= THIS_MODULE,
	.init		= smc_ulp_init,
	.clone		= smc_ulp_clone,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	int rc;

	rc = smc_sysctl_net_init(net);
	if (rc)
		return rc;
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_sysctl_net_exit(net);
	smc_pnet_net_exit(net);
}

static __net_init int smc_net_stat_init(struct net *net)
{
	return smc_stats_init(net);
}

static void __net_exit smc_net_stat_exit(struct net *net)
{
	smc_stats_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

static struct pernet_operations smc_net_stat_ops = {
	.init = smc_net_stat_init,
	.exit = smc_net_stat_exit,
};

static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = register_pernet_subsys(&smc_net_stat_ops);
	if (rc)
		goto out_pernet_subsys;

	rc = smc_ism_init();
	if (rc)
		goto out_pernet_subsys_stat;
	smc_clc_init();

	rc = smc_nl_init();
	if (rc)
		goto out_ism;

	rc = smc_pnet_init();
	if (rc)
		goto out_nl;

	rc = -ENOMEM;

	smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
	if (!smc_tcp_ls_wq)
		goto out_pnet;

	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_alloc_tcp_ls_wq;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	rc = tcp_register_ulp(&smc_ulp_ops);
	if (rc) {
		pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
		goto out_ib;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_ib:
	smc_ib_unregister_client();
out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_alloc_tcp_ls_wq:
	destroy_workqueue(smc_tcp_ls_wq);
out_pnet:
	smc_pnet_exit();
out_nl:
	smc_nl_exit();
out_ism:
	smc_clc_exit();
	smc_ism_exit();
out_pernet_subsys_stat:
	unregister_pernet_subsys(&smc_net_stat_ops);
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	tcp_unregister_ulp(&smc_ulp_ops);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	smc_ism_exit();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_tcp_ls_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	smc_nl_exit();
	smc_clc_exit();
	unregister_pernet_subsys(&smc_net_stat_ops);
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
MODULE_ALIAS_TCP_ULP("smc");
MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);