Lines matching symbol: ssk
All matches below are from the MPTCP socket-option code (net/mptcp/sockopt.c), covering both definitions and uses of ssk; each line is prefixed with its number in that file. "ssk" is the per-subflow TCP socket, as opposed to "msk", the MPTCP-level socket.

41 * new ssk completes join, inherits options from s0 // seq 2
42 * Needs sync from mptcp join logic, but ssk->seq == msk->seq
44 * Set High order bits to sk_state so ssk->seq == msk->seq test
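
Lines 41-44 are fragments of one comment block (line 43, which does not mention ssk, is simply not shown): the msk keeps a setsockopt generation counter, a subflow whose counter equals the msk's is known to be fully in sync, and folding the socket state into the high bits guarantees a freshly created ssk can never pass the ssk->seq == msk->seq test by accident. A minimal sketch of the reset helper that comment documents, assuming the shape of sockopt_seq_reset() in this file:

    /* Sketch: seed the generation counter from the socket state so the
     * top bits differ between listener and established sockets, and the
     * ssk->seq == msk->seq shortcut fails until an explicit sync runs.
     */
    static u32 sockopt_seq_reset(const struct sock *sk)
    {
        sock_owned_by_me(sk);
        return (u32)sk->sk_state << 24u;
    }
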
79 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
80 bool slow = lock_sock_fast(ssk);
84 sock_valbool_flag(ssk, SOCK_DBG, !!val);
87 if (ssk->sk_prot->keepalive)
88 ssk->sk_prot->keepalive(ssk, !!val);
89 sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val);
92 WRITE_ONCE(ssk->sk_priority, val);
96 ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
97 WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
98 mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
102 ssk->sk_userlocks |= SOCK_RCVBUF_LOCK;
103 WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
106 if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
107 WRITE_ONCE(ssk->sk_mark, sk->sk_mark);
108 sk_dst_reset(ssk);
112 WRITE_ONCE(ssk->sk_incoming_cpu, val);
117 unlock_sock_fast(ssk, slow);
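
Lines 79-117 all come from the helper that fans an already-validated SOL_SOCKET value out to every established subflow: each ssk is taken with the fast variant of the socket lock, the value is mirrored with sock_valbool_flag() or WRITE_ONCE(), and a changed SO_MARK additionally drops the cached route (sk_dst_reset(), line 108). A condensed sketch of the loop, with SO_PRIORITY as the example arm (the helper name here is illustrative):

    /* Sketch of the per-subflow fan-out used by the SOL_SOCKET
     * handlers above; only the SO_PRIORITY case is shown.
     */
    static void mptcp_sync_priority(struct mptcp_sock *msk, int val)
    {
        struct mptcp_subflow_context *subflow;

        mptcp_for_each_subflow(msk, subflow) {
            struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
            bool slow = lock_sock_fast(ssk);

            /* paired with lockless READ_ONCE() readers */
            WRITE_ONCE(ssk->sk_priority, val);
            unlock_sock_fast(ssk, slow);
        }
    }
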
161 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
162 bool slow = lock_sock_fast(ssk);
165 unlock_sock_fast(ssk, slow);
239 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
240 bool slow = lock_sock_fast(ssk);
243 unlock_sock_fast(ssk, slow);
274 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
275 bool slow = lock_sock_fast(ssk);
278 sock_reset_flag(ssk, SOCK_LINGER);
280 ssk->sk_lingertime = sk->sk_lingertime;
281 sock_set_flag(ssk, SOCK_LINGER);
285 unlock_sock_fast(ssk, slow);
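
Lines 161-165 and 239-243 repeat the same fast-lock pattern for the timestamp-related options, while lines 274-285 show the SO_LINGER arm, the one case that must keep a flag and a timeout coherent: the flag is cleared when linger is switched off, and sk_lingertime is copied before SOCK_LINGER is set so a reader never sees the flag with a stale timeout. A sketch of that branch body for one ssk (helper name illustrative):

    /* Sketch: keep SOCK_LINGER and sk_lingertime consistent on one
     * subflow; "on" mirrors the linger setting applied to the msk.
     */
    static void mptcp_sync_linger(const struct sock *sk, struct sock *ssk, bool on)
    {
        if (!on) {
            sock_reset_flag(ssk, SOCK_LINGER);
        } else {
            ssk->sk_lingertime = sk->sk_lingertime;
            sock_set_flag(ssk, SOCK_LINGER);
        }
    }
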
296 struct sock *ssk;
305 ssk = __mptcp_nmpc_sk(msk);
306 if (IS_ERR(ssk)) {
308 return PTR_ERR(ssk);
311 ret = sk_setsockopt(ssk, SOL_SOCKET, optname, optval, optlen);
314 sk->sk_reuseport = ssk->sk_reuseport;
316 sk->sk_reuse = ssk->sk_reuse;
318 sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
320 sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
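
Lines 296-320 belong to the options that only make sense before the connection is established (SO_REUSEPORT, SO_REUSEADDR and the bind-to-device pair): the msk materializes its first subflow with __mptcp_nmpc_sk(), applies the option there through sk_setsockopt(), then mirrors the resulting field back so a later getsockopt() on the msk agrees with the subflow. The duplicated sk_bound_dev_if assignment at lines 318 and 320 is two switch arms, one for SO_BINDTODEVICE and one for SO_BINDTOIFINDEX. A sketch of the shape, with error unwinding elided and the function name illustrative:

    /* Sketch: apply a pre-connect SOL_SOCKET option on the first
     * subflow, then copy the outcome back to the msk. Assumes the msk
     * socket lock is held, as in the listing above.
     */
    static int mptcp_set_on_first_sf(struct mptcp_sock *msk, int optname,
                                     sockptr_t optval, unsigned int optlen)
    {
        struct sock *sk = (struct sock *)msk;
        struct sock *ssk = __mptcp_nmpc_sk(msk);
        int ret;

        if (IS_ERR(ssk))
            return PTR_ERR(ssk);

        ret = sk_setsockopt(ssk, SOL_SOCKET, optname, optval, optlen);
        if (!ret && optname == SO_REUSEPORT)
            sk->sk_reuseport = ssk->sk_reuseport; /* keep msk view in sync */
        return ret;
    }
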
394 struct sock *ssk;
401 ssk = __mptcp_nmpc_sk(msk);
402 if (IS_ERR(ssk)) {
404 return PTR_ERR(ssk);
407 ret = tcp_setsockopt(ssk, SOL_IPV6, optname, optval, optlen);
417 sk->sk_ipv6only = ssk->sk_ipv6only;
421 inet_test_bit(TRANSPARENT, ssk));
425 inet_test_bit(FREEBIND, ssk));
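
Lines 394-425 are the SOL_IPV6 counterpart: IPV6_V6ONLY, IPV6_TRANSPARENT and IPV6_FREEBIND are forwarded to the first subflow with tcp_setsockopt(), and on success the msk copies back sk_ipv6only or re-reads the inet flag with inet_test_bit(), so both sockets report the same state afterwards. A sketch of the mirror-back step, i.e. the switch body that would follow a successful tcp_setsockopt() call:

    /* Sketch: after tcp_setsockopt(ssk, SOL_IPV6, ...) succeeds,
     * mirror the subflow's resulting state back into the msk.
     */
    switch (optname) {
    case IPV6_V6ONLY:
        sk->sk_ipv6only = ssk->sk_ipv6only;
        break;
    case IPV6_TRANSPARENT:
        inet_assign_bit(TRANSPARENT, sk, inet_test_bit(TRANSPARENT, ssk));
        break;
    case IPV6_FREEBIND:
        inet_assign_bit(FREEBIND, sk, inet_test_bit(FREEBIND, ssk));
        break;
    }
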
609 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
612 lock_sock(ssk);
613 err = tcp_set_congestion_control(ssk, name, true, cap_net_admin);
617 release_sock(ssk);
635 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
637 lock_sock(ssk);
638 __tcp_sock_set_cork(ssk, !!val);
639 release_sock(ssk);
655 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
657 lock_sock(ssk);
658 __tcp_sock_set_nodelay(ssk, !!val);
659 release_sock(ssk);
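
Lines 609-659 cover the TCP-level knobs every subflow must agree on: TCP_CONGESTION, TCP_CORK and TCP_NODELAY. These take the full socket lock via lock_sock() rather than the fast variant, since tcp_set_congestion_control() may need to load a module and the __tcp_sock_set_*() helpers can push out pending data. The msk also caches the chosen value (msk->ca_name, msk->cork, msk->nodelay, see lines 1457-1459) so the same setting can be replayed on subflows that join later. A sketch of the TCP_NODELAY arm (helper name illustrative):

    /* Sketch: fan TCP_NODELAY out to every subflow under the full
     * socket lock, caching the value for future joins.
     */
    static void mptcp_sync_nodelay(struct mptcp_sock *msk, int val)
    {
        struct mptcp_subflow_context *subflow;

        mptcp_for_each_subflow(msk, subflow) {
            struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

            lock_sock(ssk);
            __tcp_sock_set_nodelay(ssk, !!val);
            release_sock(ssk);
        }
        msk->nodelay = !!val; /* replayed onto later joins */
    }
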
670 struct sock *ssk;
679 ssk = __mptcp_nmpc_sk(msk);
680 if (IS_ERR(ssk)) {
682 return PTR_ERR(ssk);
687 inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
690 inet_assign_bit(TRANSPARENT, ssk,
694 inet_assign_bit(BIND_ADDRESS_NO_PORT, ssk,
698 WRITE_ONCE(inet_sk(ssk)->local_port_range,
728 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
731 slow = lock_sock_fast(ssk);
732 __ip_sock_set_tos(ssk, val);
733 unlock_sock_fast(ssk, slow);
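
Lines 670-733 handle SOL_IP: the pre-connect options (IP_FREEBIND, IP_TRANSPARENT, IP_BIND_ADDRESS_NO_PORT, IP_LOCAL_PORT_RANGE) are copied onto the first subflow created by __mptcp_nmpc_sk(), while IP_TOS (lines 728-733) is pushed to every established subflow under the fast lock, exactly like the SOL_SOCKET fan-out above. A sketch of the copy step for the bit-valued options (helper name illustrative):

    /* Sketch: replicate the msk's SOL_IP pre-connect state onto the
     * first subflow socket.
     */
    static void mptcp_copy_ip_state(struct sock *sk, struct sock *ssk)
    {
        inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
        inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
        inet_assign_bit(BIND_ADDRESS_NO_PORT, ssk,
                        inet_test_bit(BIND_ADDRESS_NO_PORT, sk));
        WRITE_ONCE(inet_sk(ssk)->local_port_range,
                   READ_ONCE(inet_sk(sk)->local_port_range));
    }
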
760 struct sock *ssk;
765 ssk = __mptcp_nmpc_sk(msk);
766 if (IS_ERR(ssk)) {
767 ret = PTR_ERR(ssk);
771 ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
835 struct sock *ssk;
852 ssk = __mptcp_tcp_fallback(msk);
854 if (ssk)
855 return tcp_setsockopt(ssk, level, optname, optval, optlen);
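
Lines 760-771 and 835-855 are the two shortcuts taken early in the setsockopt path: options that only the initial subflow can honor go through __mptcp_nmpc_sk() plus tcp_setsockopt(), and once the connection has fallen back to plain TCP, __mptcp_tcp_fallback() hands back the first subflow and the request is forwarded wholesale. The getsockopt path at lines 1389-1403 applies the same fallback guard. A sketch of it:

    /* Sketch: after fallback the msk is a thin shim over a single TCP
     * socket, so the option is applied there verbatim.
     */
    ssk = __mptcp_tcp_fallback(msk);
    if (ssk)
        return tcp_setsockopt(ssk, level, optname, optval, optlen);
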
873 struct sock *ssk;
877 ssk = msk->first;
878 if (ssk) {
879 ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
883 ssk = __mptcp_nmpc_sk(msk);
884 if (IS_ERR(ssk)) {
885 ret = PTR_ERR(ssk);
889 ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
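
Lines 873-889 implement the first-subflow read path for getsockopt: if the initial subflow already exists (msk->first), the TCP-level value is read from it directly; otherwise __mptcp_nmpc_sk() creates it first, so querying a TCP option before connect() still has a concrete socket to consult. A condensed sketch, with locking and the exit path elided:

    /* Sketch: read a TCP-level option through the first subflow,
     * creating it on demand for a not-yet-connected msk.
     */
    ssk = msk->first;
    if (!ssk) {
        ssk = __mptcp_nmpc_sk(msk);
        if (IS_ERR(ssk))
            return PTR_ERR(ssk);
    }
    ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
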
1052 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1059 tcp_get_info(ssk, &info);
1144 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1151 mptcp_get_sub_addrs(ssk, &a);
1252 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1266 mptcp_get_sub_addrs(ssk, &sfinfo.addrs);
1271 tcp_get_info(ssk, &tcp_info);
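
Lines 1052-1271 come from the diagnostic getsockopt handlers (the SOL_MPTCP options MPTCP_TCPINFO, MPTCP_SUBFLOW_ADDRS and MPTCP_FULL_INFO, going by the uapi names): each walks the subflow list and fills one record per subflow from tcp_get_info() and mptcp_get_sub_addrs(). A sketch of the per-subflow body, with buffer bounds and copy-out to user space elided:

    /* Sketch: one tcp_info record per subflow, taken from the
     * underlying TCP socket.
     */
    mptcp_for_each_subflow(msk, subflow) {
        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
        struct tcp_info info;

        tcp_get_info(ssk, &info);
        /* ... copy info into this subflow's slot in the user buffer ... */
    }
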
1389 struct sock *ssk;
1400 ssk = __mptcp_tcp_fallback(msk);
1402 if (ssk)
1403 return tcp_getsockopt(ssk, level, optname, optval, option);
1414 static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
1419 if (ssk->sk_prot->keepalive) {
1421 ssk->sk_prot->keepalive(ssk, 1);
1423 ssk->sk_prot->keepalive(ssk, 0);
1426 ssk->sk_priority = sk->sk_priority;
1427 ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
1428 ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
1429 ssk->sk_ipv6only = sk->sk_ipv6only;
1430 __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
1433 ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
1435 WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
1436 mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
1439 WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
1443 ssk->sk_lingertime = sk->sk_lingertime;
1444 sock_set_flag(ssk, SOCK_LINGER);
1446 sock_reset_flag(ssk, SOCK_LINGER);
1449 if (sk->sk_mark != ssk->sk_mark) {
1450 ssk->sk_mark = sk->sk_mark;
1451 sk_dst_reset(ssk);
1454 sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));
1456 if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
1457 tcp_set_congestion_control(ssk, msk->ca_name, false, true);
1458 __tcp_sock_set_cork(ssk, !!msk->cork);
1459 __tcp_sock_set_nodelay(ssk, !!msk->nodelay);
1461 inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
1462 inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
1463 inet_assign_bit(BIND_ADDRESS_NO_PORT, ssk, inet_test_bit(BIND_ADDRESS_NO_PORT, sk));
1464 WRITE_ONCE(inet_sk(ssk)->local_port_range, READ_ONCE(inet_sk(sk)->local_port_range));
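
Lines 1414-1464 are sync_socket_options(), the replay applied to a subflow that joins after options were already set on the msk: keepalive goes through the protocol hook (ssk->sk_prot->keepalive), the plain fields (priority, bound device, incoming CPU, ipv6only, TOS) are copied directly, buffer sizes are copied only if the user explicitly locked them, and the TCP-level and inet-bit state comes last. The buffer-size guard is the subtle part; a sketch of it, using the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK userlocks semantics:

    /* Sketch: replay only user-set buffer sizes; autotuned values
     * must remain per-subflow.
     */
    static const int tx_rx_locks = SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK;

    if (sk->sk_userlocks & tx_rx_locks) {
        ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
        if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) {
            WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
            mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
        }
        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
            WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
    }
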
1467 void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
1469 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1473 ssk->sk_rcvlowat = 0;
1479 tcp_sk(ssk)->notsent_lowat = UINT_MAX;
1482 sync_socket_options(msk, ssk);
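
Lines 1467-1482 show when that replay runs: mptcp_sockopt_sync_locked() first neutralizes per-subflow latency knobs that would mislead the MPTCP scheduler (sk_rcvlowat back to 0 at line 1473, notsent_lowat to UINT_MAX at line 1479) and only then considers sync_socket_options(). A sketch of the generation-counter guard around that call, assuming setsockopt_seq fields matching the comment at lines 41-44:

    /* Sketch: skip the full replay when this subflow has already seen
     * every setsockopt() applied to the msk; setsockopt_seq is the
     * counter the comment at lines 41-44 describes.
     */
    if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) {
        sync_socket_options(msk, ssk);
        subflow->setsockopt_seq = msk->setsockopt_seq;
    }
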
1521 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1524 slow = lock_sock_fast(ssk);
1525 WRITE_ONCE(ssk->sk_rcvbuf, space);
1526 tcp_sk(ssk)->window_clamp = val;
1527 unlock_sock_fast(ssk, slow);
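
Lines 1521-1527 close out receive-side tuning: when the msk recomputes its receive budget, every subflow gets the new buffer size via WRITE_ONCE() and has its TCP window clamped to the same value, again under the fast lock. A condensed sketch of the enclosing loop (the computation of space from the msk-level value, and the surrounding declarations, are elided):

    /* Sketch: propagate the recomputed receive space and clamp each
     * subflow's advertised window to the msk-level value.
     */
    mptcp_for_each_subflow(msk, subflow) {
        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
        bool slow = lock_sock_fast(ssk);

        WRITE_ONCE(ssk->sk_rcvbuf, space);
        tcp_sk(ssk)->window_clamp = val;
        unlock_sock_fast(ssk, slow);
    }
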