Lines Matching refs: msk

44 if (subflow_req->msk)
45 sock_put((struct sock *)subflow_req->msk);
61 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
63 return mptcp_is_fully_established((void *)msk) &&
64 ((mptcp_pm_is_userspace(msk) &&
65 mptcp_userspace_pm_active(msk)) ||
66 READ_ONCE(msk->pm.accept_subflow));
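
The predicate above (lines 61-66) gates every new MP_JOIN: the connection must be fully established, and either a live userspace path manager or the in-kernel PM's accept_subflow flag must allow another subflow. A minimal standalone C sketch of that logic, with hypothetical flattened fields standing in for the kernel's msk state:

#include <stdbool.h>

/* hypothetical stand-in for the relevant mptcp_sock / PM state */
struct msk_state {
    bool fully_established;
    bool pm_is_userspace;
    bool userspace_pm_active;
    bool pm_accept_subflow;
};

/* mirrors the shape of mptcp_can_accept_new_subflow() at lines 61-66 */
static bool can_accept_new_subflow(const struct msk_state *msk)
{
    return msk->fully_established &&
           ((msk->pm_is_userspace && msk->userspace_pm_active) ||
            msk->pm_accept_subflow);
}
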
72 struct mptcp_sock *msk = subflow_req->msk;
77 subflow_generate_hmac(READ_ONCE(msk->local_key),
78 READ_ONCE(msk->remote_key),
88 struct mptcp_sock *msk;
91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
92 if (!msk) {
97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
99 sock_put((struct sock *)msk);
104 return msk;
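
subflow_token_join_request() (lines 88-104) resolves the peer's token to an msk and returns it with a socket reference held; any error path must drop that reference, which is what the sock_put() at line 99 does. A hedged userspace sketch of the same ownership rule (all types and helpers below are hypothetical placeholders):

#include <stddef.h>

struct msk { int refcnt; };

/* placeholder lookup: a real implementation hashes token -> msk and
 * takes a reference on success, like mptcp_token_get_sock() */
static struct msk *token_get(unsigned int token) { (void)token; return NULL; }
static int get_local_id(struct msk *m) { (void)m; return 0; }
static void msk_put(struct msk *m) { m->refcnt--; }  /* cf. sock_put() */

static struct msk *token_join_request(unsigned int token)
{
    struct msk *msk = token_get(token);

    if (!msk)
        return NULL;            /* unknown token: no such connection */

    if (get_local_id(msk) < 0) {
        msk_put(msk);           /* error path drops the lookup reference */
        return NULL;
    }
    return msk;                 /* success: the caller now owns the reference */
}
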
115 subflow_req->msk = NULL;
119 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
121 return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
211 subflow_req->msk = subflow_token_join_request(req);
214 if (!subflow_req->msk) {
219 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
222 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
223 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
234 if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
242 pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
243 subflow_req->remote_nonce, subflow_req->msk);
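
Taken together, lines 211-234 show the MP_JOIN SYN admission order: resolve the token, tolerate a differing source port only if it was announced by the path manager, then re-check that the msk still accepts subflows. A compile-only reconstruction of that ordering (helper names are hypothetical stand-ins; the kernel also bumps MIB counters and sets reset reasons along the way):

#include <stdbool.h>
#include <stdint.h>

struct msk;

extern struct msk *token_join_request(uint32_t token);
extern bool use_different_sport(const struct msk *msk, const void *listener);
extern bool sport_in_announce_list(const struct msk *msk, const void *listener);
extern bool can_accept_new_subflow(const struct msk *msk);

/* reference handling on the failure paths is elided here; in the kernel,
 * request teardown (lines 44-45) drops subflow_req->msk */
static struct msk *admit_join_syn(uint32_t token, const void *listener)
{
    struct msk *msk = token_join_request(token);

    if (!msk)
        return NULL;                        /* unknown token */

    if (use_different_sport(msk, listener) &&
        !sport_in_announce_list(msk, listener))
        return NULL;                        /* unannounced port mismatch */

    if (!can_accept_new_subflow(msk))
        return NULL;

    return msk;
}
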
436 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
438 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
444 struct mptcp_sock *msk = mptcp_sk(sk);
445 struct sock *ssk = msk->first;
449 if (!msk->rcvspace_init)
450 mptcp_rcv_space_init(msk, ssk);
456 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
457 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
463 static void subflow_set_remote_key(struct mptcp_sock *msk,
478 WRITE_ONCE(msk->remote_key, subflow->remote_key);
479 WRITE_ONCE(msk->ack_seq, subflow->iasn);
480 WRITE_ONCE(msk->can_ack, true);
481 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
488 struct mptcp_sock *msk = mptcp_sk(sk);
495 WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
496 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
497 subflow_set_remote_key(msk, subflow, mp_opt);
503 msk->pending_state = ssk->sk_state;
504 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
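
Lines 456-457 and 495-496 show the sender-side sequence bring-up: the first data byte carries idsn + 1, since the MPTCP handshake implicitly consumes one data-sequence number, much as TCP's SYN consumes one sequence number. An illustrative sketch (field and parameter names are hypothetical):

#include <stdint.h>

/* hypothetical flattened view of the msk sequence state */
struct msk_seq {
    uint64_t write_seq;  /* next DSN to assign to new data */
    uint64_t snd_nxt;    /* next DSN to send */
    uint64_t snd_una;    /* oldest unacknowledged DSN */
    uint64_t wnd_end;    /* right edge of the send window */
};

/* mirrors the WRITE_ONCE() initialisation at lines 456-457 and 495-496 */
static void init_tx_state(struct msk_seq *s, uint64_t idsn, uint32_t snd_wnd)
{
    s->write_seq = idsn + 1;             /* handshake consumes one DSN */
    s->snd_nxt   = s->write_seq;
    s->snd_una   = s->write_seq;
    s->wnd_end   = idsn + 1 + snd_wnd;
}
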
514 struct mptcp_sock *msk;
522 msk = mptcp_sk(parent);
534 pr_fallback(msk);
539 WRITE_ONCE(msk->csum_enabled, true);
541 WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
580 if (subflow_use_different_dport(msk, sk)) {
606 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
612 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
729 struct mptcp_sock *msk;
732 msk = subflow_req->msk;
733 if (!msk)
736 subflow_generate_hmac(READ_ONCE(msk->remote_key),
737 READ_ONCE(msk->local_key),
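
Note the key order relative to lines 77-78: when generating our own MP_JOIN HMAC the local key comes first, and when validating the peer's it is the remote key, since each side keys the HMAC with its own material first (RFC 8684). A hedged userspace analog of the concatenation scheme, using OpenSSL (link with -lcrypto; byte handling simplified, where the kernel helper writes keys and nonces big-endian):

#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

/* illustrative analog of subflow_generate_hmac(): HMAC-SHA256 keyed with
 * key1 || key2 over the message nonce1 || nonce2 */
static void generate_hmac(uint64_t key1, uint64_t key2,
                          uint32_t nonce1, uint32_t nonce2,
                          uint8_t out[32])
{
    uint8_t key[16], msg[8];
    unsigned int len = 32;

    memcpy(key, &key1, 8);
    memcpy(key + 8, &key2, 8);
    memcpy(msg, &nonce1, 4);
    memcpy(msg + 4, &nonce2, 4);
    HMAC(EVP_sha256(), key, sizeof(key), msg, sizeof(msg), out, &len);
}
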
774 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
778 subflow_set_remote_key(msk, subflow, mp_opt);
780 WRITE_ONCE(msk->fully_established, true);
783 __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
834 !mptcp_can_accept_new_subflow(subflow_req->msk)) {
881 owner = subflow_req->msk;
887 /* move the msk reference ownership to the subflow */
888 subflow_req->msk = NULL;
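
Lines 881-888 move the reference taken at join time over to the newly created subflow; clearing subflow_req->msk is what keeps the request-teardown path at lines 44-45 from dropping the same reference twice. A minimal sketch of that hand-off (types and helpers hypothetical):

#include <stddef.h>

struct msk { int refcnt; };
struct subflow_request { struct msk *msk; };  /* non-NULL while it owns a ref */

static void msk_put(struct msk *m) { m->refcnt--; }  /* cf. sock_put() */

/* mirrors lines 881/888: the caller takes over the request's reference */
static struct msk *move_msk_ref(struct subflow_request *req)
{
    struct msk *owner = req->msk;

    req->msk = NULL;        /* teardown (lines 44-45) will now skip the put */
    return owner;
}

static void destroy_request(struct subflow_request *req)
{
    if (req->msk)
        msk_put(req->msk);  /* only if ownership was never transferred */
}
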
1073 struct mptcp_sock *msk)
1076 bool csum_reqd = READ_ONCE(msk->csum_enabled);
1125 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1139 mptcp_schedule_work((struct sock *)msk);
1152 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1160 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
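
A DSS mapping may carry only the low 32 bits of the data sequence number; line 1160 expands it back to 64 bits against the current ack_seq. A hedged reconstruction of such an expansion (my simplification, not the kernel's exact helper, which also guards against reordered, slightly-older sequence numbers):

#include <stdbool.h>
#include <stdint.h>

/* illustrative 32 -> 64-bit DSN expansion in the spirit of mptcp_expand_seq() */
static uint64_t expand_seq(uint64_t old_seq, uint64_t cur_seq, bool dsn64)
{
    uint32_t old_lo = (uint32_t)old_seq;
    uint32_t cur_lo = (uint32_t)cur_seq;
    uint64_t hi = old_seq >> 32;

    if (dsn64)
        return cur_seq;          /* the option carried the full 64-bit DSN */

    if (cur_lo < old_lo)
        hi++;                    /* assume the low 32 bits wrapped */

    return (hi << 32) | cur_lo;
}
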
1239 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1245 !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1246 mptcp_schedule_work((struct sock *)msk);
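
Lines 1239-1246 show the schedule-once idiom used throughout the file: a flag bit is set with test_and_set_bit(), and only the caller that actually flipped the bit schedules the worker. A standalone C11 analog with the work-queue plumbing stubbed out:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag work_close_subflow = ATOMIC_FLAG_INIT;

/* placeholder for mptcp_schedule_work(): would enqueue the msk worker */
static void schedule_work(void) { }

static void sched_work_if_closed(bool subflow_closed)
{
    /* atomic_flag_test_and_set() returns the previous value, so only the
     * first caller after the flag was cleared schedules the work */
    if (subflow_closed && !atomic_flag_test_and_set(&work_close_subflow))
        schedule_work();
}
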
1251 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1255 else if (READ_ONCE(msk->csum_enabled))
1261 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1267 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1273 if (sock_flag((struct sock *)msk, SOCK_DEAD))
1285 mptcp_reset_tout_timer(msk, subflow->fail_tout);
1292 struct mptcp_sock *msk;
1300 msk = mptcp_sk(subflow->conn);
1305 status = get_mapping_status(ssk, msk);
1318 if (unlikely(!READ_ONCE(msk->can_ack)))
1321 old_ack = READ_ONCE(msk->ack_seq);
1323 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1336 subflow_sched_work_if_closed(msk, ssk);
1340 if (!__mptcp_check_fallback(msk)) {
1346 if (!READ_ONCE(msk->allow_infinite_fallback)) {
1351 mptcp_subflow_fail(msk, ssk);
1378 subflow->map_seq = READ_ONCE(msk->ack_seq);
1427 * and msk socket spinlock
1445 struct mptcp_sock *msk;
1449 msk = mptcp_sk(parent);
1461 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1468 * respect the msk-level threshold eventually mandating an immediate ack
1470 if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
1550 struct mptcp_sock *msk = mptcp_sk(sk);
1579 mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1582 subflow->remote_key = READ_ONCE(msk->remote_key);
1583 subflow->local_key = READ_ONCE(msk->local_key);
1584 subflow->token = msk->token;
1598 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1604 subflow->subflow_id = msk->subflow_id++;
1608 list_add_tail(&subflow->node, &msk->conn_list);
1616 WRITE_ONCE(msk->allow_infinite_fallback, false);
1632 mptcp_pm_close_subflow(msk);
1790 struct mptcp_sock *msk;
1794 msk = mptcp_sk(parent);
1797 pr_fallback(msk);
1814 * ingress data fin, so that the msk state will follow along
1816 if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1817 mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1842 /* can't acquire the msk socket lock under the subflow one,
1881 /* we are still under the listener msk socket lock */
1943 /* if the msk has been orphaned, keep the ctx