Lines Matching refs:msk (net/mptcp/protocol.h)

302 					* protected by the msk data lock
353 static inline void msk_owned_by_me(const struct mptcp_sock *msk)
355 sock_owned_by_me((const struct sock *)msk);
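Matches 353 and 355 span this helper in full; only the brace lines, which do not contain "msk", are elided by the search. Reconstructed:

        static inline void msk_owned_by_me(const struct mptcp_sock *msk)
        {
                /* assert that the caller owns the msk socket lock */
                sock_owned_by_me((const struct sock *)msk);
        }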
376 /* the msk socket doesn't use the backlog, also account for the bulk
396 const struct mptcp_sock *msk = mptcp_sk(sk);
398 return READ_ONCE(msk->first_pending);
403 struct mptcp_sock *msk = mptcp_sk(sk);
406 cur = msk->first_pending;
407 return list_is_last(&cur->list, &msk->rtx_queue) ? NULL :
413 const struct mptcp_sock *msk = mptcp_sk(sk);
415 if (!msk->first_pending)
418 if (WARN_ON_ONCE(list_empty(&msk->rtx_queue)))
421 return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
426 struct mptcp_sock *msk = mptcp_sk(sk);
428 if (msk->snd_una == msk->snd_nxt)
431 return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
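Matches 396-431 are the bodies of the write/retransmit queue accessors; their signature lines lack "msk" and so are not shown. A reconstruction of the last one (the name mptcp_rtx_head() is inferred from current kernel trees, not from the matches themselves):

        static inline struct mptcp_data_frag *mptcp_rtx_head(struct sock *sk)
        {
                struct mptcp_sock *msk = mptcp_sk(sk);

                /* everything sent so far has been acked: nothing to retransmit */
                if (msk->snd_una == msk->snd_nxt)
                        return NULL;

                return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
        }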
457 struct mptcp_sock *msk;
486 unsigned long avg_pacing_rate; /* protected by msk socket lock */
550 int cached_sndbuf; /* sndbuf size when last synced with the msk sndbuf,
551 * protected by the msk socket lock
688 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
703 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk);
726 int mptcp_init_sched(struct mptcp_sock *msk,
728 void mptcp_release_sched(struct mptcp_sock *msk);
731 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
732 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
733 int mptcp_sched_get_send(struct mptcp_sock *msk);
734 int mptcp_sched_get_retrans(struct mptcp_sock *msk);
736 static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
738 return READ_ONCE(msk->bytes_received) - READ_ONCE(msk->bytes_consumed);
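Matches 736/738 again cover the whole inline; the READ_ONCE() pair lets the byte counters be read without holding the msk socket lock:

        static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
        {
                /* lockless: both counters are updated with WRITE_ONCE() elsewhere */
                return READ_ONCE(msk->bytes_received) - READ_ONCE(msk->bytes_consumed);
        }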
800 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
829 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
849 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
850 static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
852 return READ_ONCE(msk->snd_data_fin_enable) &&
853 READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
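Lines 850-853 form one complete predicate: DATA_FIN may be scheduled only once every queued byte (write_seq) has been handed to a subflow (snd_nxt). Reconstructed:

        static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
        {
                return READ_ONCE(msk->snd_data_fin_enable) &&
                       READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
        }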
867 const struct mptcp_sock *msk = mptcp_sk(sk);
870 notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);
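Match 870 computes the not-yet-sent backlog as the gap between what was queued (write_seq) and what was pushed to subflows (snd_nxt). The enclosing function and its final comparison are elided here; a sketch, assuming this is the stream-memory-free check found in current trees (name and return line inferred):

        static inline bool mptcp_stream_memory_free(const struct sock *sk, int wake)
        {
                const struct mptcp_sock *msk = mptcp_sk(sk);
                u32 notsent_bytes;

                notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);
                return (notsent_bytes << wake) < tcp_notsent_lowat(sk);
        }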
904 /* the msk max wmem limit is <nr_subflows> * tcp wmem[2] */
909 /* The caller held both the msk socket and the subflow socket locks,
921 * BH context. Additionally this can be called under the msk data lock,
937 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
951 struct mptcp_sock *msk);
956 void mptcp_token_destroy(struct mptcp_sock *msk);
964 void mptcp_pm_data_init(struct mptcp_sock *msk);
965 void mptcp_pm_data_reset(struct mptcp_sock *msk);
974 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
975 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
976 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
977 void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk);
978 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
979 void mptcp_pm_connection_closed(struct mptcp_sock *msk);
980 void mptcp_pm_subflow_established(struct mptcp_sock *msk);
981 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk);
982 void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
986 void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
988 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
989 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
990 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
994 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
998 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
1000 void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
1001 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
1003 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
1006 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
1008 int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
1011 int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
1013 int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
1019 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
1022 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
1023 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
1024 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
1026 void mptcp_free_local_addr_list(struct mptcp_sock *msk);
1028 void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
1031 void mptcp_event_addr_removed(const struct mptcp_sock *msk, u8 id);
1034 bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
1036 void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
1043 static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
1045 return READ_ONCE(msk->pm.addr_signal) &
1049 static inline bool mptcp_pm_should_add_signal_addr(struct mptcp_sock *msk)
1051 return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
1054 static inline bool mptcp_pm_should_add_signal_echo(struct mptcp_sock *msk)
1056 return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO);
1059 static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
1061 return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL);
1064 static inline bool mptcp_pm_is_userspace(const struct mptcp_sock *msk)
1066 return READ_ONCE(msk->pm.pm_type) == MPTCP_PM_TYPE_USERSPACE;
1069 static inline bool mptcp_pm_is_kernel(const struct mptcp_sock *msk)
1071 return READ_ONCE(msk->pm.pm_type) == MPTCP_PM_TYPE_KERNEL;
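The predicates at 1043-1071 all follow one shape: a lockless READ_ONCE() of a path-manager field tested against a BIT() flag or an enum value. One reconstructed example stands in for the group:

        static inline bool mptcp_pm_should_add_signal_addr(struct mptcp_sock *msk)
        {
                return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
        }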
1097 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
1101 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
1103 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
1104 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
1105 int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
1126 void mptcp_pm_nl_work(struct mptcp_sock *msk);
1127 void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
1129 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
1130 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
1131 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
1132 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
1135 static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
1137 if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
1138 WRITE_ONCE(msk->pm.accept_subflow, true);
1141 static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
1143 spin_lock_bh(&msk->pm.lock);
1144 __mptcp_pm_close_subflow(msk);
1145 spin_unlock_bh(&msk->pm.lock);
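Lines 1135-1145 show the header's usual locked/unlocked pairing: the double-underscore variant expects pm.lock to be held, the plain variant takes it. Reconstructed in full:

        static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
        {
                /* caller must hold msk->pm.lock */
                if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
                        WRITE_ONCE(msk->pm.accept_subflow, true);
        }

        static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
        {
                spin_lock_bh(&msk->pm.lock);
                __mptcp_pm_close_subflow(msk);
                spin_unlock_bh(&msk->pm.lock);
        }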
1148 void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
1149 void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
1158 static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
1160 return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
1166 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1168 return __mptcp_check_fallback(msk);
1171 static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
1173 if (__mptcp_check_fallback(msk)) {
1174 pr_debug("TCP fallback already done (msk=%p)\n", msk);
1177 set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
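Matches 1171-1177 are nearly the whole fallback marker; only the braces and the early return are elided (the return after the debug message is inferred):

        static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
        {
                if (__mptcp_check_fallback(msk)) {
                        pr_debug("TCP fallback already done (msk=%p)\n", msk);
                        return;
                }
                set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
        }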
1180 static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
1182 struct sock *ssk = READ_ONCE(msk->first);
1193 struct mptcp_sock *msk;
1195 msk = mptcp_sk(sk);
1196 __mptcp_do_fallback(msk);
1197 if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
1210 #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)