Lines matching refs: msk — each entry below is a source line number followed by the matching source line; the symbols point at the Linux MPTCP in-kernel path manager (apparently net/mptcp/pm_netlink.c).

52 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
54 return pm_nl_get_pernet(sock_net((struct sock *)msk));
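
The helper at lines 52-54 resolves per-netns PM state: a struct mptcp_sock begins with a struct sock, so it can be cast back, its namespace fetched with sock_net(), and the private data looked up via net_generic(). A sketch of that pattern, assuming a pernet id registered elsewhere with register_pernet_subsys():

        #include <net/net_namespace.h>
        #include <net/netns/generic.h>

        static unsigned int pm_nl_pernet_id;  /* assigned at register_pernet_subsys() time */

        static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
        {
                return net_generic(net, pm_nl_pernet_id);
        }

        static struct pm_nl_pernet *
        pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
        {
                /* struct mptcp_sock embeds struct sock first, so the cast is valid */
                return pm_nl_get_pernet(sock_net((struct sock *)msk));
        }
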
148 const struct mptcp_sock *msk)
152 msk_owned_by_me(msk);
159 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
170 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
177 * Note: removal from the local address list during the msk life-cycle
181 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
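
Both selectors at lines 148-181 walk the pernet endpoint list and skip any id already cleared from msk->pm.id_avail_bitmap, so each endpoint is handed to a given connection at most once. A minimal sketch of that filter (the RCU read lock held by the real walkers is elided; flag checks simplified):

        list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
                if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
                        continue;       /* id already consumed on this msk */
                if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
                        return entry;   /* first still-available signal endpoint */
        }
        return NULL;
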
194 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
196 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
202 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
204 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
210 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
212 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
218 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
220 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
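
The four accessors at lines 194-220 are deliberately thin: each fetches the pernet struct and returns one limit with READ_ONCE(), pairing with WRITE_ONCE() in the netlink limit setters so concurrent updates are safe without a lock. A reconstruction of one of them (the field name is assumed to mirror the accessor):

        unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
        {
                struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

                /* paired with WRITE_ONCE() in the limit setters */
                return READ_ONCE(pernet->subflows_max);
        }
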
226 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
228 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
230 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
231 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
233 WRITE_ONCE(msk->pm.work_pending, false);
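
mptcp_pm_nl_check_work_pending (lines 226-233) clears pm.work_pending when nothing more can be done: either the subflow budget is spent, or no id is set in both the pernet id_bitmap and the msk's id_avail_bitmap. find_next_and_bit() scans the logical AND of the two bitmaps in one pass; when it returns the bitmap size, no common bit exists. A reconstruction, assuming both bitmaps span MPTCP_PM_MAX_ADDR_ID + 1 bits:

        if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
            (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
                               MPTCP_PM_MAX_ADDR_ID + 1, 0) ==
             MPTCP_PM_MAX_ADDR_ID + 1)) {
                WRITE_ONCE(msk->pm.work_pending, false);
                return false;
        }
        return true;
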
240 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
245 lockdep_assert_held(&msk->pm.lock);
247 list_for_each_entry(entry, &msk->pm.anno_list, list) {
255 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
263 spin_lock_bh(&msk->pm.lock);
264 list_for_each_entry(entry, &msk->pm.anno_list, list) {
272 spin_unlock_bh(&msk->pm.lock);
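
Lines 240-272 show the two access modes for the ADD_ADDR announce list: mptcp_lookup_anno_list_by_saddr() asserts the caller already holds msk->pm.lock, while mptcp_pm_sport_in_anno_list() takes the lock itself because its callers do not. A sketch of the locked lookup (the address comparison helper name is assumed from the kernel's protocol.h):

        struct mptcp_pm_add_entry *entry;

        lockdep_assert_held(&msk->pm.lock);

        list_for_each_entry(entry, &msk->pm.anno_list, list) {
                if (mptcp_addresses_equal(&entry->addr, addr, true))
                        return entry;   /* third arg: compare the port too */
        }
        return NULL;
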
279 struct mptcp_sock *msk = entry->sock;
280 struct sock *sk = (struct sock *)msk;
282 pr_debug("msk=%p", msk);
284 if (!msk)
293 if (mptcp_pm_should_add_signal_addr(msk)) {
298 spin_lock_bh(&msk->pm.lock);
300 if (!mptcp_pm_should_add_signal_addr(msk)) {
302 mptcp_pm_announce_addr(msk, &entry->addr, false);
303 mptcp_pm_add_addr_send_ack(msk);
311 spin_unlock_bh(&msk->pm.lock);
314 mptcp_pm_subflow_established(msk);
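
The timer callback around lines 279-314 implements ADD_ADDR retransmission: if the peer's echo has not arrived, it re-queues the announce, re-checking mptcp_pm_should_add_signal_addr() inside msk->pm.lock so a concurrently processed echo cannot race the retransmit. A condensed sketch of the core (the early-exit and accounting paths are trimmed):

        spin_lock_bh(&msk->pm.lock);

        if (!mptcp_pm_should_add_signal_addr(msk)) {
                /* echo not seen and no announce queued: send it again */
                mptcp_pm_announce_addr(msk, &entry->addr, false);
                mptcp_pm_add_addr_send_ack(msk);
                entry->retrans_times++;
        }

        if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
                sk_reset_timer(sk, &entry->add_timer,
                               jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));

        spin_unlock_bh(&msk->pm.lock);
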
321 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
325 struct sock *sk = (struct sock *)msk;
327 spin_lock_bh(&msk->pm.lock);
328 entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
331 spin_unlock_bh(&msk->pm.lock);
339 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
343 struct sock *sk = (struct sock *)msk;
346 lockdep_assert_held(&msk->pm.lock);
348 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
351 if (mptcp_pm_is_kernel(msk))
363 list_add(&add_entry->list, &msk->pm.anno_list);
366 add_entry->sock = msk;
376 void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
379 struct sock *sk = (struct sock *)msk;
382 pr_debug("msk=%p", msk);
384 spin_lock_bh(&msk->pm.lock);
385 list_splice_init(&msk->pm.anno_list, &free_list);
386 spin_unlock_bh(&msk->pm.lock);
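
mptcp_pm_free_anno_list (lines 376-386) uses the classic teardown idiom: splice the whole announce list onto a private head while holding the lock, then stop timers and free entries without it, so no timer callback can run against a half-freed list. A sketch (entry type and timer field assumed from the surrounding fragments):

        LIST_HEAD(free_list);
        struct mptcp_pm_add_entry *entry, *tmp;

        spin_lock_bh(&msk->pm.lock);
        list_splice_init(&msk->pm.anno_list, &free_list);
        spin_unlock_bh(&msk->pm.lock);

        list_for_each_entry_safe(entry, tmp, &free_list, list) {
                sk_stop_timer_sync(sk, &entry->add_timer);
                kfree(entry);
        }
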
397 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
402 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
403 struct sock *sk = (struct sock *)msk, *ssk;
409 subflows_max = mptcp_pm_get_subflows_max(msk);
422 msk->pm.subflows++;
431 mptcp_for_each_subflow(msk, subflow)
435 mptcp_for_each_subflow(msk, subflow) {
448 if (msk->pm.subflows < subflows_max) {
453 msk->pm.subflows++;
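
fill_remote_addresses_vec (lines 397-453) chooses which peer addresses a new local endpoint should connect to. Without the fullmesh flag it targets only the peer's primary address, unless the peer sent deny_join_id0; with it, it walks every subflow and collects each distinct peer address, charging msk->pm.subflows for every slot filled. A condensed sketch of the non-fullmesh branch (remote_address() stands in for the file's peer-address helper; family and dedup checks trimmed):

        if (!fullmesh) {
                if (deny_id0)
                        return 0;       /* peer refused joins towards its id-0 address */

                remote_address((struct sock_common *)sk, &addrs[i]);
                if (msk->pm.subflows < subflows_max) {
                        msk->pm.subflows++;
                        i++;
                }
                return i;
        }
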
462 static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
469 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
482 static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
485 spin_unlock_bh(&msk->pm.lock);
486 __mptcp_pm_send_ack(msk, subflow, prio, backup);
487 spin_lock_bh(&msk->pm.lock);
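
mptcp_pm_send_ack (lines 482-487) is a wrapper that drops msk->pm.lock around __mptcp_pm_send_ack(), because sending an ACK takes the subflow socket lock, which must never be acquired while the pm spinlock is held. The shape, with the lock-ordering rule called out:

        static void mptcp_pm_send_ack(struct mptcp_sock *msk,
                                      struct mptcp_subflow_context *subflow,
                                      bool prio, bool backup)
        {
                /* lock order: the ssk socket lock must not nest inside pm.lock */
                spin_unlock_bh(&msk->pm.lock);
                __mptcp_pm_send_ack(msk, subflow, prio, backup);
                spin_lock_bh(&msk->pm.lock);
        }
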
514 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
516 struct sock *sk = (struct sock *)msk;
525 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
526 local_addr_max = mptcp_pm_get_local_addr_max(msk);
527 subflows_max = mptcp_pm_get_subflows_max(msk);
530 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
531 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
536 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
540 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
541 msk->mpc_endpoint_id = entry->addr.id;
547 mptcp_pm_send_ack(msk, subflow, true, backup);
549 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
553 msk->pm.local_addr_used, local_addr_max,
554 msk->pm.add_addr_signaled, add_addr_signal_max,
555 msk->pm.subflows, subflows_max);
558 if (msk->pm.add_addr_signaled < add_addr_signal_max) {
559 local = select_signal_address(pernet, msk);
568 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
572 if (mptcp_pm_alloc_anno_list(msk, &local->addr)) {
573 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
574 msk->pm.add_addr_signaled++;
575 mptcp_pm_announce_addr(msk, &local->addr, false);
576 mptcp_pm_nl_addr_send_ack(msk);
582 while (msk->pm.local_addr_used < local_addr_max &&
583 msk->pm.subflows < subflows_max) {
588 local = select_local_address(pernet, msk);
594 msk->pm.local_addr_used++;
595 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
596 nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
600 spin_unlock_bh(&msk->pm.lock);
603 spin_lock_bh(&msk->pm.lock);
605 mptcp_pm_nl_check_work_pending(msk);
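
mptcp_pm_create_subflow_or_signal_addr (lines 514-605) runs two budgeted phases: first it announces one signal endpoint per invocation while add_addr_signaled is below its limit, then it opens subflows while both local_addr_used and subflows stay under theirs, clearing each consumed id from msk->pm.id_avail_bitmap. A skeleton of the second phase (declarations and the fullmesh flag handling are elided):

        while (msk->pm.local_addr_used < local_addr_max &&
               msk->pm.subflows < subflows_max) {
                local = select_local_address(pernet, msk);
                if (!local)
                        break;

                msk->pm.local_addr_used++;
                __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
                nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);

                /* connecting takes subflow locks: do it outside pm.lock */
                spin_unlock_bh(&msk->pm.lock);
                for (i = 0; i < nr; i++)
                        __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
                spin_lock_bh(&msk->pm.lock);
        }
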
608 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
610 mptcp_pm_create_subflow_or_signal_addr(msk);
613 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
615 mptcp_pm_create_subflow_or_signal_addr(msk);
621 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
625 struct sock *sk = (struct sock *)msk;
631 pernet = pm_nl_get_pernet_from_msk(msk);
632 subflows_max = mptcp_pm_get_subflows_max(msk);
642 if (msk->pm.subflows < subflows_max) {
643 msk->pm.subflows++;
666 msk->pm.subflows++;
673 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
676 struct sock *sk = (struct sock *)msk;
683 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
684 subflows_max = mptcp_pm_get_subflows_max(msk);
687 msk->pm.add_addr_accepted, add_addr_accept_max,
688 msk->pm.remote.family);
690 remote = msk->pm.remote;
691 mptcp_pm_announce_addr(msk, &remote, true);
692 mptcp_pm_nl_addr_send_ack(msk);
694 if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
704 nr = fill_local_addresses_vec(msk, &remote, addrs);
708 spin_unlock_bh(&msk->pm.lock);
712 spin_lock_bh(&msk->pm.lock);
715 msk->pm.add_addr_accepted++;
716 if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
717 msk->pm.subflows >= subflows_max)
718 WRITE_ONCE(msk->pm.accept_addr, false);
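
mptcp_pm_nl_add_addr_received (lines 673-718) handles a peer announcement in three steps: echo the ADD_ADDR back, connect towards the announced address from up to nr local addresses, then charge the accept budget, flipping accept_addr off once either limit is reached so further announcements are ignored. A condensed sketch of the first two steps (error paths and MIB counters omitted):

        remote = msk->pm.remote;
        mptcp_pm_announce_addr(msk, &remote, true);     /* echo flag set */
        mptcp_pm_nl_addr_send_ack(msk);

        nr = fill_local_addresses_vec(msk, &remote, addrs);

        /* connecting takes subflow locks: do it outside pm.lock */
        spin_unlock_bh(&msk->pm.lock);
        for (i = 0; i < nr; i++)
                __mptcp_subflow_connect(sk, &addrs[i], &remote);
        spin_lock_bh(&msk->pm.lock);
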
722 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
726 msk_owned_by_me(msk);
727 lockdep_assert_held(&msk->pm.lock);
729 if (!mptcp_pm_should_add_signal(msk) &&
730 !mptcp_pm_should_rm_signal(msk))
733 subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
735 mptcp_pm_send_ack(msk, subflow, false, false);
738 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
747 mptcp_for_each_subflow(msk, subflow) {
761 __mptcp_pm_send_ack(msk, subflow, true, bkup);
768 static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
770 return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
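
mptcp_local_id_match (lines 768-770) hides the id-0 special case: the address used by the initial (MPC) subflow always carries local id 0, so a removal naming the recorded mpc_endpoint_id must also match subflows whose local id is 0. The comment fragment at line 1497 refers to the same rule. Restated with the invariant spelled out:

        /* local id 0 is the implicit id of the initial-subflow address;
         * its configured endpoint id is remembered in msk->mpc_endpoint_id
         */
        static bool mptcp_local_id_match(const struct mptcp_sock *msk,
                                         u8 local_id, u8 id)
        {
                return local_id == id ||
                       (!local_id && msk->mpc_endpoint_id == id);
        }
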
773 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
778 struct sock *sk = (struct sock *)msk;
784 msk_owned_by_me(msk);
792 if (list_empty(&msk->conn_list))
799 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
807 if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
812 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
813 spin_unlock_bh(&msk->pm.lock);
818 spin_lock_bh(&msk->pm.lock);
825 __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
831 if (!mptcp_pm_is_kernel(msk))
835 msk->pm.add_addr_accepted--;
836 WRITE_ONCE(msk->pm.accept_addr, true);
838 msk->pm.local_addr_used--;
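
After each removal, lines 825-838 hand the resources back to the budgets: the endpoint id is set again in msk->pm.id_avail_bitmap (id 0 maps back to mpc_endpoint_id), and for the in-kernel PM either the ADD_ADDR accept budget or the local-address budget is refunded, depending on what was removed. The budget-release step, reconstructed:

        __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);

        if (!mptcp_pm_is_kernel(msk))
                continue;

        if (rm_type == MPTCP_MIB_RMADDR) {
                msk->pm.add_addr_accepted--;
                WRITE_ONCE(msk->pm.accept_addr, true);
        } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
                msk->pm.local_addr_used--;
        }
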
843 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
845 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
848 void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
851 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
854 void mptcp_pm_nl_work(struct mptcp_sock *msk)
856 struct mptcp_pm_data *pm = &msk->pm;
858 msk_owned_by_me(msk);
863 spin_lock_bh(&msk->pm.lock);
865 pr_debug("msk=%p status=%x", msk, pm->status);
868 mptcp_pm_nl_add_addr_received(msk);
872 mptcp_pm_nl_addr_send_ack(msk);
876 mptcp_pm_nl_rm_addr_received(msk);
880 mptcp_pm_nl_fully_established(msk);
884 mptcp_pm_nl_subflow_established(msk);
887 spin_unlock_bh(&msk->pm.lock);
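
mptcp_pm_nl_work (lines 854-887) is the worker entry point: under msk->pm.lock it tests and clears one pm->status bit per event class and calls the matching handler visible in the fragments above. A condensed sketch of the dispatch (two of the five events shown; the others follow the same shape):

        spin_lock_bh(&msk->pm.lock);

        if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
                mptcp_pm_nl_add_addr_received(msk);
        }
        if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
                pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
                mptcp_pm_nl_rm_addr_received(msk);
        }
        /* ... send-ack and (subflow-)established events likewise ... */

        spin_unlock_bh(&msk->pm.lock);
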
1022 /* The subflow socket lock is acquired in a nested to the msk one
1023 * in several places, even by the TCP stack, and this msk is a kernel
1025 * modifiers in several places, re-init the lock class for the msk
1055 * under the msk socket lock. For the moment, that will not bring
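
The comment fragments at lines 1022-1025 belong to the in-kernel PM's listening-socket setup: that msk is a kernel socket whose lock gets taken nested under another msk socket lock, so rather than sprinkling _nested annotations everywhere, the code re-keys the socket's lockdep class. A hedged sketch of the idiom (the key variables, 'newsk', and the name strings are illustrative, not the file's exact identifiers):

        static struct lock_class_key pm_slock_key, pm_key;   /* illustrative keys */

        /* give the kernel-socket msk its own lockdep class so nesting
         * under another msk socket lock does not trip lockdep
         */
        sock_lock_init_class_and_name(newsk, "pm-slock", &pm_slock_key,
                                      "pm-sk_lock", &pm_key);
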
1068 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
1074 pernet = pm_nl_get_pernet_from_msk(msk);
1115 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
1118 struct sock *sk = (struct sock *)msk;
1130 mptcp_for_each_subflow(msk, iter) {
1144 * is cheap under the msk socket lock
1267 struct mptcp_sock *msk;
1270 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1271 struct sock *sk = (struct sock *)msk;
1273 if (!READ_ONCE(msk->fully_established) ||
1274 mptcp_pm_is_userspace(msk))
1278 spin_lock_bh(&msk->pm.lock);
1279 mptcp_pm_create_subflow_or_signal_addr(msk);
1280 spin_unlock_bh(&msk->pm.lock);
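
Lines 1267-1280 show the standard pattern for visiting every MPTCP socket in a namespace: mptcp_token_iter_next() walks the token hash, with s_slot/s_num as the resume cursor, and each returned msk carries a reference the caller must drop. A sketch of the loop body:

        long s_slot = 0, s_num = 0;
        struct mptcp_sock *msk;

        while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
                struct sock *sk = (struct sock *)msk;

                if (READ_ONCE(msk->fully_established) &&
                    !mptcp_pm_is_userspace(msk)) {
                        lock_sock(sk);
                        spin_lock_bh(&msk->pm.lock);
                        mptcp_pm_create_subflow_or_signal_addr(msk);
                        spin_unlock_bh(&msk->pm.lock);
                        release_sock(sk);
                }

                sock_put(sk);           /* drop the iterator's reference */
                cond_resched();
        }
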
1359 int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
1363 struct sock *sk = (struct sock *)msk;
1377 static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
1382 entry = mptcp_pm_del_add_timer(msk, addr, false);
1392 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
1401 ret = remove_anno_list_by_saddr(msk, addr);
1403 spin_lock_bh(&msk->pm.lock);
1404 mptcp_pm_remove_addr(msk, &list);
1405 spin_unlock_bh(&msk->pm.lock);
1416 struct mptcp_sock *msk;
1422 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1423 struct sock *sk = (struct sock *)msk;
1426 if (mptcp_pm_is_userspace(msk))
1429 if (list_empty(&msk->conn_list)) {
1430 mptcp_pm_remove_anno_addr(msk, addr, false);
1435 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
1436 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
1439 mptcp_pm_remove_subflow(msk, &list);
1455 struct mptcp_sock *msk;
1459 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1460 struct sock *sk = (struct sock *)msk;
1463 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1466 mptcp_local_address((struct sock_common *)msk, &msk_local);
1471 spin_lock_bh(&msk->pm.lock);
1472 mptcp_pm_remove_addr(msk, &list);
1473 mptcp_pm_nl_rm_subflow_received(msk, &list);
1474 spin_unlock_bh(&msk->pm.lock);
1497 /* the zero id address is special: the first address used by the msk
1533 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
1539 if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
1540 lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
1546 spin_lock_bh(&msk->pm.lock);
1547 mptcp_pm_remove_addr(msk, &alist);
1548 spin_unlock_bh(&msk->pm.lock);
1552 static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
1559 if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
1563 if (remove_anno_list_by_saddr(msk, &entry->addr) &&
1569 spin_lock_bh(&msk->pm.lock);
1570 mptcp_pm_remove_addr(msk, &alist);
1571 spin_unlock_bh(&msk->pm.lock);
1574 mptcp_pm_remove_subflow(msk, &slist);
1581 struct mptcp_sock *msk;
1586 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1587 struct sock *sk = (struct sock *)msk;
1589 if (!mptcp_pm_is_userspace(msk)) {
1591 mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
1853 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
1860 spin_lock_bh(&msk->pm.lock);
1861 mptcp_pm_nl_rm_subflow_received(msk, &list);
1862 mptcp_pm_create_subflow_or_signal_addr(msk);
1863 spin_unlock_bh(&msk->pm.lock);
1871 struct mptcp_sock *msk;
1874 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1875 struct sock *sk = (struct sock *)msk;
1877 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1882 ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup);
1884 mptcp_pm_nl_fullmesh(msk, addr);
1960 bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
1963 sock_net((const struct sock *)msk),
2017 const struct mptcp_sock *msk,
2020 const struct sock *sk = (const struct sock *)msk;
2024 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
2050 const struct mptcp_sock *msk,
2053 return mptcp_event_put_token_and_ssk(skb, msk, ssk);
2057 const struct mptcp_sock *msk,
2062 if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
2079 const struct mptcp_sock *msk,
2082 int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
2087 if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
2093 void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
2095 struct net *net = sock_net((const struct sock *)msk);
2110 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
2128 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2145 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
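
The event builders at lines 2017-2145 all follow the same genetlink shape: allocate a message, put MPTCP_ATTR_* attributes (the token is read with READ_ONCE() since events fire from varied contexts), and multicast the result. A hedged sketch of a minimal builder (the family and mcast-send helper names are assumed; error unwinding shortened):

        #include <net/genetlink.h>

        struct sk_buff *skb;
        void *hdr;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
        if (!hdr)
                goto free;

        if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
                goto cancel;

        genlmsg_end(skb, hdr);
        mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
        return;

        cancel:
                genlmsg_cancel(skb, hdr);
        free:
                nlmsg_free(skb);
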
2233 void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
2236 struct net *net = sock_net((const struct sock *)msk);
2257 if (mptcp_event_created(skb, msk, ssk) < 0)
2261 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
2271 if (mptcp_event_sub_established(skb, msk, ssk) < 0)
2275 if (mptcp_event_sub_closed(skb, msk, ssk) < 0)