Lines Matching defs:subflow

69 struct mptcp_subflow_context *subflow;
80 subflow = mptcp_subflow_ctx(ssock->sk);
81 list_add(&subflow->node, &msk->conn_list);
83 subflow->request_mptcp = 1;
84 subflow->subflow_id = msk->subflow_id++;
86 /* This is the first subflow, always with id 0 */
87 WRITE_ONCE(subflow->local_id, 0);
94 /* If the MPC handshake is not started, returns the first subflow,
341 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
351 /* try to fetch required memory from subflow */
361 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
481 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
483 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
485 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
491 struct mptcp_subflow_context *subflow;
494 mptcp_for_each_subflow(mptcp_sk(sk), subflow)
495 tout = max(tout, mptcp_timeout_from_subflow(subflow));
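
The fragments above (lines 481-495) show how the msk-level retransmit timer is derived: each subflow contributes the time left on its own pending TCP timer, a stale subflow contributes nothing, and the connection keeps the largest value. A minimal user-space sketch of that pattern, with illustrative types rather than the kernel's:

/* Minimal sketch of the "largest pending subflow timeout" pattern seen
 * around lines 481-495; struct and field names are illustrative.
 */
#include <stddef.h>

struct demo_subflow {
	long timeout;			/* absolute expiry, 0 if no timer pending */
	int stale_count;		/* non-zero once the subflow looks stale */
	struct demo_subflow *next;
};

static long timeout_from_subflow(const struct demo_subflow *sf, long now)
{
	/* a stale subflow must not extend the connection-level timer */
	return sf->timeout && !sf->stale_count ? sf->timeout - now : 0;
}

static long connection_timeout(const struct demo_subflow *head, long now)
{
	long tout = 0;

	for (const struct demo_subflow *sf = head; sf; sf = sf->next) {
		long cur = timeout_from_subflow(sf, now);

		if (cur > tout)
			tout = cur;
	}
	return tout;
}
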
522 struct mptcp_subflow_context *subflow;
524 mptcp_for_each_subflow(msk, subflow)
525 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
554 struct mptcp_subflow_context *subflow;
562 mptcp_for_each_subflow(msk, subflow) {
563 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
579 * at the subflow level and the msk lock was not held, so this
624 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
652 map_remaining = subflow->map_data_len -
653 mptcp_subflow_get_map_offset(subflow);
672 subflow->map_data_len = skb->len;
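
Lines 361 and 652-672 rely on the DSS mapping arithmetic: a mapping declares that a run of subflow-level sequence numbers carries data sequence numbers starting at map_seq, so the DSN of the byte currently being consumed is map_seq plus the consumer's offset into the mapping. A simplified, self-contained sketch of that arithmetic (it ignores the initial subflow sequence offset the kernel also subtracts):

/* Simplified sketch of the mapping math behind
 * mptcp_subflow_get_map_offset()/_get_mapped_dsn(): a mapping says
 * "subflow bytes [map_subflow_seq, map_subflow_seq + map_data_len)
 * carry data sequence numbers starting at map_seq". Field names mirror
 * the kernel ones but the types are plain C.
 */
#include <stdint.h>
#include <stdio.h>

struct dss_map {
	uint64_t map_seq;		/* first DSN covered by the mapping */
	uint32_t map_subflow_seq;	/* first subflow sequence covered */
	uint32_t map_data_len;		/* length of the mapping */
};

static uint32_t map_offset(const struct dss_map *m, uint32_t copied_seq)
{
	/* how far into the mapping the subflow-level consumer has reached */
	return copied_seq - m->map_subflow_seq;
}

static uint64_t mapped_dsn(const struct dss_map *m, uint32_t copied_seq)
{
	return m->map_seq + map_offset(m, copied_seq);
}

int main(void)
{
	struct dss_map m = { .map_seq = 1000, .map_subflow_seq = 50,
			     .map_data_len = 200 };

	/* 30 bytes into the mapping -> DSN 1030, 170 bytes still to move */
	printf("dsn=%llu remaining=%u\n",
	       (unsigned long long)mapped_dsn(&m, 80),
	       (unsigned)(m.map_data_len - map_offset(&m, 80)));
	return 0;
}
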
790 struct mptcp_subflow_context *subflow;
793 mptcp_for_each_subflow(msk, subflow)
794 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
827 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
832 * subflow at msk destruction time, but we must avoid enqueuing
835 if (unlikely(subflow->disposable))
886 struct mptcp_subflow_context *tmp, *subflow;
889 list_for_each_entry_safe(subflow, tmp, join_list, node) {
890 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
893 list_move_tail(&subflow->node, &msk->conn_list);
922 /* each subflow already holds a reference to the sk, and the
923 * workqueue is invoked by a subflow, so sk can't go away here.
933 struct mptcp_subflow_context *subflow;
937 mptcp_for_each_subflow(msk, subflow) {
938 if (READ_ONCE(subflow->data_avail))
939 return mptcp_subflow_tcp_sock(subflow);
1066 struct mptcp_subflow_context *subflow;
1070 mptcp_for_each_subflow(msk, subflow) {
1071 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1374 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
1376 if (!subflow->stale)
1379 subflow->stale = 0;
1380 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
1383 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1385 if (unlikely(subflow->stale)) {
1386 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
1388 if (subflow->stale_rcv_tstamp == rcv_tstamp)
1391 mptcp_subflow_set_active(subflow);
1393 return __mptcp_subflow_active(subflow);
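
Lines 1374-1393 implement stale recovery: a subflow marked stale is reactivated as soon as its TCP receive timestamp moves past the value sampled when it was declared stale, i.e. as soon as fresh data arrives on it. A hedged sketch of that check, with simplified fields in place of the kernel structures:

/* Sketch of the stale-recovery test in mptcp_subflow_active(): a stale
 * subflow becomes usable again once the recorded receive timestamp has
 * moved. Fields are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct sf_state {
	bool stale;
	uint32_t stale_rcv_tstamp;	/* tstamp sampled when marked stale */
	uint32_t rcv_tstamp;		/* latest TCP receive timestamp */
	bool active;			/* underlying TCP state is usable */
};

static bool sf_usable(struct sf_state *sf)
{
	if (sf->stale) {
		/* no progress since the subflow was marked stale */
		if (sf->stale_rcv_tstamp == sf->rcv_tstamp)
			return false;
		sf->stale = false;	/* recovered */
	}
	return sf->active;
}
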
1401 * returns the subflow that will transmit the next DSS
1407 struct mptcp_subflow_context *subflow;
1415 /* pick the subflow with the lower wmem/wspace ratio */
1421 mptcp_for_each_subflow(msk, subflow) {
1422 trace_mptcp_subflow_get_send(subflow);
1423 ssk = mptcp_subflow_tcp_sock(subflow);
1424 if (!mptcp_subflow_active(subflow))
1427 tout = max(tout, mptcp_timeout_from_subflow(subflow));
1428 nr_active += !subflow->backup;
1429 pace = subflow->avg_pacing_rate;
1432 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
1433 pace = subflow->avg_pacing_rate;
1439 if (linger_time < send_info[subflow->backup].linger_time) {
1440 send_info[subflow->backup].ssk = ssk;
1441 send_info[subflow->backup].linger_time = linger_time;
1446 /* pick the best backup if no other subflow is active */
1456 * otherwise do not use the picked, slower, subflow
1457 * We select the subflow with the shorter estimated time to flush
1459 * to check that subflow has a non empty cwin.
1470 subflow = mptcp_subflow_ctx(ssk);
1471 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
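
Lines 1401-1471 are the default packet scheduler: for every active subflow it estimates how long the data already queued at TCP level would take to drain (queued bytes over the averaged pacing rate) and transmits on the subflow with the smallest such linger time, keeping the best backup as a separate candidate that is used only when no regular subflow is active. A simplified sketch of that selection, with illustrative field names:

/* Simplified sketch of the "lowest wmem/pacing-rate ratio" pick in
 * mptcp_subflow_get_send(); it keeps one best candidate per class
 * (regular vs. backup) and uses the backup only if nothing else is
 * active. Not the kernel code: types and scaling are illustrative.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sched_subflow {
	bool active;
	bool backup;
	uint64_t wmem_queued;		/* bytes already queued at TCP level */
	uint64_t pacing_rate;		/* bytes per second */
	struct sched_subflow *next;
};

static struct sched_subflow *pick_subflow(struct sched_subflow *head)
{
	struct sched_subflow *best[2] = { NULL, NULL };
	uint64_t best_linger[2] = { UINT64_MAX, UINT64_MAX };
	int nr_active = 0;

	for (struct sched_subflow *sf = head; sf; sf = sf->next) {
		uint64_t linger;

		if (!sf->active || !sf->pacing_rate)
			continue;

		nr_active += !sf->backup;
		/* estimated time to flush the already queued data */
		linger = (sf->wmem_queued << 16) / sf->pacing_rate;
		if (linger < best_linger[sf->backup]) {
			best_linger[sf->backup] = linger;
			best[sf->backup] = sf;
		}
	}
	/* fall back to the backup candidate only when no regular
	 * subflow is active
	 */
	return nr_active ? best[0] : best[1];
}
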
1502 * that has been handed to the subflow for transmission
1573 struct mptcp_subflow_context *subflow;
1581 mptcp_for_each_subflow(msk, subflow) {
1582 if (READ_ONCE(subflow->scheduled)) {
1583 mptcp_subflow_set_scheduled(subflow, false);
1586 ssk = mptcp_subflow_tcp_sock(subflow);
1594 /* Need to lock the new subflow only if different
1616 /* at this point we held the socket lock for the last subflow we used */
1639 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1642 /* check for a different subflow usage only after
1646 mptcp_subflow_set_scheduled(subflow, false);
1658 if (READ_ONCE(subflow->scheduled)) {
1659 mptcp_subflow_set_scheduled(subflow, false);
1666 mptcp_for_each_subflow(msk, subflow) {
1667 if (READ_ONCE(subflow->scheduled)) {
1668 xmit_ssk = mptcp_subflow_tcp_sock(subflow);
1670 mptcp_subflow_delegate(subflow,
1705 * first subflow right now. Otherwise we are in the defer_connect
1706 * path, and the first subflow must be already present.
1708 * fastopen attempt, no need to check for additional subflow status.
1979 struct mptcp_subflow_context *subflow;
2003 mptcp_for_each_subflow(msk, subflow) {
2008 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
2048 * get drops at subflow level if skbs can't be moved to
2052 mptcp_for_each_subflow(msk, subflow) {
2056 ssk = mptcp_subflow_tcp_sock(subflow);
2298 /* Find an idle subflow. Return NULL if there is unacked data at tcp
2301 * A backup subflow is returned only if that is the only kind available.
2306 struct mptcp_subflow_context *subflow;
2309 mptcp_for_each_subflow(msk, subflow) {
2310 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2312 if (!__mptcp_subflow_active(subflow))
2318 min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2322 if (subflow->backup) {
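
Lines 2298-2322 pick the subflow used for msk-level retransmissions: prefer a usable non-backup subflow whose TCP queues are already empty, otherwise an idle backup, and when every usable subflow still carries unacked data the backup is used only if none of the busy ones still looks fresh. A simplified sketch of that walk, with illustrative fields:

/* Simplified sketch of the retransmit-subflow pick in
 * mptcp_subflow_get_retrans(): prefer an idle non-backup subflow, fall
 * back to an idle backup, and if every usable subflow still has unacked
 * data return NULL unless they all look stale.
 */
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

struct rtx_subflow {
	bool active;
	bool backup;
	bool tcp_queues_empty;	/* no unacked/unsent data at TCP level */
	int stale_count;
	struct rtx_subflow *next;
};

static struct rtx_subflow *pick_retrans(struct rtx_subflow *head)
{
	struct rtx_subflow *pick = NULL, *backup = NULL;
	int min_stale_count = INT_MAX;

	for (struct rtx_subflow *sf = head; sf; sf = sf->next) {
		if (!sf->active)
			continue;

		if (!sf->tcp_queues_empty) {
			/* busy subflow: remember how fresh it still is */
			if (sf->stale_count < min_stale_count)
				min_stale_count = sf->stale_count;
			continue;
		}

		if (sf->backup) {
			if (!backup)
				backup = sf;
			continue;
		}
		if (!pick)
			pick = sf;
	}

	if (pick)
		return pick;
	/* use the backup only if no busy subflow is making progress */
	return min_stale_count > 1 ? backup : NULL;
}
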
2381 * clean completely the subflow status when the subflow reaches
2385 struct mptcp_subflow_context *subflow,
2390 /* The MPTCP code never wait on the subflow sockets, TCP-level
2394 mptcp_subflow_ctx_reset(subflow);
2400 /* subflow sockets can be either outgoing (connect) or incoming
2409 struct mptcp_subflow_context *subflow,
2415 /* If the first subflow moved to a close state before accept, e.g. due
2416 * to an incoming reset or listener shutdown, the subflow socket is
2432 list_del(&subflow->node);
2442 subflow->send_fastclose = 1;
2447 __mptcp_subflow_disconnect(ssk, subflow, flags);
2453 subflow->disposable = 1;
2461 kfree_rcu(subflow, rcu);
2463 /* otherwise tcp will dispose of the ssk and subflow ctx */
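
Lines 2432-2463 show the msk-driven teardown of a subflow context: it is unlinked from the connection list, marked disposable so the TCP-side release hook leaves it alone, and freed only after an RCU grace period; in every other case plain TCP destruction disposes of both the ssk and the context. A kernel-style sketch of that mark-then-kfree_rcu pattern (illustrative names, not the mptcp structures):

/* Kernel-style sketch of the teardown pattern above: unlink the
 * context, flag it so the release path skips it, then free it after an
 * RCU grace period with kfree_rcu().
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ctx {
	struct list_head node;
	bool disposable;
	struct rcu_head rcu;
};

static void demo_close_ctx(struct demo_ctx *ctx)
{
	list_del(&ctx->node);		/* drop it from the connection list */
	ctx->disposable = true;		/* tell the release hook to skip it */
	kfree_rcu(ctx, rcu);		/* freed once RCU readers are done */
}
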
2502 struct mptcp_subflow_context *subflow)
2507 /* subflow aborted before reaching the fully_established status
2508 * attempt the creation of the next subflow
2510 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);
2512 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2522 struct mptcp_subflow_context *subflow, *tmp;
2527 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2528 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2537 mptcp_close_ssk(sk, ssk, subflow);
2554 struct mptcp_subflow_context *subflow, *tmp;
2562 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2563 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2604 struct mptcp_subflow_context *subflow;
2636 mptcp_for_each_subflow(msk, subflow) {
2637 if (READ_ONCE(subflow->scheduled)) {
2640 mptcp_subflow_set_scheduled(subflow, false);
2642 ssk = mptcp_subflow_tcp_sock(subflow);
2710 pr_debug("MP_FAIL doesn't respond, reset the subflow");
2720 struct mptcp_subflow_context *subflow, *tmp;
2724 mptcp_for_each_subflow_safe(msk, subflow, tmp)
2725 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
2726 subflow, MPTCP_CF_FASTCLOSE);
2890 pr_debug("Sending DATA_FIN on subflow %p", ssk);
2948 struct mptcp_subflow_context *subflow;
2964 mptcp_for_each_subflow(msk, subflow) {
2965 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
3043 struct mptcp_subflow_context *subflow;
3070 mptcp_for_each_subflow(msk, subflow) {
3071 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3080 subflow->fail_tout = 0;
3169 /* msk->subflow is still intact, the following will not free the first
3170 * subflow
3255 struct mptcp_subflow_context *subflow;
3290 /* passive msk is created after the first/MPC subflow */
3301 /* The msk maintain a ref to each subflow in the connections list */
3303 subflow = mptcp_subflow_ctx(ssk);
3304 list_add(&subflow->node, &msk->conn_list);
3307 /* new mpc subflow takes ownership of the newly
3321 __mptcp_subflow_fully_established(msk, subflow, mp_opt);
3347 struct mptcp_subflow_context *subflow, *tmp;
3353 mptcp_for_each_subflow_safe(msk, subflow, tmp)
3354 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
3377 /* allow the following to close even the initial subflow */
3422 /* the following actions acquire the subflow socket lock
3427 * the subflow socket lock
3448 * On sk release avoid actions depending on the first subflow
3461 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3462 * TCP can't schedule delack timer before the subflow is fully established.
3489 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3490 struct sock *sk = subflow->conn;
3539 struct mptcp_subflow_context *subflow;
3543 subflow = mptcp_subflow_ctx(ssk);
3544 sk = subflow->conn;
3547 pr_debug("msk=%p, token=%u", sk, subflow->token);
3549 subflow->map_seq = subflow->iasn;
3550 subflow->map_subflow_seq = 1;
3552 /* the socket is not connected yet, no msk/subflow ops can access/race
3555 WRITE_ONCE(msk->local_key, subflow->local_key);
3571 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3572 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3576 pr_debug("msk=%p, subflow=%p", msk, subflow);
3580 subflow->reset_reason = MPTCP_RST_EMPTCP;
3584 /* active subflow, already present inside the conn_list */
3585 if (!list_empty(&subflow->node)) {
3602 list_add_tail(&subflow->node, &msk->conn_list);
3606 list_add_tail(&subflow->node, &msk->join_list);
3613 subflow->reset_reason = MPTCP_RST_EPROHIBIT;
3649 /* the first subflow is disconnected after close - see
3696 struct mptcp_subflow_context *subflow)
3698 subflow->request_mptcp = 0;
3704 struct mptcp_subflow_context *subflow;
3714 subflow = mptcp_subflow_ctx(ssk);
3720 mptcp_subflow_early_fallback(msk, subflow);
3722 if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
3724 mptcp_subflow_early_fallback(msk, subflow);
3727 WRITE_ONCE(msk->write_seq, subflow->idsn);
3728 WRITE_ONCE(msk->snd_nxt, subflow->idsn);
3733 * acquired the subflow socket lock, too.
3885 * but no need to allocate the first subflow just to error out.
3896 pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
3898 struct mptcp_subflow_context *subflow;
3901 subflow = mptcp_subflow_ctx(newsk);
3902 new_mptcp_sock = subflow->conn;
3904 /* is_mptcp should be false if subflow->conn is missing, see
3926 mptcp_for_each_subflow(msk, subflow) {
3927 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3933 /* Do late cleanup for the first subflow as necessary. Also
4058 struct mptcp_subflow_context *subflow;
4062 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
4063 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4067 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
4071 * In both case must dequeue the subflow here - on the same
4075 clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
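
Lines 4058-4075 drain delegated subflow actions: pending work is latched as bits in delegated_status, and the poller either takes every pending action at once with xchg() (line 4067) or, when it cannot process them itself, only clears the SCHEDULED bit (line 4075) so the actions are handled at socket release time. A user-space sketch of the direct-processing path, using C11 atomics in place of the kernel's xchg()/bit helpers (names and bits are illustrative):

/* Sketch of the delegated-action handoff: the delegator sets action
 * bits plus a SCHEDULED bit and queues the subflow only on the first
 * delegation; the poller grabs all pending actions atomically with an
 * exchange and processes them.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DELEGATE_SCHEDULED	(1u << 0)
#define DELEGATE_SEND		(1u << 1)
#define DELEGATE_ACK		(1u << 2)

static _Atomic unsigned int delegated_status;

static void delegate(unsigned int action)
{
	/* only the first delegation for a quiescent subflow queues it */
	unsigned int old = atomic_fetch_or(&delegated_status,
					   action | DELEGATE_SCHEDULED);
	if (!(old & DELEGATE_SCHEDULED))
		printf("queue subflow for the poller\n");
}

static void poll_once(void)
{
	/* take every pending action, and the SCHEDULED bit, in one shot */
	unsigned int status = atomic_exchange(&delegated_status, 0);

	if (status & DELEGATE_SEND)
		printf("push pending data\n");
	if (status & DELEGATE_ACK)
		printf("send ack\n");
}
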