Lines matching defs:sk — definitions and uses of the socket pointer sk; the function names and line numbers below are consistent with net/xdp/xsk.c, the AF_XDP socket layer.

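Almost every match below funnels through xdp_sk(), which recovers the AF_XDP socket from the generic struct sock. A minimal sketch of that conversion, assuming (as net/xdp/xsk.c does) that struct xdp_sock embeds struct sock as its first member:

        struct xdp_sock {
                struct sock sk;         /* must stay first so the cast below is valid */
                /* ... rings, buffer pool, bound device and queue id ... */
        };

        static struct xdp_sock *xdp_sk(struct sock *sk)
        {
                return (struct xdp_sock *)sk;
        }

The same trick is what makes casts like xdp_sk(skb->sk) in the matches at 588-612 cheap: no lookup, just a pointer reinterpretation.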
330 sk_mark_napi_id_once_xdp(&xs->sk, xdp);
338 sock_def_readable(&xs->sk);
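Lines 330 and 338 are the RX fast path: the first records the receive queue's NAPI id on the socket (once), the second is the default data-ready callback that wakes any reader sleeping in poll() or recvmsg(). A sketch of the marking helper, close to what include/net/busy_poll.h provides:

        static inline void __sk_mark_napi_id_once(struct sock *sk,
                                                  unsigned int napi_id)
        {
        #ifdef CONFIG_NET_RX_BUSY_POLL
                if (!READ_ONCE(sk->sk_napi_id))
                        WRITE_ONCE(sk->sk_napi_id, napi_id);
        #endif
        }

        static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
                                                    const struct xdp_buff *xdp)
        {
                __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
        }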
426 xs->sk.sk_write_space(&xs->sk);
529 xs->sk.sk_write_space(&xs->sk);
588 xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
594 long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
601 struct xdp_sock *xs = xdp_sk(skb->sk);
612 xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
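Lines 588-612 are the copy-mode completion bookkeeping: the number of TX descriptors an skb consumed is stashed in skb_shinfo(skb)->destructor_arg, so the skb destructor can credit exactly that many entries back to the completion queue (line 588) and the drop path can count them as invalid (line 612). The encode/decode pair, reconstructed around listing line 594:

        static u32 xsk_get_num_desc(struct sk_buff *skb)
        {
                /* The count rides along in the skb, encoded as a pointer */
                return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
        }

        static void xsk_set_destructor_arg(struct sk_buff *skb)
        {
                /* xs->skb is the skb being built; count one more descriptor */
                long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

                skb_shinfo(skb)->destructor_arg = (void *)num;
        }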
630 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
664 refcount_add(ts, &xs->sk.sk_wmem_alloc);
694 skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
718 page = alloc_page(xs->sk.sk_allocation);
729 refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
766 skb->priority = READ_ONCE(xs->sk.sk_priority);
767 skb->mark = READ_ONCE(xs->sk.sk_mark);
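Lines 630-767 show how copy-mode TX builds its skbs: allocate against the socket so memory accounting applies, reserve driver headroom, attach extra pages with the socket's sk_allocation mode, and charge their size to sk_wmem_alloc (lines 664 and 729) because they bypass sock_alloc_send_skb()'s own accounting. A condensed sketch; build_copy_skb is a made-up name, and the real xsk_build_skb() additionally handles fragments, metadata and error unwinding:

        static struct sk_buff *build_copy_skb(struct xdp_sock *xs, u32 hr,
                                              u32 len, u32 tr, int *err)
        {
                struct sk_buff *skb;

                skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, err);
                if (!skb)
                        return NULL;

                skb_reserve(skb, hr);                           /* driver headroom */
                skb->priority = READ_ONCE(xs->sk.sk_priority);  /* line 766 */
                skb->mark = READ_ONCE(xs->sk.sk_mark);          /* line 767 */
                return skb;
        }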
788 static int __xsk_generic_xmit(struct sock *sk)
790 struct xdp_sock *xs = xdp_sk(sk);
868 sk->sk_write_space(sk);
874 static int xsk_generic_xmit(struct sock *sk)
880 ret = __xsk_generic_xmit(sk);
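Lines 788-880 are the copy-mode transmit loop itself; __xsk_generic_xmit() ends by waking writers (line 868) once frames were sent. The thin xsk_generic_xmit() wrapper at 874-880 exists because skb allocation may sleep while the callers hold rcu_read_lock(); in recent kernels the wrapper looks roughly like this:

        static int xsk_generic_xmit(struct sock *sk)
        {
                int ret;

                rcu_read_unlock();              /* the skb path may sleep */
                ret = __xsk_generic_xmit(sk);
                rcu_read_lock();                /* restore the caller's RCU section */

                return ret;
        }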
887 static bool xsk_no_wakeup(struct sock *sk)
891 return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
892 READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
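xsk_no_wakeup() (lines 887-892) is the opt-out for driver wakeups: it holds only when the application prefers busy polling, has set a nonzero busy-poll timeout (sk_ll_usec), and the socket has learned a valid NAPI id. From userspace that state is reached with three socket options; a hedged example, with fallback defines for older libc headers and enable_busy_poll a hypothetical helper:

        #include <sys/socket.h>

        #ifndef SO_PREFER_BUSY_POLL
        #define SO_PREFER_BUSY_POLL 69
        #endif
        #ifndef SO_BUSY_POLL_BUDGET
        #define SO_BUSY_POLL_BUDGET 70
        #endif

        static void enable_busy_poll(int fd)
        {
                int one = 1, usec = 20, budget = 64;

                setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
                setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec));
                setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
        }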
911 struct sock *sk = sock->sk;
912 struct xdp_sock *xs = xdp_sk(sk);
924 if (sk_can_busy_loop(sk)) {
926 __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
927 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
930 if (xs->zc && xsk_no_wakeup(sk))
937 return xsk_generic_xmit(sk);
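xsk_sendmsg() (lines 911-937) busy-loops first when allowed (926-927), skips the wakeup entirely when xsk_no_wakeup() holds for a zero-copy socket (930), and otherwise falls through to the generic copy-mode transmit (937). The matching userspace idiom is a zero-length sendto() used purely as a TX kick; kick_tx is a hypothetical helper name:

        #include <errno.h>
        #include <sys/socket.h>

        static int kick_tx(int xsk_fd)
        {
                /* No payload: this only drives xsk_sendmsg() so the kernel
                 * flushes whatever is pending in the TX ring. */
                if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
                    errno != EAGAIN && errno != EBUSY)
                        return -errno;  /* e.g. ENETDOWN, see lines 1648-1650 */
                return 0;
        }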
956 struct sock *sk = sock->sk;
957 struct xdp_sock *xs = xdp_sk(sk);
968 if (sk_can_busy_loop(sk))
969 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
971 if (xsk_no_wakeup(sk))
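xsk_recvmsg() (lines 956-971) mirrors the same structure on RX. Its userspace counterpart is a zero-length recvfrom() kick, used with the need-wakeup mode so the kernel resumes fill-ring processing; kick_rx is again a made-up name:

        #include <sys/socket.h>

        static void kick_rx(int xsk_fd)
        {
                recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
        }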
994 struct sock *sk = sock->sk;
995 struct xdp_sock *xs = xdp_sk(sk);
1011 xsk_generic_xmit(sk);
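xsk_poll() (lines 994-1011) does more than report readiness: for a copy-mode socket it drives the transmit path itself (line 1011), so a plain poll() loop keeps TX moving even without explicit kicks. For example (wait_ready is a hypothetical helper):

        #include <poll.h>

        static int wait_ready(int xsk_fd, int timeout_ms)
        {
                struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };

                return poll(&pfd, 1, timeout_ms);
        }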
1103 struct sock *sk = sock->sk;
1104 struct xdp_sock *xs = xdp_sk(sk);
1107 if (!sk)
1110 net = sock_net(sk);
1116 sk_del_node_init_rcu(sk);
1119 sock_prot_inuse_add(net, sk->sk_prot, -1);
1131 sock_orphan(sk);
1132 sock->sk = NULL;
1134 sock_put(sk);
1148 if (sock->sk->sk_family != PF_XDP) {
1164 struct sock *sk = sock->sk;
1165 struct xdp_sock *xs = xdp_sk(sk);
1181 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1192 dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1228 umem_xs = xdp_sk(sock->sk);
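xsk_bind() (lines 1164-1228) resolves the target device from sxdp_ifindex (line 1192) and, for shared-umem setups, looks up the companion socket by fd; line 1148 is the guard that rejects a non-PF_XDP fd before line 1228 casts it with xdp_sk(). From userspace the bind is a normal bind() with a struct sockaddr_xdp; the interface name, queue and flags below are placeholders for your setup:

        #include <net/if.h>
        #include <sys/socket.h>
        #include <linux/if_xdp.h>

        static int bind_xsk(int xsk_fd)         /* hypothetical helper */
        {
                struct sockaddr_xdp sxdp = {
                        .sxdp_family = AF_XDP,
                        .sxdp_ifindex = if_nametoindex("eth0"),
                        .sxdp_queue_id = 0,
                        .sxdp_flags = XDP_COPY,
                };

                return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
        }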
1348 struct sock *sk = sock->sk;
1349 struct xdp_sock *xs = xdp_sk(sk);
1467 struct sock *sk = sock->sk;
1468 struct xdp_sock *xs = xdp_sk(sk);
1600 struct xdp_sock *xs = xdp_sk(sock->sk);
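Lines 1348-1600 are the three userspace-facing configuration entry points: xsk_setsockopt(), xsk_getsockopt() and the ring mmap handler. The usual sequence sizes a ring, reads the mmap offsets back, then maps the ring at its fixed page offset; a condensed example for the RX ring (error handling omitted, map_rx_ring is a made-up name):

        #include <sys/mman.h>
        #include <sys/socket.h>
        #include <linux/if_xdp.h>

        static void *map_rx_ring(int fd, unsigned int ndescs)
        {
                struct xdp_mmap_offsets off;
                socklen_t optlen = sizeof(off);

                setsockopt(fd, SOL_XDP, XDP_RX_RING, &ndescs, sizeof(ndescs));
                getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

                return mmap(NULL, off.rx.desc + ndescs * sizeof(struct xdp_desc),
                            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                            fd, XDP_PGOFF_RX_RING);
        }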
1638 struct sock *sk;
1643 sk_for_each(sk, &net->xdp.list) {
1644 struct xdp_sock *xs = xdp_sk(sk);
1648 sk->sk_err = ENETDOWN;
1649 if (!sock_flag(sk, SOCK_DEAD))
1650 sk_error_report(sk);
1691 static void xsk_destruct(struct sock *sk)
1693 struct xdp_sock *xs = xdp_sk(sk);
1695 if (!sock_flag(sk, SOCK_DEAD))
1706 struct sock *sk;
1718 sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1719 if (!sk)
1724 sock_init_data(sock, sk);
1726 sk->sk_family = PF_XDP;
1728 sk->sk_destruct = xsk_destruct;
1730 sock_set_flag(sk, SOCK_RCU_FREE);
1732 xs = xdp_sk(sk);
1741 sk_add_node_rcu(sk, &net->xdp.list);
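Creation closes the loop: xsk_create() (lines 1706-1741) allocates the sock, installs the xsk_destruct callback, marks the socket SOCK_RCU_FREE (which is what makes the RCU-based teardown at line 1116 and the notifier walk at 1643 safe), and links it into the per-netns list. Userspace reaches it with a plain socket() call; the fallback define is for older libc headers:

        #include <stdio.h>
        #include <sys/socket.h>

        #ifndef AF_XDP
        #define AF_XDP 44
        #endif

        int main(void)
        {
                /* Lands in xsk_create() (line 1706): the type must be SOCK_RAW
                 * and the protocol 0, per the checks in net/xdp/xsk.c. */
                int fd = socket(AF_XDP, SOCK_RAW, 0);

                if (fd < 0)
                        perror("socket(AF_XDP)");
                return 0;
        }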