Lines matching refs:xs in net/xdp/xsk.c

52 struct xdp_sock *xs;
58 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
59 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
79 struct xdp_sock *xs;
85 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
86 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
140 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
147 err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
149 xs->rx_queue_full++;
157 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
169 err = __xsk_rcv_zc(xs, xskb, len, contd);
180 err = __xsk_rcv_zc(xs, pos, len, contd);
228 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
230 u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
244 xsk_xdp = xsk_buff_alloc(xs->pool);
246 xs->rx_dropped++;
251 err = __xsk_rcv_zc(xs, xskb, len, 0);
262 if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
263 xs->rx_dropped++;
266 if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
267 xs->rx_queue_full++;
282 xsk_xdp = xsk_buff_alloc(xs->pool);
289 __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
296 static bool xsk_tx_writeable(struct xdp_sock *xs)
298 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
304 static bool xsk_is_bound(struct xdp_sock *xs)
306 if (READ_ONCE(xs->state) == XSK_BOUND) {
314 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
319 if (!xsk_is_bound(xs))
322 if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
325 if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
326 xs->rx_dropped++;
330 sk_mark_napi_id_once_xdp(&xs->sk, xdp);
334 static void xsk_flush(struct xdp_sock *xs)
336 xskq_prod_submit(xs->rx);
337 __xskq_cons_release(xs->pool->fq);
338 sock_def_readable(&xs->sk);
341 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
346 spin_lock_bh(&xs->rx_lock);
347 err = xsk_rcv_check(xs, xdp, len);
349 err = __xsk_rcv(xs, xdp, len);
350 xsk_flush(xs);
352 spin_unlock_bh(&xs->rx_lock);
356 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
361 err = xsk_rcv_check(xs, xdp, len);
367 return xsk_rcv_zc(xs, xdp, len);
370 err = __xsk_rcv(xs, xdp, len);
376 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
381 err = xsk_rcv(xs, xdp);
385 if (!xs->flush_node.prev)
386 list_add(&xs->flush_node, flush_list);
394 struct xdp_sock *xs, *tmp;
396 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
397 xsk_flush(xs);
398 __list_del_clearprev(&xs->flush_node);
420 struct xdp_sock *xs;
423 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
424 __xskq_cons_release(xs->tx);
425 if (xsk_tx_writeable(xs))
426 xs->sk.sk_write_space(&xs->sk);
435 struct xdp_sock *xs;
439 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
440 if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
445 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
446 if (xskq_has_descs(xs->tx))
447 xskq_cons_release(xs->tx);
451 xs->tx_budget_spent++;
461 xskq_cons_release(xs->tx);
467 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
468 xs->tx_budget_spent = 0;
494 struct xdp_sock *xs;
503 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
504 if (!xs) {
509 nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
521 nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
523 xs->tx->queue_empty_descs++;
527 __xskq_cons_release(xs->tx);
529 xs->sk.sk_write_space(&xs->sk);
537 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
539 struct net_device *dev = xs->dev;
541 return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
544 static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
549 spin_lock_irqsave(&xs->pool->cq_lock, flags);
550 ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
551 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
556 static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
560 spin_lock_irqsave(&xs->pool->cq_lock, flags);
561 xskq_prod_submit_n(xs->pool->cq, n);
562 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
565 static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
569 spin_lock_irqsave(&xs->pool->cq_lock, flags);
570 xskq_prod_cancel_n(xs->pool->cq, n);
571 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
601 struct xdp_sock *xs = xdp_sk(skb->sk);
604 xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
607 xs->skb = NULL;
616 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
619 struct xsk_buff_pool *pool = xs->pool;
621 struct sk_buff *skb = xs->skb;
628 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
630 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
664 refcount_add(ts, &xs->sk.sk_wmem_alloc);
669 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
673 struct net_device *dev = xs->dev;
674 struct sk_buff *skb = xs->skb;
679 skb = xsk_build_skb_zerocopy(xs, desc);
688 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
694 skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
718 page = alloc_page(xs->sk.sk_allocation);
729 refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
733 if (unlikely(xs->pool->tx_metadata_len == 0)) {
738 meta = buffer - xs->pool->tx_metadata_len;
756 if (unlikely(xs->pool->tx_sw_csum)) {
766 skb->priority = READ_ONCE(xs->sk.sk_priority);
767 skb->mark = READ_ONCE(xs->sk.sk_mark);
777 xsk_set_destructor_arg(xs->skb);
778 xsk_drop_skb(xs->skb);
779 xskq_cons_release(xs->tx);
782 xsk_cq_cancel_locked(xs, 1);
790 struct xdp_sock *xs = xdp_sk(sk);
797 mutex_lock(&xs->mutex);
800 if (unlikely(!xsk_is_bound(xs))) {
805 if (xs->queue_id >= xs->dev->real_num_tx_queues)
808 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
819 if (xsk_cq_reserve_addr_locked(xs, desc.addr))
822 skb = xsk_build_skb(xs, &desc);
831 xskq_cons_release(xs->tx);
834 xs->skb = skb;
838 err = __dev_direct_xmit(skb, xs->queue_id);
841 xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
851 xs->skb = NULL;
856 xs->skb = NULL;
859 if (xskq_has_descs(xs->tx)) {
860 if (xs->skb)
861 xsk_drop_skb(xs->skb);
862 xskq_cons_release(xs->tx);
867 if (xsk_tx_writeable(xs))
870 mutex_unlock(&xs->mutex);
898 static int xsk_check_common(struct xdp_sock *xs)
900 if (unlikely(!xsk_is_bound(xs)))
902 if (unlikely(!(xs->dev->flags & IFF_UP)))
912 struct xdp_sock *xs = xdp_sk(sk);
916 err = xsk_check_common(xs);
921 if (unlikely(!xs->tx))
925 if (xs->zc)
926 __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
930 if (xs->zc && xsk_no_wakeup(sk))
933 pool = xs->pool;
935 if (xs->zc)
936 return xsk_wakeup(xs, XDP_WAKEUP_TX);
957 struct xdp_sock *xs = xdp_sk(sk);
960 err = xsk_check_common(xs);
963 if (unlikely(!xs->rx))
974 if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
975 return xsk_wakeup(xs, XDP_WAKEUP_RX);
995 struct xdp_sock *xs = xdp_sk(sk);
1001 if (xsk_check_common(xs))
1004 pool = xs->pool;
1007 if (xs->zc)
1008 xsk_wakeup(xs, pool->cached_need_wakeup);
1009 else if (xs->tx)
1014 if (xs->rx && !xskq_prod_is_empty(xs->rx))
1016 if (xs->tx && xsk_tx_writeable(xs))
1041 static void xsk_unbind_dev(struct xdp_sock *xs)
1043 struct net_device *dev = xs->dev;
1045 if (xs->state != XSK_BOUND)
1047 WRITE_ONCE(xs->state, XSK_UNBOUND);
1050 xp_del_xsk(xs->pool, xs);
1055 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1063 spin_lock_bh(&xs->map_list_lock);
1064 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1071 spin_unlock_bh(&xs->map_list_lock);
1075 static void xsk_delete_from_maps(struct xdp_sock *xs)
1095 while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1096 xsk_map_try_sock_delete(map, xs, map_entry);
1104 struct xdp_sock *xs = xdp_sk(sk);
1112 if (xs->skb)
1113 xsk_drop_skb(xs->skb);
1121 xsk_delete_from_maps(xs);
1122 mutex_lock(&xs->mutex);
1123 xsk_unbind_dev(xs);
1124 mutex_unlock(&xs->mutex);
1126 xskq_destroy(xs->rx);
1127 xskq_destroy(xs->tx);
1128 xskq_destroy(xs->fq_tmp);
1129 xskq_destroy(xs->cq_tmp);
1156 static bool xsk_validate_queues(struct xdp_sock *xs)
1158 return xs->fq_tmp && xs->cq_tmp;
1165 struct xdp_sock *xs = xdp_sk(sk);
1186 mutex_lock(&xs->mutex);
1187 if (xs->state != XSK_READY) {
1198 if (!xs->rx && !xs->tx) {
1216 if (xs->umem) {
1239 xs->pool = xp_create_and_assign_umem(xs,
1241 if (!xs->pool) {
1247 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1250 xp_destroy(xs->pool);
1251 xs->pool = NULL;
1257 if (xs->fq_tmp || xs->cq_tmp) {
1265 xs->pool = umem_xs->pool;
1271 if (xs->tx && !xs->pool->tx_descs) {
1272 err = xp_alloc_tx_descs(xs->pool, xs);
1274 xp_put_pool(xs->pool);
1275 xs->pool = NULL;
1283 WRITE_ONCE(xs->umem, umem_xs->umem);
1285 } else if (!xs->umem || !xsk_validate_queues(xs)) {
1290 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1291 if (!xs->pool) {
1296 err = xp_assign_dev(xs->pool, dev, qid, flags);
1298 xp_destroy(xs->pool);
1299 xs->pool = NULL;
1305 xs->fq_tmp = NULL;
1306 xs->cq_tmp = NULL;
1308 xs->dev = dev;
1309 xs->zc = xs->umem->zc;
1310 xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1311 xs->queue_id = qid;
1312 xp_add_xsk(xs->pool, xs);
1322 WRITE_ONCE(xs->state, XSK_BOUND);
1325 mutex_unlock(&xs->mutex);
1349 struct xdp_sock *xs = xdp_sk(sk);
1367 mutex_lock(&xs->mutex);
1368 if (xs->state != XSK_READY) {
1369 mutex_unlock(&xs->mutex);
1372 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1376 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1377 mutex_unlock(&xs->mutex);
1396 mutex_lock(&xs->mutex);
1397 if (xs->state != XSK_READY || xs->umem) {
1398 mutex_unlock(&xs->mutex);
1404 mutex_unlock(&xs->mutex);
1410 WRITE_ONCE(xs->umem, umem);
1411 mutex_unlock(&xs->mutex);
1425 mutex_lock(&xs->mutex);
1426 if (xs->state != XSK_READY) {
1427 mutex_unlock(&xs->mutex);
1431 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1432 &xs->cq_tmp;
1434 mutex_unlock(&xs->mutex);
1468 struct xdp_sock *xs = xdp_sk(sk);
1495 mutex_lock(&xs->mutex);
1496 stats.rx_dropped = xs->rx_dropped;
1498 stats.rx_ring_full = xs->rx_queue_full;
1500 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1501 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1503 stats.rx_dropped += xs->rx_queue_full;
1505 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1506 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1507 mutex_unlock(&xs->mutex);
1575 mutex_lock(&xs->mutex);
1576 if (xs->zc)
1578 mutex_unlock(&xs->mutex);
1600 struct xdp_sock *xs = xdp_sk(sock->sk);
1601 int state = READ_ONCE(xs->state);
1608 q = READ_ONCE(xs->rx);
1610 q = READ_ONCE(xs->tx);
1615 q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1616 READ_ONCE(xs->pool->fq);
1618 q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1619 READ_ONCE(xs->pool->cq);
1644 struct xdp_sock *xs = xdp_sk(sk);
1646 mutex_lock(&xs->mutex);
1647 if (xs->dev == dev) {
1652 xsk_unbind_dev(xs);
1655 xp_clear_dev(xs->pool);
1657 mutex_unlock(&xs->mutex);
1693 struct xdp_sock *xs = xdp_sk(sk);
1698 if (!xp_put_pool(xs->pool))
1699 xdp_put_umem(xs->umem, !xs->pool);
1705 struct xdp_sock *xs;
1732 xs = xdp_sk(sk);
1733 xs->state = XSK_READY;
1734 mutex_init(&xs->mutex);
1735 spin_lock_init(&xs->rx_lock);
1737 INIT_LIST_HEAD(&xs->map_list);
1738 spin_lock_init(&xs->map_list_lock);
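
The kernel-side lines above at 58-59 and 85-86 set and clear XDP_RING_NEED_WAKEUP on the socket's TX ring, and the sendmsg/poll paths at 912-1016 call xsk_wakeup() only on zero-copy sockets (xs->zc). For orientation, here is a minimal userspace sketch of the other half of that protocol, assuming the xsk helper API from libxdp/libbpf; kick_tx_if_needed() and the xsk/tx handles are illustrative placeholders, not code from the listing.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <xdp/xsk.h>	/* xsk_ring_prod__needs_wakeup(), xsk_socket__fd() */

/* Illustrative helper: kick the kernel TX path only when the ring's
 * NEED_WAKEUP flag (set at line 59 above) is raised. */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (!xsk_ring_prod__needs_wakeup(tx))
		return;	/* kernel cleared the flag (line 86): no syscall needed */

	/* A zero-length sendto() on the AF_XDP socket drives the kernel
	 * sendmsg path shown at lines 912-936 above. */
	if (sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0)
		return;

	/* These errors are transient for AF_XDP wakeups; anything else
	 * should be reported by a real application. */
	if (errno != EAGAIN && errno != EBUSY &&
	    errno != ENOBUFS && errno != ENETDOWN)
		perror("sendto(AF_XDP TX wakeup)");
}

A typical TX loop reserves descriptors, calls xsk_ring_prod__submit(), and then calls a helper like the one above; when the driver does not request wakeups, the flag stays clear and the sendto() syscall is skipped entirely.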