Lines Matching refs:xsk

46  *       then remove xsk sockets from queue 0 on both veth interfaces and
102 #include "xsk.h"
165 static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
167 memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
168 memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
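The gen_eth_hdr() matches at 165-168 copy the socket's stored MAC addresses into a freshly built Ethernet header. A minimal sketch of the same pattern, assuming dst_mac/src_mac are unsigned char[ETH_ALEN] fields on the socket-info struct (the struct name and the ETH_P_LOOPBACK protocol value are illustrative, not taken from the file):

#include <string.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

struct xsk_info_sketch {
	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];
};

static void fill_eth_hdr(const struct xsk_info_sketch *xsk, struct ethhdr *eth_hdr)
{
	/* Destination first, then source, matching struct ethhdr's layout. */
	memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
	memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_LOOPBACK); /* illustrative protocol */
}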
233 static void enable_busy_poll(struct xsk_socket_info *xsk)
238 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
243 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
247 sock_opt = xsk->batch_size;
248 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
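The enable_busy_poll() matches at 233-248 arm three SOL_SOCKET options in sequence: prefer busy polling, set the busy-poll timeout, and cap the per-poll budget at one batch. A hedged sketch of that sequence on a raw fd; the 20 us timeout is an assumed value, and SO_PREFER_BUSY_POLL/SO_BUSY_POLL_BUDGET need kernel 5.11+ UAPI headers:

#include <sys/socket.h>

static int setup_busy_poll(int fd, int batch_size)
{
	int sock_opt = 1;

	/* Favor busy polling over interrupt-driven NAPI processing. */
	if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       &sock_opt, sizeof(sock_opt)) < 0)
		return -1;

	/* Busy-poll for up to 20 us per syscall (assumed value). */
	sock_opt = 20;
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
		       &sock_opt, sizeof(sock_opt)) < 0)
		return -1;

	/* Let one busy-poll cycle process a full batch, as at line 247. */
	sock_opt = batch_size;
	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
			  &sock_opt, sizeof(sock_opt));
}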
253 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
260 xsk->umem = umem;
261 cfg.rx_size = xsk->rxqsize;
269 txr = ifobject->tx_on ? &xsk->tx : NULL;
270 rxr = ifobject->rx_on ? &xsk->rx : NULL;
271 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
278 struct xsk_socket_info *xsk;
298 xsk = calloc(1, sizeof(struct xsk_socket_info));
299 if (!xsk)
303 xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
304 ret = __xsk_configure_socket(xsk, umem, ifobject, false);
308 xsk_socket__delete(xsk->xsk);
309 free(xsk);
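Lines 253-309 show the socket life cycle: allocate an xsk_socket_info, size the RX ring, pass the UMEM plus only the rings actually needed to xsk_socket__create(), and tear down with xsk_socket__delete() plus free(). A condensed sketch, assuming the selftests' local xsk.h in which xsk_socket__create() takes an ifindex rather than an interface name, and the xsk_socket_info/xsk_umem_info layouts implied by the matches:

#include <stdbool.h>
#include <stdlib.h>
#include "xsk.h"

static struct xsk_socket_info *socket_create(struct xsk_umem_info *umem,
					     int ifindex, bool rx_on, bool tx_on)
{
	struct xsk_socket_config cfg = {};
	struct xsk_socket_info *xsk;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return NULL;

	xsk->umem = umem;
	xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;

	/* Pass NULL for any ring this socket will not use (lines 269-270). */
	if (xsk_socket__create(&xsk->xsk, ifindex, 0, umem->umem,
			       rx_on ? &xsk->rx : NULL,
			       tx_on ? &xsk->tx : NULL, &cfg)) {
		free(xsk);
		return NULL;
	}
	return xsk;
}

static void socket_destroy(struct xsk_socket_info *xsk)
{
	/* Teardown order mirrors lines 308-309. */
	xsk_socket__delete(xsk->xsk);
	free(xsk);
}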
452 ifobj->xsk = &ifobj->xsk_arr[0];
499 test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
501 test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
604 struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
605 struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;
608 pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
609 test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
613 pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
614 test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
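The matches at 604-614 restore each side's default packet stream after a test, deleting the per-test stream first. Sketched below; the guard against deleting a stream twice when both sides share one (as lines 735-737 arrange) is an assumption consistent with the saved pointers at 604-605:

static void restore_default_streams(struct test_spec *test)
{
	struct pkt_stream *tx = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *rx = test->ifobj_rx->xsk->pkt_stream;

	/* Free the per-test TX stream, but never the shared default. */
	if (tx != test->tx_pkt_stream_default) {
		pkt_stream_delete(tx);
		test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
	}

	if (rx != test->rx_pkt_stream_default) {
		/* rx may alias tx when one stream feeds both sides; skip
		 * the second delete in that case to avoid a double free. */
		if (rx != tx)
			pkt_stream_delete(rx);
		test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
	}
}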
735 test->ifobj_tx->xsk->pkt_stream = pkt_stream;
737 test->ifobj_rx->xsk->pkt_stream = pkt_stream;
746 pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
747 for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
750 ifobj->xsk->pkt_stream = pkt_stream;
761 struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
764 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
766 pkt_stream = test->ifobj_rx->xsk->pkt_stream;
803 static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
812 gen_eth_hdr(xsk, data);
879 test->ifobj_tx->xsk->pkt_stream = pkt_stream;
882 test->ifobj_rx->xsk->pkt_stream = pkt_stream;
1036 static int kick_tx(struct xsk_socket_info *xsk)
1040 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
1050 static int kick_rx(struct xsk_socket_info *xsk)
1054 ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
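kick_tx()/kick_rx() at 1036-1054 are the wakeup primitives: a zero-length, non-blocking sendto()/recvfrom() on the AF_XDP fd nudges the kernel into processing the TX and RX rings. Sketched below; treating ENOBUFS/EAGAIN/EBUSY/ENETDOWN as benign on the TX side is an assumption based on common AF_XDP practice:

#include <errno.h>
#include <sys/socket.h>

static int kick_tx_fd(int fd)
{
	/* Zero-byte send: no payload, purely a doorbell for the TX ring. */
	if (sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0)
		return 0;
	/* These errnos mean "retry later", not hard failure (assumed set). */
	if (errno == ENOBUFS || errno == EAGAIN ||
	    errno == EBUSY || errno == ENETDOWN)
		return 0;
	return -errno;
}

static int kick_rx_fd(int fd)
{
	/* Zero-byte receive: wakes the driver's RX-side processing. */
	if (recvfrom(fd, NULL, 0, MSG_DONTWAIT, NULL, NULL) < 0)
		return -errno;
	return 0;
}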
1061 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
1067 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
1068 ret = kick_tx(xsk);
1073 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
1075 if (rcvd > xsk->outstanding_tx) {
1076 u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
1084 xsk_ring_cons__release(&xsk->umem->cq, rcvd);
1085 xsk->outstanding_tx -= rcvd;
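complete_pkts() at 1061-1085 reaps TX completions: wake the kernel if the TX ring asks for it, peek the UMEM's completion queue, sanity-check the count against outstanding_tx, then release the reaped slots. A sketch of the core (field and function names follow the matches; the address bookkeeping around line 1076 is omitted):

static int reap_completions(struct xsk_socket_info *xsk, __u32 batch_size)
{
	__u32 idx = 0;
	unsigned int rcvd;

	/* Some drivers only process TX after an explicit wakeup. */
	if (xsk_ring_prod__needs_wakeup(&xsk->tx) && kick_tx(xsk))
		return -1;

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (!rcvd)
		return 0;

	/* More completions than packets in flight indicates a bug. */
	if (rcvd > xsk->outstanding_tx)
		return -1;

	xsk_ring_cons__release(&xsk->umem->cq, rcvd);
	xsk->outstanding_tx -= rcvd;
	return 0;
}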
1091 static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
1095 struct pkt_stream *pkt_stream = xsk->pkt_stream;
1097 struct xsk_umem_info *umem = xsk->umem;
1103 fds.fd = xsk_socket__fd(xsk->xsk);
1106 ret = kick_rx(xsk);
1127 rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
1144 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
1190 xsk_ring_cons__cancel(&xsk->rx, nb_frags);
1201 xsk_ring_cons__release(&xsk->rx, frags_processed);
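__receive_pkts() at 1091-1201 drives the RX side: kick the socket, peek up to batch_size descriptors, walk each xdp_desc to validate its frame, then either release the processed entries or cancel the ones belonging to an incomplete multi-frag packet. A reduced single-frag sketch (validation and the frag cancel/release split are omitted; the umem->buffer field name follows the selftests' xsk_umem_info and is assumed here):

static int receive_batch(struct xsk_socket_info *xsk, __u32 batch_size)
{
	__u32 idx_rx = 0, rcvd, i;

	if (kick_rx(xsk))
		return -1;

	rcvd = xsk_ring_cons__peek(&xsk->rx, batch_size, &idx_rx);
	if (!rcvd)
		return 0;

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
		void *data = xsk_umem__get_data(xsk->umem->buffer, desc->addr);

		/* The real test validates data/desc->len against the
		 * expected pkt_stream entry here. */
		(void)data;
	}

	xsk_ring_cons__release(&xsk->rx, rcvd);
	return (int)rcvd;
}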
1211 static bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
1214 struct pkt_stream *pkt_stream = xsk->pkt_stream;
1234 struct xsk_socket_info *xsk;
1245 xsk = &test->ifobj_rx->xsk_arr[sock_num];
1247 if ((all_packets_received(test, xsk, sock_num, bitmap)))
1250 res = __receive_pkts(test, xsk);
1268 static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
1271 struct pkt_stream *pkt_stream = xsk->pkt_stream;
1279 if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
1281 ret = kick_tx(xsk);
1287 fds.fd = xsk_socket__fd(xsk->xsk);
1290 while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
1310 complete_pkts(xsk, xsk->batch_size);
1313 for (i = 0; i < xsk->batch_size; i++) {
1321 if (nb_frags > xsk->batch_size - i) {
1323 xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
1329 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
1343 pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
1367 xsk_ring_prod__submit(&xsk->tx, i);
1368 xsk->outstanding_tx += valid_frags;
1382 if (complete_pkts(xsk, i))
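__send_pkts() at 1268-1388 is the producer mirror image: throttle when the UMEM is nearly exhausted, reserve batch_size TX slots (reaping completions while the ring is full), fill each descriptor, cancel any tail that cannot hold a whole multi-frag packet, then submit and bump outstanding_tx. A minimal single-frag sketch of the reserve/fill/submit core, reusing the reap_completions() sketch above (the linear addr walk is illustrative):

static int send_batch(struct xsk_socket_info *xsk, __u32 batch_size, __u32 frame_len)
{
	__u32 idx = 0, i;

	/* Spin for ring space, reaping completions to free slots. */
	while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) < batch_size) {
		if (reap_completions(xsk, batch_size))
			return -1;
	}

	for (i = 0; i < batch_size; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);

		/* Illustrative address scheme: frame i of a linear UMEM walk. */
		tx_desc->addr = (__u64)i * frame_len;
		tx_desc->len = frame_len;
		tx_desc->options = 0;
	}

	xsk_ring_prod__submit(&xsk->tx, batch_size);
	xsk->outstanding_tx += batch_size;
	return 0;
}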
1392 static int wait_for_tx_completion(struct xsk_socket_info *xsk)
1402 while (xsk->outstanding_tx) {
1411 complete_pkts(xsk, xsk->batch_size);
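wait_for_tx_completion() at 1392-1411 loops the reaper until outstanding_tx drains to zero (any poll/timeout handling in the original falls outside the matched lines). Sketched with the reap_completions() helper above:

static int drain_tx(struct xsk_socket_info *xsk)
{
	/* Keep reaping until every submitted descriptor has completed. */
	while (xsk->outstanding_tx) {
		if (reap_completions(xsk, xsk->batch_size))
			return -1;
	}
	return 0;
}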
1456 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
1458 int fd = xsk_socket__fd(xsk), err;
1481 struct xsk_socket *xsk = ifobject->xsk->xsk;
1485 err = kick_rx(ifobject->xsk);
1489 err = get_xsk_stats(xsk, &stats);
1499 if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
1500 stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
1508 struct xsk_socket *xsk = ifobject->xsk->xsk;
1513 err = kick_rx(ifobject->xsk);
1517 err = get_xsk_stats(xsk, &stats);
1529 struct xsk_socket *xsk = ifobject->xsk->xsk;
1534 err = kick_rx(ifobject->xsk);
1538 err = get_xsk_stats(xsk, &stats);
1550 struct xsk_socket *xsk = ifobject->xsk->xsk;
1551 int fd = xsk_socket__fd(xsk);
1564 if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
1568 ifobject->xsk->pkt_stream->nb_pkts);
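get_xsk_stats() at 1456-1458 and its callers at 1481-1568 read struct xdp_statistics off the socket and compare counters such as rx_dropped and tx_invalid_descs against the packet stream's expectations. The retrieval is a plain getsockopt(SOL_XDP, XDP_STATISTICS); sketched:

#include <errno.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int read_xdp_stats(int fd, struct xdp_statistics *stats)
{
	socklen_t optlen = sizeof(*stats);

	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen))
		return -errno;

	/* A short read means the kernel returned a truncated struct. */
	if (optlen != sizeof(*stats))
		return -1;
	return 0;
}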
1603 ifobject->xsk = &ifobject->xsk_arr[0];
1678 ifobject->xsk = &ifobject->xsk_arr[0];
1683 xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
1686 ifobject->xsk = &ifobject->xsk_arr[i];
1687 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
1726 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
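xsk_update_xskmap() at 1687 and 1726 binds each socket to a slot in the XSKMAP so the XDP program's bpf_redirect_map() can steer packets to it; the matches at 1969-1972 later rebind slot 0 to a second socket mid-test. In plain libbpf terms this is a bpf_map_update_elem() of the socket fd keyed by slot index; a sketch (the helper name is illustrative):

#include <bpf/bpf.h>
#include "xsk.h"

static int bind_sock_to_xskmap(int xskmap_fd, struct xsk_socket *xsk, __u32 slot)
{
	int sock_fd = xsk_socket__fd(xsk);

	/* bpf_redirect_map() in the XDP program looks this fd up by slot. */
	return bpf_map_update_elem(xskmap_fd, &slot, &sock_fd, BPF_ANY);
}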
1845 pkt_stream_reset(ifobj2->xsk->pkt_stream);
1849 pkt_stream_reset(ifobj1->xsk->pkt_stream);
1877 xsk_socket__delete(ifobj2->xsk_arr[i].xsk);
1880 xsk_socket__delete(ifobj1->xsk_arr[i].xsk);
1969 test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
1970 test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
1972 ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
2022 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
2024 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
2033 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
2166 skel_rx->maps.xsk, skel_tx->maps.xsk);
2182 skel_rx->maps.xsk, skel_tx->maps.xsk);
2209 skel_rx->maps.xsk, skel_tx->maps.xsk);
2430 test->ifobj_tx->xsk->batch_size = 1;
2431 test->ifobj_rx->xsk->batch_size = 1;
2437 test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2438 test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2452 test->ifobj_rx->xsk->rxqsize = max_descs;
2453 test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2454 test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2461 test->ifobj_tx->xsk->batch_size = max_descs - 1;
2462 test->ifobj_rx->xsk->batch_size = max_descs - 1;