Searched refs:xdp (Results 151 - 175 of 209) sorted by relevance


/linux-master/drivers/net/ethernet/netronome/nfp/
nfp_net_common.c
2187 xdp_attachment_setup(&nn->xdp, bpf);
2205 xdp_attachment_setup(&nn->xdp, bpf);
2221 static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) argument
2225 switch (xdp->command) {
2227 return nfp_net_xdp_setup_drv(nn, xdp);
2229 return nfp_net_xdp_setup_hw(nn, xdp);
2231 return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
2232 xdp->xsk.queue_id);
2234 return nfp_app_bpf(nn->app, nn, xdp);
nfp_app.h
135 struct netdev_bpf *xdp);
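
The nfp hits above are an instance of the standard .ndo_bpf dispatch: one callback switches on xdp->command and hands each command to a helper (nfp additionally has a hardware-offload setup path, nfp_net_xdp_setup_hw(), and forwards other BPF commands to nfp_app_bpf(), as the hits at 2229 and 2234 show). A minimal sketch of that dispatch pattern follows; the foo_* names are placeholders, not code from any file listed here.

#include <linux/netdevice.h>
#include <linux/bpf.h>
#include <net/xsk_buff_pool.h>

/* Placeholder helpers: a real driver swaps its program pointer and
 * (de)registers the AF_XDP pool against the given queue here. */
static int foo_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
                         struct netlink_ext_ack *extack)
{
        return 0;
}

static int foo_xsk_setup_pool(struct net_device *dev,
                              struct xsk_buff_pool *pool, u16 queue_id)
{
        return 0;
}

/* .ndo_bpf callback: dispatch on the requested XDP command. */
static int foo_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return foo_xdp_setup(dev, xdp->prog, xdp->extack);
        case XDP_SETUP_XSK_POOL:
                return foo_xsk_setup_pool(dev, xdp->xsk.pool,
                                          xdp->xsk.queue_id);
        default:
                return -EINVAL;
        }
}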
/linux-master/drivers/net/ethernet/mediatek/
mtk_eth_soc.c
1943 struct xdp_buff *xdp, struct net_device *dev)
1957 act = bpf_prog_run_xdp(prog, xdp);
1963 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1971 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1993 virt_to_head_page(xdp->data), true);
2070 struct xdp_buff xdp;
2085 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2086 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2088 xdp_buff_clear_frags_flag(&xdp);
2090 ret = mtk_xdp_run(eth, ring, &xdp, netde
1936 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, struct xdp_buff *xdp, struct net_device *dev) argument
2064 struct xdp_buff xdp; local
3525 mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp) argument
[all...]
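
mtk_xdp_run() above (like cpsw_run_xdp() and mvpp2_run_xdp() further down) follows the usual verdict-handling shape around bpf_prog_run_xdp(). A condensed sketch of that shape under placeholder names; foo_xdp_xmit_frame() is an invented stub standing in for the driver's XDP TX path, and buffer recycling is left as a comment because it is driver specific.

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <net/xdp.h>
#include <trace/events/xdp.h>

/* Placeholder: a real driver would enqueue the frame on its XDP TX ring. */
static int foo_xdp_xmit_frame(struct net_device *dev, struct xdp_frame *xdpf)
{
        return 0;
}

static u32 foo_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                       struct xdp_buff *xdp)
{
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
                break;                  /* hand the buffer to the skb path */
        case XDP_TX: {
                struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

                if (!xdpf || foo_xdp_xmit_frame(dev, xdpf))
                        goto drop;
                break;
        }
        case XDP_REDIRECT:
                if (xdp_do_redirect(dev, xdp, prog))
                        goto drop;
                break;
        default:
                bpf_warn_invalid_xdp_action(dev, prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(dev, prog, act);
                fallthrough;
        case XDP_DROP:
drop:
                /* recycle the rx page here (page_pool / driver specific) */
                break;
        }
        return act;
}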
/linux-master/net/core/
dev.c
138 #include <trace/events/xdp.h>
4892 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4915 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4916 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4920 xdp_buff_set_frags_flag(xdp);
4922 xdp_buff_clear_frags_flag(xdp);
4925 orig_data_end = xdp->data_end;
4926 orig_data = xdp->data;
4927 eth = (struct ethhdr *)xdp->data;
4932 act = bpf_prog_run_xdp(xdp_prog, xdp);
4891 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) argument
5016 netif_receive_generic_xdp(struct sk_buff **pskb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) argument
5099 struct xdp_buff xdp; local
5773 generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) argument
9339 struct netdev_bpf xdp; local
[all...]
/linux-master/drivers/net/ethernet/ti/
cpsw.c
357 struct xdp_buff xdp; local
397 xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
403 xdp_prepare_buff(&xdp, pa, headroom, size, false);
406 ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
410 headroom = xdp.data - xdp.data_hard_start;
cpsw_new.c
295 struct xdp_buff xdp; local
342 xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
348 xdp_prepare_buff(&xdp, pa, headroom, size, false);
350 ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
354 headroom = xdp.data - xdp.data_hard_start;
cpsw_priv.c
1324 int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, argument
1338 act = bpf_prog_run_xdp(prog, xdp);
1340 *len = xdp->data_end - xdp->data;
1347 xdpf = xdp_convert_buff_to_frame(xdp);
1355 if (xdp_do_redirect(ndev, xdp, prog))
am65-cpsw-nuss.c
995 struct xdp_buff *xdp,
1012 act = bpf_prog_run_xdp(prog, xdp);
1014 *len = xdp->data_end - xdp->data;
1024 xdpf = xdp_convert_buff_to_frame(xdp);
1040 if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
1057 page = virt_to_head_page(xdp->data);
1112 struct xdp_buff xdp; local
1164 xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
1166 xdp_prepare_buff(&xdp, page_add
993 am65_cpsw_run_xdp(struct am65_cpsw_common *common, struct am65_cpsw_port *port, struct xdp_buff *xdp, int desc_idx, int cpu, int *len) argument
[all...]
cpsw_priv.h
9 #include <net/xdp.h>
450 int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
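
The cpsw hits show the two rx-side steps around the program: xdp_init_buff()/xdp_prepare_buff() wrap the received page before the run, and afterwards the driver re-derives headroom and length from the xdp_buff pointers, since the program may have moved data and data_end. A condensed sketch of both steps (not cpsw code; the foo_* names are placeholders):

#include <linux/mm.h>
#include <net/xdp.h>

/* Wrap a page-backed rx buffer in an xdp_buff before running the program.
 * The frame size covers the whole buffer, including head- and tailroom. */
static void foo_rx_prep_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
                            void *hard_start, int headroom, int len)
{
        xdp_init_buff(xdp, PAGE_SIZE, rxq);
        xdp_prepare_buff(xdp, hard_start, headroom, len, false);
}

/* After bpf_prog_run_xdp(): the program may have adjusted the headers,
 * so re-derive headroom and payload length from the buff pointers. */
static void foo_rx_post_xdp(struct xdp_buff *xdp, int *headroom, int *len)
{
        *headroom = xdp->data - xdp->data_hard_start;
        *len = xdp->data_end - xdp->data;
}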
/linux-master/drivers/net/ethernet/freescale/enetc/
enetc.h
14 #include <net/xdp.h>
130 struct enetc_xdp_data xdp; member in struct:enetc_bdr
/linux-master/drivers/net/ethernet/google/gve/
gve_main.c
1712 static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) argument
1720 switch (xdp->command) {
1722 return gve_set_xdp(priv, xdp->prog, xdp->extack);
1724 if (xdp->xsk.pool)
1725 return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1727 return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
gve.h
16 #include <net/xdp.h>
334 u16 size; /* size of xmitted xdp pkt */
336 } xdp; member in struct:gve_tx_buffer_state
/linux-master/drivers/net/ethernet/microchip/lan966x/
lan966x_main.h
17 #include <net/xdp.h>
696 int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp);
/linux-master/include/net/mana/
mana.h
7 #include <net/xdp.h>
468 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
625 static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp) argument
627 if (xdp)
657 #define MAX_WQE_BULK_BYTES(xdp) ((xdp ? 256 : 512) * 1024)
/linux-master/drivers/net/ethernet/amazon/ena/
ena_netdev.h
17 #include <net/xdp.h>
136 * the xdp queues
241 /* Used for rx queues only to point to the xdp tx ring, to
ena_netdev.c
232 /* Don't init RX queues for xdp queues */
805 is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
1171 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs) argument
1179 "xdp: dropped unsupported multi-buffer packets\n");
1185 xdp_prepare_buff(xdp, page_address(rx_info->page),
1189 ret = ena_xdp_execute(rx_ring, xdp);
1191 /* The xdp program might expand the headers */
1193 rx_info->buf_offset = xdp->data - xdp->data_hard_start;
1194 rx_ring->ena_bufs[0].len = xdp
1219 struct xdp_buff xdp; local
[all...]
/linux-master/tools/testing/selftests/bpf/progs/
test_tunnel_kern.c
966 SEC("xdp")
967 int xfrm_get_state_xdp(struct xdp_md *xdp) argument
978 if (bpf_dynptr_from_xdp(xdp, 0, &ptr))
997 x = bpf_xdp_get_xfrm_state(xdp, &opts, sizeof(opts));
/linux-master/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
1639 u64 buf_addr, bool xdp)
1644 if (xdp) {
1664 struct cqe_rx_t *cqe_rx, bool xdp)
1701 phys_addr, xdp);
1711 nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
1638 nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, u64 buf_addr, bool xdp) argument
1663 nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx, bool xdp) argument
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
1411 struct xdp_buff xdp; local
1421 xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
1424 xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
1427 act = bpf_prog_run_xdp(prog, &xdp);
1439 err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
/linux-master/drivers/net/ethernet/marvell/mvpp2/
mvpp2_main.c
3746 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) argument
3753 xdpf = xdp_convert_buff_to_frame(xdp);
3826 struct xdp_buff *xdp, struct page_pool *pp,
3833 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3834 act = bpf_prog_run_xdp(prog, xdp);
3837 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3846 err = xdp_do_redirect(port->dev, xdp, prog);
3849 page = virt_to_head_page(xdp
3825 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, struct xdp_buff *xdp, struct page_pool *pp, struct mvpp2_pcpu_stats *stats) argument
3917 struct xdp_buff xdp; local
5395 mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) argument
[all...]
/linux-master/drivers/net/ethernet/fungible/funeth/
funeth_main.c
1131 static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp) argument
1133 struct bpf_prog *old_prog, *prog = xdp->prog;
1140 NL_SET_ERR_MSG_MOD(xdp->extack,
1150 NL_SET_ERR_MSG_MOD(xdp->extack,
1176 static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp) argument
1178 switch (xdp->command) {
1180 return fun_xdp_setup(dev, xdp);
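
fun_xdp_setup() above also demonstrates reporting attach failures through the extack carried in struct netdev_bpf (the NL_SET_ERR_MSG_MOD hits at 1140 and 1150). A minimal sketch of that validation style; FOO_XDP_MAX_MTU and its value are invented purely for illustration.

#include <linux/netdevice.h>
#include <linux/netlink.h>

#define FOO_XDP_MAX_MTU 3072    /* placeholder limit, driver specific */

/* Validate before attaching; failures are reported via extack so the
 * reason reaches the netlink caller, not just the kernel log. */
static int foo_xdp_check(struct net_device *dev, struct netdev_bpf *xdp)
{
        if (dev->mtu > FOO_XDP_MAX_MTU) {
                NL_SET_ERR_MSG_MOD(xdp->extack, "MTU too large for XDP");
                return -EINVAL;
        }
        return 0;
}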
/linux-master/drivers/net/ethernet/microsoft/mana/
mana_en.c
16 #include <net/xdp.h>
1481 uint pkt_len, struct xdp_buff *xdp)
1488 if (xdp->data_hard_start) {
1489 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1490 skb_put(skb, xdp->data_end - xdp->data);
1508 struct xdp_buff xdp = {}; local
1521 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1529 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1480 mana_build_skb(struct mana_rxq *rxq, void *buf_va, uint pkt_len, struct xdp_buff *xdp) argument
[all...]
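
mana_build_skb() above shows the post-XDP_PASS fixup: the skb built over the same buffer must reflect any header adjustment the program made, hence the skb_reserve()/skb_put() pair driven by the xdp_buff pointers (hits 1488-1490). A stripped-down sketch of that fixup, assuming the skb was just built over the rx buffer and holds no data yet; the function name is a placeholder.

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Align a freshly built skb with whatever header adjustments the XDP
 * program made via bpf_xdp_adjust_head()/bpf_xdp_adjust_tail(). */
static void foo_sync_skb_with_xdp(struct sk_buff *skb, struct xdp_buff *xdp)
{
        if (!xdp->data_hard_start)
                return;         /* no XDP program ran on this buffer */

        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        skb_put(skb, xdp->data_end - xdp->data);
}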
/linux-master/drivers/net/vmxnet3/
vmxnet3_int.h
61 #include <net/xdp.h>
/linux-master/drivers/net/ethernet/intel/ixgbevf/
ixgbevf.h
14 #include <net/xdp.h>

Completed in 540 milliseconds
