Lines Matching refs:xdp

81 #include <net/xdp.h>
3883 BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp)
3885 return xdp_get_buff_len(xdp);
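
bpf_xdp_get_buff_len() backs the BPF helper of the same name and reports the full frame length, linear data plus any fragments. A minimal sketch of calling it from an XDP program; the program name, drop threshold and license string are illustrative and compilation assumes clang with the libbpf headers.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_len_filter(struct xdp_md *ctx)
    {
        /* Total frame length: linear part plus all fragments. */
        __u64 len = bpf_xdp_get_buff_len(ctx);

        /* Illustrative policy: drop anything longer than 1500 bytes. */
        if (len > 1500)
            return XDP_DROP;
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
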
3904 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3906 return xdp_data_meta_unsupported(xdp) ? 0 :
3907 xdp->data - xdp->data_meta;
3910 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3912 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3913 unsigned long metalen = xdp_get_metalen(xdp);
3915 void *data = xdp->data + offset;
3918 data > xdp->data_end - ETH_HLEN))
3922 memmove(xdp->data_meta + offset,
3923 xdp->data_meta, metalen);
3924 xdp->data_meta += offset;
3925 xdp->data = data;
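
bpf_xdp_adjust_head() moves xdp->data (and any metadata, via the memmove above) by the given offset, keeping the result behind the space reserved for struct xdp_frame and leaving at least an Ethernet header of payload. A hedged usage sketch from the BPF side; ENCAP_HLEN and the program name are made up for illustration.

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <bpf/bpf_helpers.h>

    /* Illustrative size of an outer header to strip; not from the listing. */
    #define ENCAP_HLEN 8

    SEC("xdp")
    int xdp_pop_encap(struct xdp_md *ctx)
    {
        void *data, *data_end;

        /* A positive delta moves xdp->data forward, trimming ENCAP_HLEN bytes
         * from the front; the kernel moves any metadata along with it. */
        if (bpf_xdp_adjust_head(ctx, ENCAP_HLEN))
            return XDP_DROP;

        /* Packet pointers must be re-read after any adjust_* helper. */
        data = (void *)(long)ctx->data;
        data_end = (void *)(long)ctx->data_end;
        if (data + sizeof(struct ethhdr) > data_end)
            return XDP_DROP;

        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
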
3938 void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
3947 if (likely(xdp->data_end - xdp->data >= off + len)) {
3948 src = flush ? buf : xdp->data + off;
3949 dst = flush ? xdp->data + off : buf;
3954 sinfo = xdp_get_shared_info_from_buff(xdp);
3958 ptr_len = xdp->data_end - xdp->data;
3959 ptr_buf = xdp->data;
3985 void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
3987 u32 size = xdp->data_end - xdp->data;
3989 void *addr = xdp->data;
3995 if (unlikely(offset + len > xdp_get_buff_len(xdp)))
4001 sinfo = xdp_get_shared_info_from_buff(xdp);
4017 BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset,
4022 ptr = bpf_xdp_pointer(xdp, offset, len);
4027 bpf_xdp_copy_buf(xdp, offset, buf, len, false);
4044 int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len)
4046 return ____bpf_xdp_load_bytes(xdp, offset, buf, len);
4049 BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset,
4054 ptr = bpf_xdp_pointer(xdp, offset, len);
4059 bpf_xdp_copy_buf(xdp, offset, buf, len, true);
4076 int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len)
4078 return ____bpf_xdp_store_bytes(xdp, offset, buf, len);
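
bpf_xdp_load_bytes() and bpf_xdp_store_bytes() copy through bpf_xdp_pointer()/bpf_xdp_copy_buf(), so a single call can span the linear area and the fragments of a multi-buffer frame. A small sketch; the rewrite itself is arbitrary, and the "xdp.frags" section name assumes a libbpf recent enough to set BPF_F_XDP_HAS_FRAGS from it.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp.frags")
    int xdp_rewrite_dmac(struct xdp_md *ctx)
    {
        __u8 mac[6];

        /* Reads 6 bytes at offset 0 (the destination MAC) even when the
         * range crosses into a fragment of a multi-buffer frame. */
        if (bpf_xdp_load_bytes(ctx, 0, mac, sizeof(mac)))
            return XDP_DROP;

        /* Illustrative rewrite: zero the destination MAC and write it back. */
        __builtin_memset(mac, 0, sizeof(mac));
        if (bpf_xdp_store_bytes(ctx, 0, mac, sizeof(mac)))
            return XDP_DROP;

        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
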
4081 static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
4083 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
4085 struct xdp_rxq_info *rxq = xdp->rxq;
4088 if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
4099 xsk_buff_get_tail(xdp)->data_end += offset;
4104 static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
4107 struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
4117 static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
4120 struct xdp_mem_info *mem_info = &xdp->rxq->mem;
4124 bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
4138 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
4140 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
4143 if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN))
4152 if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
4163 xdp_buff_clear_frags_flag(xdp);
4164 xdp->data_end -= offset;
4170 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
4172 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
4173 void *data_end = xdp->data_end + offset;
4175 if (unlikely(xdp_buff_has_frags(xdp))) { /* non-linear xdp buff */
4177 return bpf_xdp_frags_shrink_tail(xdp, -offset);
4179 return bpf_xdp_frags_increase_tail(xdp, offset);
4186 if (unlikely(data_end < xdp->data + ETH_HLEN))
4191 memset(xdp->data_end, 0, offset);
4193 xdp->data_end = data_end;
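
bpf_xdp_adjust_tail() grows or shrinks the frame at the tail, zero-filling grown bytes and delegating to the frags_increase/shrink_tail paths above for multi-buffer frames. An illustrative trailer-stripping sketch; TRAILER_LEN is invented.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Illustrative length of a trailer to strip; not from the listing. */
    #define TRAILER_LEN 4

    SEC("xdp")
    int xdp_strip_trailer(struct xdp_md *ctx)
    {
        /* A negative delta shrinks the frame from the tail; for multi-buffer
         * frames the kernel walks and releases fragments as needed. A
         * positive delta grows the tail and zero-fills the new bytes. */
        if (bpf_xdp_adjust_tail(ctx, -TRAILER_LEN))
            return XDP_PASS;    /* too little payload left, leave it alone */

        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
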
4206 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
4208 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
4209 void *meta = xdp->data_meta + offset;
4210 unsigned long metalen = xdp->data - meta;
4212 if (xdp_data_meta_unsupported(xdp))
4215 meta > xdp->data))
4220 xdp->data_meta = meta;
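
bpf_xdp_adjust_meta() resizes the metadata area between data_meta and data; the kernel keeps it behind data, in front of the xdp_frame scratch space, and requires its size to stay 4-byte aligned. A sketch of stashing a small struct there for a later TC program to read; the struct layout and value are illustrative.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Illustrative metadata layout, shared with a later TC program. */
    struct rx_meta {
        __u32 mark;
    };

    SEC("xdp")
    int xdp_set_meta(struct xdp_md *ctx)
    {
        void *data, *data_meta;
        struct rx_meta *meta;

        /* A negative delta grows the metadata area in front of xdp->data;
         * its size must be a multiple of 4 and fit in the headroom. */
        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
            return XDP_PASS;    /* e.g. the driver has no metadata support */

        data = (void *)(long)ctx->data;
        data_meta = (void *)(long)ctx->data_meta;
        meta = data_meta;
        if ((void *)(meta + 1) > data)
            return XDP_PASS;

        meta->mark = 1;     /* illustrative value */
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
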
4234 * DOC: xdp redirect
4311 u32 xdp_master_redirect(struct xdp_buff *xdp)
4316 master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
4317 slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
4318 if (slave && slave != xdp->rxq->dev) {
4335 struct xdp_buff *xdp,
4346 err = __xsk_map_redirect(fwd, xdp);
4428 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
4435 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
4437 return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
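
xdp_do_redirect() is the kernel-side entry point a driver calls once its XDP program returned XDP_REDIRECT. A rough, simplified sketch of that contract; rx_run_xdp() is a hypothetical driver function and all buffer setup, skb construction and recycling are elided.

    #include <linux/netdevice.h>
    #include <linux/filter.h>
    #include <net/xdp.h>

    /* Hypothetical, simplified per-frame RX handler. */
    static u32 rx_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                          struct xdp_buff *xdp)
    {
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_REDIRECT:
            /* Hands the frame to the devmap/cpumap/xskmap target the program
             * picked via bpf_redirect{,_map}(); failure counts as a drop. */
            if (xdp_do_redirect(dev, xdp, prog) < 0)
                act = XDP_DROP;
            /* xdp_do_flush() must still run before the NAPI poll returns. */
            break;
        case XDP_PASS:
        case XDP_TX:
        case XDP_DROP:
        default:
            /* pass to the stack / transmit / recycle, handled by the caller */
            break;
        }

        return act;
    }
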
4442 int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
4449 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
4457 struct xdp_buff *xdp,
4491 err = xsk_generic_rcv(fwd, xdp);
4514 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
4544 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
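
On the BPF side, the redirect paths above are reached through bpf_redirect()/bpf_redirect_map(). A minimal devmap-based sketch; the map name, its single entry, and the XDP_PASS fallback in the flags are illustrative.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Illustrative single-entry device map; user space stores the target
     * ifindex at key 0 before attaching the program. */
    struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
    } tx_port SEC(".maps");

    SEC("xdp")
    int xdp_redirect_prog(struct xdp_md *ctx)
    {
        /* XDP_PASS in the flags is the fallback if the map entry is missing;
         * on success the kernel completes the redirect in xdp_do_redirect()
         * or xdp_do_generic_redirect() shown above. */
        return bpf_redirect_map(&tx_port, 0, XDP_PASS);
    }

    char _license[] SEC("license") = "GPL";
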
5010 struct xdp_buff *xdp = (struct xdp_buff *)ctx;
5012 bpf_xdp_copy_buf(xdp, off, dst, len, false);
5016 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
5024 if (unlikely(!xdp || xdp_size > xdp_get_buff_len(xdp)))
5027 return bpf_event_output(map, flags, meta, meta_size, xdp,
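
bpf_xdp_event_output() implements bpf_perf_event_output() for XDP programs; the upper 32 bits of the flags argument select how many packet bytes to append, which is what the xdp_get_buff_len() bound above checks. A hedged sketch; the metadata struct and 64-byte sample size are illustrative.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
    } events SEC(".maps");

    /* Illustrative per-event metadata. */
    struct event_meta {
        __u32 linear_len;
    };

    SEC("xdp")
    int xdp_sample(struct xdp_md *ctx)
    {
        struct event_meta meta = {
            .linear_len = ctx->data_end - ctx->data,
        };
        __u64 sample_len = 64;  /* packet bytes to append; illustrative */

        /* The upper 32 bits of flags tell the kernel how many packet bytes
         * to append after the struct; bpf_xdp_event_output() bounds this
         * against xdp_get_buff_len(). */
        bpf_perf_event_output(ctx, &events,
                              BPF_F_CURRENT_CPU | (sample_len << 32),
                              &meta, sizeof(meta));
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
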
5964 /* xdp and cls_bpf programs are run in RCU-bh so
6124 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
6292 BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
6295 struct net_device *dev = xdp->rxq->dev;
6296 int xdp_len = xdp->data_end - xdp->data;
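
bpf_xdp_check_mtu() backs the bpf_check_mtu() helper for XDP: ifindex 0 resolves to the receiving device (xdp->rxq->dev) and a zero *mtu_len means the current packet length is checked. An illustrative sketch.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_mtu_gate(struct xdp_md *ctx)
    {
        __u32 mtu_len = 0;  /* 0: check the current packet length */

        /* ifindex 0 means the device the packet arrived on, i.e. the
         * xdp->rxq->dev that bpf_xdp_check_mtu() starts from; a non-zero
         * return means the frame would exceed that device's MTU (or the
         * lookup failed). On return mtu_len holds the device MTU. */
        if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
            return XDP_DROP;

        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
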
11642 DEFINE_BPF_DISPATCHER(xdp)
11646 bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
11869 __bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags,
11877 bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
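
bpf_dynptr_from_xdp() is a kfunc that wraps the whole buffer (sized with xdp_get_buff_len()) in a dynptr, so slices can be read regardless of fragment boundaries. A sketch assuming the kfunc declarations are available; recent kernels ship them in a header such as the selftests' bpf_kfuncs.h, otherwise they must be declared as below.

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    /* kfunc declarations; adjust if your tree ships a header for them. */
    extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
                                   struct bpf_dynptr *ptr__uninit) __ksym;
    extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
                                  void *buffer, __u32 buffer__szk) __ksym;

    SEC("xdp")
    int xdp_dynptr_eth(struct xdp_md *ctx)
    {
        struct bpf_dynptr ptr;
        struct ethhdr eth_copy;
        const struct ethhdr *eth;

        /* The dynptr spans the whole frame, linear bytes plus fragments. */
        if (bpf_dynptr_from_xdp(ctx, 0, &ptr))
            return XDP_PASS;

        /* Returns a direct pointer when the bytes are contiguous, or copies
         * them into eth_copy when they straddle a fragment boundary. */
        eth = bpf_dynptr_slice(&ptr, 0, &eth_copy, sizeof(eth_copy));
        if (!eth)
            return XDP_PASS;

        /* Illustrative policy: only let IPv4 frames through. */
        return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
    }

    char _license[] SEC("license") = "GPL";
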