Lines matching refs: skb (net/netfilter/nf_flow_table_ip.c)

23 			       struct sk_buff *skb, unsigned int thoff)
30 tcph = (void *)(skb_network_header(skb) + thoff);
39 static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
44 tcph = (void *)(skb_network_header(skb) + thoff);
45 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
48 static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
53 udph = (void *)(skb_network_header(skb) + thoff);
54 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
55 inet_proto_csum_replace4(&udph->check, skb, addr,
62 static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
68 nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
71 nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
77 struct sk_buff *skb, struct iphdr *iph,
96 nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
100 struct sk_buff *skb, struct iphdr *iph,
119 nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
122 static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
127 nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
128 nf_flow_snat_ip(flow, skb, iph, thoff, dir);
131 nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
132 nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
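
The nf_flow_nat_ip*() lines above rewrite an address for SNAT/DNAT and call inet_proto_csum_replace4() so the existing TCP/UDP checksum is adjusted incrementally instead of being recomputed over the whole packet. The userspace sketch below shows the same RFC 1624 one's-complement arithmetic on a toy buffer and verifies it against a full recomputation; the helper names (csum_partial, csum_fold, csum_replace4) only mirror the kernel's, and all values are made up for the demo.

/*
 * Userspace sketch of the incremental checksum update behind
 * inet_proto_csum_replace4() when a 32-bit pseudo-header word (here an
 * IPv4 address) is rewritten.  Illustrative only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One's-complement sum over a byte buffer, as 16-bit big-endian words. */
static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Adjust an existing checksum when one 32-bit word changes (RFC 1624):
 * HC' = ~(~HC + ~m + m'), applied per 16-bit half of the word. */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~(from >> 16);
	sum += (uint16_t)~(from & 0xffff);
	sum += to >> 16;
	sum += to & 0xffff;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	/* Toy 12-byte blob; the first four bytes are the address we NAT. */
	uint8_t pkt[12] = { 192, 0, 2, 1,  10, 0, 0, 7,  0, 6, 0, 20 };
	uint32_t old_addr = 0xc0000201;	/* 192.0.2.1  */
	uint32_t new_addr = 0xc0000263;	/* 192.0.2.99 */
	uint16_t check, incremental, recomputed;

	check = ~csum_fold(csum_partial(pkt, sizeof(pkt), 0));

	/* NAT rewrite: patch the address, fix the checksum incrementally. */
	incremental = csum_replace4(check, old_addr, new_addr);
	pkt[0] = 192; pkt[1] = 0; pkt[2] = 2; pkt[3] = 99;
	recomputed = ~csum_fold(csum_partial(pkt, sizeof(pkt), 0));

	assert(incremental == recomputed);
	printf("checksum 0x%04x -> 0x%04x\n", check, incremental);
	return 0;
}
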
141 static void nf_flow_tuple_encap(struct sk_buff *skb,
148 if (skb_vlan_tag_present(skb)) {
149 tuple->encap[i].id = skb_vlan_tag_get(skb);
150 tuple->encap[i].proto = skb->vlan_proto;
153 switch (skb->protocol) {
155 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
157 tuple->encap[i].proto = skb->protocol;
160 phdr = (struct pppoe_hdr *)skb_network_header(skb);
162 tuple->encap[i].proto = skb->protocol;
173 static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
181 if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset))
184 iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
213 if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
219 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
226 greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
233 iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
240 nf_flow_tuple_encap(skb, tuple);
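
nf_flow_tuple_ip() above validates the IPv4 header, derives the transport offset and reads the transport ports (for GRE only the base header is checked) to build the tuple used for the flowtable lookup. Below is a much-reduced userspace version of that header walk over a raw buffer; struct and function names are mine, and it skips everything else the kernel path handles (GRE, IP options, VLAN/PPPoE encapsulation).

/*
 * Minimal sketch of building a lookup 5-tuple from a raw IPv4 packet,
 * in the spirit of nf_flow_tuple_ip().  Not the kernel's structures.
 */
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow_tuple {
	uint32_t saddr, daddr;	/* network byte order */
	uint16_t sport, dport;	/* network byte order */
	uint8_t  proto;
};

static bool ipv4_to_tuple(const uint8_t *pkt, size_t len, struct flow_tuple *t)
{
	const struct iphdr *iph;
	uint16_t ports[2];
	size_t thoff;

	if (len < sizeof(*iph))
		return false;
	iph = (const struct iphdr *)pkt;

	/* Reject bad header lengths, fragments and unsupported protocols. */
	if (iph->version != 4 || iph->ihl < 5)
		return false;
	if (iph->frag_off & htons(IP_MF | IP_OFFMASK))
		return false;
	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return false;

	thoff = (size_t)iph->ihl * 4;
	if (len < thoff + sizeof(ports))
		return false;

	/* Source and destination port sit at the same offsets for TCP/UDP. */
	memcpy(ports, pkt + thoff, sizeof(ports));

	t->saddr = iph->saddr;
	t->daddr = iph->daddr;
	t->proto = iph->protocol;
	t->sport = ports[0];
	t->dport = ports[1];
	return true;
}

int main(void)
{
	/* Hand-built IPv4+UDP headers, 192.0.2.1:1024 -> 198.51.100.2:53. */
	uint8_t pkt[28] = {
		0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
		0x40, 0x11, 0x00, 0x00, 192, 0, 2, 1, 198, 51, 100, 2,
		0x04, 0x00, 0x00, 0x35, 0x00, 0x08, 0x00, 0x00,
	};
	struct flow_tuple t;

	if (ipv4_to_tuple(pkt, sizeof(pkt), &t))
		printf("%u -> %u proto %u\n",
		       ntohs(t.sport), ntohs(t.dport), t.proto);
	return 0;
}
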
246 static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
248 if (skb->len <= mtu)
251 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
266 static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
270 skb_orphan(skb);
271 skb_dst_set_noref(skb, dst);
272 dst_output(state->net, state->sk, skb);
276 static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
282 switch (skb->protocol) {
284 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
291 if (nf_flow_pppoe_proto(skb, &inner_proto) &&
302 static void nf_flow_encap_pop(struct sk_buff *skb,
309 if (skb_vlan_tag_present(skb)) {
310 __vlan_hwaccel_clear_tag(skb);
313 switch (skb->protocol) {
315 vlan_hdr = (struct vlan_hdr *)skb->data;
316 __skb_pull(skb, VLAN_HLEN);
317 vlan_set_encap_proto(skb, vlan_hdr);
318 skb_reset_network_header(skb);
321 skb->protocol = __nf_flow_pppoe_proto(skb);
322 skb_pull(skb, PPPOE_SES_HLEN);
323 skb_reset_network_header(skb);
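
nf_flow_encap_pop() above strips a VLAN or PPPoE encapsulation by adjusting the skb's data pointer and offsets before the packet is forwarded as plain IPv4/IPv6. The sketch below does the raw-buffer equivalent for a single 802.1Q tag: it shifts the two MAC addresses forward over the 4-byte tag so the inner ethertype takes effect, and the caller then treats frame + VLAN_HLEN as the start of the untagged frame. Function name and frame contents are invented; only the VLAN case is shown.

/*
 * Userspace equivalent of popping one 802.1Q tag from a raw Ethernet
 * frame (the kernel version works on skb offsets instead of moving bytes).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100

/* Remove one VLAN tag in place; returns the new frame length, or 0 if the
 * frame is not tagged or too short.  The untagged frame then starts at
 * frame + VLAN_HLEN. */
static size_t vlan_untag(uint8_t *frame, size_t len)
{
	uint16_t ethertype;

	if (len < ETH_HLEN + VLAN_HLEN)
		return 0;

	ethertype = (uint16_t)frame[12] << 8 | frame[13];
	if (ethertype != ETH_P_8021Q)
		return 0;

	/* Shift dst+src MAC (12 bytes) forward over the 4-byte tag, so the
	 * inner ethertype becomes the frame's ethertype. */
	memmove(frame + VLAN_HLEN, frame, 2 * ETH_ALEN);
	return len - VLAN_HLEN;
}

int main(void)
{
	/* dst, src, 0x8100, VID 100, inner type 0x0800 (IPv4), payload... */
	uint8_t frame[] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02,
		0x81, 0x00, 0x00, 0x64, 0x08, 0x00,
		0x45, 0x00,	/* start of the inner IPv4 header ... */
	};
	size_t newlen = vlan_untag(frame, sizeof(frame));

	if (newlen)
		printf("untagged frame is %zu bytes, ethertype 0x%02x%02x\n",
		       newlen, frame[VLAN_HLEN + 12], frame[VLAN_HLEN + 13]);
	return 0;
}
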
329 static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
339 skb->dev = outdev;
340 dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
341 tuplehash->tuple.out.h_source, skb->len);
342 dev_queue_xmit(skb);
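
nf_flow_queue_xmit() transmits straight out of the flowtable: it sets skb->dev to the cached output device, writes the link-layer header with dev_hard_header() using the MAC addresses stored in the tuple, and hands the packet to dev_queue_xmit(). A minimal userspace stand-in for that header fill is below; struct eth_hdr, the constants and the addresses are defined locally for the demo and are not the kernel's definitions.

/* Sketch of prepending an Ethernet header in front of a routed IP packet,
 * the way dev_hard_header() does with the tuple's cached h_dest/h_source. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define ETH_HLEN	14
#define ETH_P_IP	0x0800

struct eth_hdr {
	uint8_t  h_dest[ETH_ALEN];
	uint8_t  h_source[ETH_ALEN];
	uint16_t h_proto;		/* ethertype, network byte order */
} __attribute__((packed));

/* Fill the ETH_HLEN bytes of headroom in front of the IP packet. */
static void fill_eth_header(uint8_t *headroom, const uint8_t *h_dest,
			    const uint8_t *h_source, uint16_t proto)
{
	struct eth_hdr *eth = (struct eth_hdr *)headroom;

	memcpy(eth->h_dest, h_dest, ETH_ALEN);
	memcpy(eth->h_source, h_source, ETH_ALEN);
	eth->h_proto = htons(proto);
}

int main(void)
{
	static const uint8_t dst[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	static const uint8_t src[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x02 };
	uint8_t frame[ETH_HLEN + 20] = { 0 };	/* header + IP packet */
	int i;

	/* The routed IP packet would already sit at frame + ETH_HLEN. */
	fill_eth_header(frame, dst, src, ETH_P_IP);

	for (i = 0; i < ETH_HLEN; i++)
		printf("%02x%c", frame[i], i == ETH_HLEN - 1 ? '\n' : ' ');
	return 0;
}
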
349 struct nf_flowtable *flow_table, struct sk_buff *skb)
353 if (skb->protocol != htons(ETH_P_IP) &&
354 !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset))
357 if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
366 struct sk_buff *skb)
377 if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
380 iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
382 if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
390 if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
395 nf_flow_encap_pop(skb, tuplehash);
398 iph = ip_hdr(skb);
399 nf_flow_nat_ip(flow, skb, thoff, dir, iph);
402 skb_clear_tstamp(skb);
405 nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
411 nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
426 tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb);
430 ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb);
438 memset(skb->cb, 0, sizeof(struct inet_skb_parm));
439 IPCB(skb)->iif = skb->dev->ifindex;
440 IPCB(skb)->flags = IPSKB_FORWARDED;
441 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
451 skb->dev = outdev;
453 skb_dst_set_noref(skb, &rt->dst);
454 neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
458 ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
472 static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
479 tcph = (void *)(skb_network_header(skb) + thoff);
480 inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
484 static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
490 udph = (void *)(skb_network_header(skb) + thoff);
491 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
492 inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
499 static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
505 nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
508 nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
514 struct sk_buff *skb, struct ipv6hdr *ip6h,
533 nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
537 struct sk_buff *skb, struct ipv6hdr *ip6h,
556 nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
560 struct sk_buff *skb,
567 nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
568 nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
571 nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
572 nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
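
The IPv6 NAT helpers above do the same incremental fix-up as the IPv4 ones, except that inet_proto_csum_replace16() folds all four 32-bit words of the old and new address into the TCP/UDP checksum; IPv6 has no header checksum, so that is the only field to patch. A 128-bit variant of the earlier userspace sketch, again with invented helper names:

#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* check: existing TCP/UDP checksum; from/to: old and new IPv6 address as
 * four 32-bit words, in the same byte-order convention as the checksum. */
uint16_t csum_replace16(uint16_t check, const uint32_t from[4],
			const uint32_t to[4])
{
	uint32_t sum = (uint16_t)~check;
	int i;

	for (i = 0; i < 4; i++) {
		sum += (uint16_t)~(from[i] >> 16);
		sum += (uint16_t)~(from[i] & 0xffff);
		sum += to[i] >> 16;
		sum += to[i] & 0xffff;
	}
	return (uint16_t)~csum_fold(sum);
}
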
576 static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
585 if (!pskb_may_pull(skb, thoff))
588 ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
610 if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
616 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
623 greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
630 ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
637 nf_flow_tuple_encap(skb, tuple);
645 struct sk_buff *skb)
656 if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
659 ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
661 if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
669 if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
674 nf_flow_encap_pop(skb, tuplehash);
676 ip6h = ipv6_hdr(skb);
677 nf_flow_nat_ipv6(flow, skb, dir, ip6h);
680 skb_clear_tstamp(skb);
683 nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
691 struct sk_buff *skb)
695 if (skb->protocol != htons(ETH_P_IPV6) &&
696 !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset))
699 if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
706 nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
721 tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb);
725 ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb);
733 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
734 IP6CB(skb)->iif = skb->dev->ifindex;
735 IP6CB(skb)->flags = IP6SKB_FORWARDED;
736 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
746 skb->dev = outdev;
748 skb_dst_set_noref(skb, &rt->dst);
749 neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
753 ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
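
One step of the IPv4 forward path is not visible in this skb-only listing: between the NAT rewrite and transmission, nf_flow_offload_forward() also decrements the TTL (the ip_decrease_ttl(iph) call carries no skb reference, so it does not show up above), and the IPv4 header checksum is patched in place for that single-byte change rather than recomputed. The IPv6 path simply decrements hop_limit, since there is no header checksum to fix. The self-contained sketch below reproduces the IPv4 trick in userspace and re-verifies the header afterwards; the header bytes are hand-made for the demo.

/* Userspace rendering of the TTL-decrement checksum patch (the kernel
 * helper is ip_decrease_ttl()). */
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stdint.h>
#include <stdio.h>

static void decrease_ttl(struct iphdr *iph)
{
	uint32_t check = iph->check;

	/* The TTL is the high-order byte of one 16-bit header word, so
	 * decrementing it lowers that word by 0x0100; add the same amount
	 * back into the one's-complement checksum, with end-around carry. */
	check += htons(0x0100);
	iph->check = (uint16_t)(check + (check >= 0xffff));
	iph->ttl--;
}

/* Plain 16-bit one's-complement checksum, used here only to verify. */
static uint16_t ip_checksum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Hand-built 20-byte IPv4 header; the checksum is filled in below. */
	union {
		struct iphdr iph;
		uint8_t bytes[20];
	} hdr = { .bytes = {
		0x45, 0x00, 0x00, 0x54, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 192, 0, 2, 1, 198, 51, 100, 7,
	} };

	hdr.iph.check = htons(ip_checksum(hdr.bytes, sizeof(hdr.bytes)));
	decrease_ttl(&hdr.iph);
	printf("ttl=%d, header checksum %s\n", hdr.iph.ttl,
	       ip_checksum(hdr.bytes, sizeof(hdr.bytes)) == 0 ?
	       "still valid" : "broken");
	return 0;
}
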