Lines matching refs: skb (net/core/gro.c)

Each line below is quoted from the kernel's generic receive offload (GRO) engine and carries its line number in that file.

94 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
96 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
97 unsigned int offset = skb_gro_offset(skb);
98 unsigned int headlen = skb_headlen(skb);
99 unsigned int len = skb_gro_len(skb);
112 if (p->pp_recycle != skb->pp_recycle)
120 if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
124 if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
131 segs = NAPI_GRO_CB(skb)->count;
158 new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
159 delta_truesize = skb->truesize - new_truesize;
161 skb->truesize = new_truesize;
162 skb->len -= skb->data_len;
163 skb->data_len = 0;
165 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
167 } else if (skb->head_frag) {
170 struct page *page = virt_to_head_page(skb->head);
177 first_offset = skb->data -
189 delta_truesize = skb->truesize - new_truesize;
190 skb->truesize = new_truesize;
191 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
197 skb->destructor = NULL;
198 skb->sk = NULL;
199 delta_truesize = skb->truesize;
205 skb->data_len -= eat;
206 skb->len -= eat;
210 __skb_pull(skb, offset);
213 skb_shinfo(p)->frag_list = skb;
215 NAPI_GRO_CB(p)->last->next = skb;
216 NAPI_GRO_CB(p)->last = skb;
217 __skb_header_release(skb);
230 NAPI_GRO_CB(skb)->same_flow = 1;
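These lines trace the three merge strategies inside skb_gro_receive(), which runs once a protocol's gro_receive handler has matched skb against the held packet p. A condensed sketch of the branch structure (truesize/segs bookkeeping and error paths elided):

	if (headlen <= offset) {
		/* 1) All payload already sits in skb's page frags:
		 *    copy the frag descriptors over to p and free
		 *    skb's now-redundant head.
		 */
	} else if (skb->head_frag) {
		/* 2) skb's linear head is itself page-backed: donate
		 *    that page to p as one more frag (the "stolen
		 *    head" case, NAPI_GRO_FREE_STOLEN_HEAD).
		 */
	} else {
		/* 3) Fall back to chaining the whole skb onto p via
		 *    skb_shinfo(p)->frag_list.
		 */
	}
	/* in all cases: */
	NAPI_GRO_CB(skb)->same_flow = 1;	/* tell dev_gro_receive() the merge happened */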
234 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
236 if (unlikely(p->len + skb->len >= 65536))
240 skb_shinfo(p)->frag_list = skb;
242 NAPI_GRO_CB(p)->last->next = skb;
244 skb_pull(skb, skb_gro_offset(skb));
246 NAPI_GRO_CB(p)->last = skb;
248 p->data_len += skb->len;
251 skb->destructor = NULL;
252 skb->sk = NULL;
253 p->truesize += skb->truesize;
254 p->len += skb->len;
256 NAPI_GRO_CB(skb)->same_flow = 1;
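skb_gro_receive_list() is the lightweight variant: it only chains skb onto p's frag_list and fixes up len/data_len/truesize, keeping the original segment boundaries intact; the 65536 cap reflects the 16-bit IP total-length field. A sketch of how a caller picks between the two helpers, modeled loosely on UDP fraglist GRO (condensed, not verbatim):

	if (NAPI_GRO_CB(skb)->is_flist)
		ret = skb_gro_receive_list(p, skb);	/* preserve segment boundaries */
	else
		ret = skb_gro_receive(p, skb);		/* coalesce payload */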
262 static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
266 __be16 type = skb->protocol;
269 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
271 if (NAPI_GRO_CB(skb)->count == 1) {
272 skb_shinfo(skb)->gso_size = 0;
283 skb, 0);
290 kfree_skb(skb);
295 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
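napi_gro_complete() finishes an aggregated packet: if more than one segment was merged, the protocol's gro_complete callback patches the merged headers before gro_normal_one() queues the skb toward the stack. All of the NAPI_GRO_CB() state used throughout this file is overlaid on the skb's control buffer, which is exactly what the BUILD_BUG_ON() at line 269 guards; the macro itself (from include/net/gro.h):

	#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)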
302 struct sk_buff *skb, *p;
304 list_for_each_entry_safe_reverse(skb, p, head, list) {
305 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
307 skb_list_del_init(skb);
308 napi_gro_complete(napi, skb);
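This reverse walk implements flushing: new flows are added at the head of each bucket list (see line 557 below), so walking in reverse visits the oldest first, and with flush_old set the walk stops at the first skb still merged into during the current jiffy. A typical call (a sketch; the real call sites sit in the NAPI completion path):

	/* push up every held flow not touched during this jiffy */
	napi_gro_flush(napi, true);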
333 static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
341 skb_ext = skb_ext_find(skb, TC_SKB_EXT);
352 const struct sk_buff *skb)
354 unsigned int maclen = skb->dev->hard_header_len;
355 u32 hash = skb_get_hash_raw(skb);
366 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
367 diffs |= p->vlan_all ^ skb->vlan_all;
368 diffs |= skb_metadata_differs(p, skb);
371 skb_mac_header(skb));
374 skb_mac_header(skb),
382 if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
383 diffs |= p->sk != skb->sk;
384 diffs |= skb_metadata_dst_cmp(p, skb);
385 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
387 diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
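gro_list_prepare() marks which held packets could belong to the same flow as the incoming skb. Every field comparison is XOR'd or OR'd into diffs, so a single zero test yields the verdict; the slow_gro block only runs for skbs carrying extra state (socket, dst, conntrack, tc ext). Condensed (not verbatim):

	diffs  = (unsigned long)p->dev ^ (unsigned long)skb->dev;
	diffs |= p->vlan_all ^ skb->vlan_all;
	if (!diffs && maclen == ETH_HLEN)
		diffs |= compare_ether_header(skb_mac_header(p),
					      skb_mac_header(skb));
	NAPI_GRO_CB(p)->same_flow = !diffs;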
394 static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
400 NAPI_GRO_CB(skb)->network_offset = 0;
401 NAPI_GRO_CB(skb)->data_offset = 0;
402 headlen = skb_headlen(skb);
403 NAPI_GRO_CB(skb)->frag0 = skb->data;
404 NAPI_GRO_CB(skb)->frag0_len = headlen;
408 pinfo = skb_shinfo(skb);
413 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
414 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
416 skb->end - skb->tail);
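skb_gro_reset_offset() arms the frag0 fast path: when the device left the header bytes in the first page fragment (or the linear area), frag0/frag0_len let protocol handlers read headers without an skb pull. A handler uses it via skb_gro_header(), roughly as inet_gro_receive() does:

	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct iphdr);
	const struct iphdr *iph;

	iph = skb_gro_header(skb, hlen, off);	/* frag0 first, slow pull as fallback */
	if (unlikely(!iph))
		goto out;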
420 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
422 struct skb_shared_info *pinfo = skb_shinfo(skb);
424 BUG_ON(skb->end - skb->tail < grow);
426 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
428 skb->data_len -= grow;
429 skb->tail += grow;
435 skb_frag_unref(skb, 0);
441 static void gro_try_pull_from_frag0(struct sk_buff *skb)
443 int grow = skb_gro_offset(skb) - skb_headlen(skb);
446 gro_pull_from_frag0(skb, grow);
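gro_pull_from_frag0() copies header bytes that handlers consumed out of frag0 into the linear area, and gro_try_pull_from_frag0() computes how many are owed. Worked example for a pure-frag skb (skb_headlen() == 0) after Ethernet, IPv4 and TCP parsing:

	grow = skb_gro_offset(skb) - skb_headlen(skb)
	     = (14 + 20 + 20) - 0
	     = 54 bytes memcpy'd from frag0 to skb_tail_pointer(skb)

As the comment at line 551 below notes, this must happen before the skb is parked on a GRO list, because later code expects the headers in skb->data.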
468 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
470 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
474 __be16 type = skb->protocol;
479 if (netif_elide_gro(skb->dev))
482 gro_list_prepare(&gro_list->list, skb);
493 skb_set_network_header(skb, skb_gro_offset(skb));
494 skb_reset_mac_len(skb);
498 *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
499 NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
500 NAPI_GRO_CB(skb)->count = 1;
501 if (unlikely(skb_is_gso(skb))) {
502 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
504 if (!skb_is_gso_tcp(skb) ||
505 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
506 NAPI_GRO_CB(skb)->flush = 1;
510 switch (skb->ip_summed) {
512 NAPI_GRO_CB(skb)->csum = skb->csum;
513 NAPI_GRO_CB(skb)->csum_valid = 1;
516 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
522 &gro_list->list, skb);
531 same_flow = NAPI_GRO_CB(skb)->same_flow;
532 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
543 if (NAPI_GRO_CB(skb)->flush)
551 /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
552 gro_try_pull_from_frag0(skb);
553 NAPI_GRO_CB(skb)->age = jiffies;
554 NAPI_GRO_CB(skb)->last = skb;
555 if (!skb_is_gso(skb))
556 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
557 list_add(&skb->list, &gro_list->list);
571 gro_try_pull_from_frag0(skb);
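dev_gro_receive() is the dispatcher: it hashes the skb into one of GRO_HASH_BUCKETS lists, lets gro_list_prepare() mark same-flow candidates, then walks the offload table for a handler matching skb->protocol. Protocols opt in by registering callbacks; a sketch modeled on net/ipv4/af_inet.c:

	static struct packet_offload ip_packet_offload __read_mostly = {
		.type = cpu_to_be16(ETH_P_IP),
		.callbacks = {
			.gso_segment = inet_gso_segment,
			.gro_receive = inet_gro_receive,	/* invoked from dev_gro_receive() */
			.gro_complete = inet_gro_complete,	/* invoked from napi_gro_complete() */
		},
	};

	/* registered once at init time: */
	dev_add_offload(&ip_packet_offload);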
604 struct sk_buff *skb,
609 gro_normal_one(napi, skb, 1);
613 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
614 napi_skb_free_stolen_head(skb);
615 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
616 __kfree_skb(skb);
618 __napi_kfree_skb(skb, SKB_CONSUMED);
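napi_skb_finish() turns dev_gro_receive()'s verdict into an skb disposition: GRO_NORMAL is delivered via gro_normal_one(), GRO_MERGED_FREE releases the emptied skb (with a cheap path for the stolen-head case), and GRO_HELD leaves the skb parked on a bucket list. The verdicts, from include/net/gro.h:

	enum gro_result {
		GRO_MERGED,
		GRO_MERGED_FREE,
		GRO_HELD,
		GRO_NORMAL,
		GRO_CONSUMED,
	};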
630 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
634 skb_mark_napi_id(skb, napi);
635 trace_napi_gro_receive_entry(skb);
637 skb_gro_reset_offset(skb, 0);
639 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
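napi_gro_receive() is what drivers call from their poll routine instead of netif_receive_skb() to opt in to aggregation. A minimal, hypothetical poll handler (all mydrv_* names are made up for illustration):

	static int mydrv_poll(struct napi_struct *napi, int budget)
	{
		struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
		int done = 0;

		while (done < budget) {
			struct sk_buff *skb = mydrv_next_rx_skb(ring);	/* hypothetical helper */

			if (!skb)
				break;
			skb->protocol = eth_type_trans(skb, ring->netdev);
			napi_gro_receive(napi, skb);	/* instead of netif_receive_skb() */
			done++;
		}
		if (done < budget)
			napi_complete_done(napi, done);
		return done;
	}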
646 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
648 if (unlikely(skb->pfmemalloc)) {
649 consume_skb(skb);
652 __skb_pull(skb, skb_headlen(skb));
654 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
655 __vlan_hwaccel_clear_tag(skb);
656 skb->dev = napi->dev;
657 skb->skb_iif = 0;
660 skb->pkt_type = PACKET_HOST;
662 skb->encapsulation = 0;
663 skb_shinfo(skb)->gso_type = 0;
664 skb_shinfo(skb)->gso_size = 0;
665 if (unlikely(skb->slow_gro)) {
666 skb_orphan(skb);
667 skb_ext_reset(skb);
668 nf_reset_ct(skb);
669 skb->slow_gro = 0;
672 napi->skb = skb;
677 struct sk_buff *skb = napi->skb;
679 if (!skb) {
680 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
681 if (skb) {
682 napi->skb = skb;
683 skb_mark_napi_id(skb, napi);
686 return skb;
691 struct sk_buff *skb,
697 __skb_push(skb, ETH_HLEN);
698 skb->protocol = eth_type_trans(skb, skb->dev);
700 gro_normal_one(napi, skb, 1);
704 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
705 napi_skb_free_stolen_head(skb);
707 napi_reuse_skb(napi, skb);
720 * We copy the Ethernet header into skb->data to have a common layout.
724 struct sk_buff *skb = napi->skb;
728 napi->skb = NULL;
730 skb_reset_mac_header(skb);
731 skb_gro_reset_offset(skb, hlen);
733 if (unlikely(!skb_gro_may_pull(skb, hlen))) {
734 eth = skb_gro_header_slow(skb, hlen, 0);
736 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
738 napi_reuse_skb(napi, skb);
742 eth = (const struct ethhdr *)skb->data;
744 if (NAPI_GRO_CB(skb)->frag0 != skb->data)
745 gro_pull_from_frag0(skb, hlen);
747 NAPI_GRO_CB(skb)->frag0 += hlen;
748 NAPI_GRO_CB(skb)->frag0_len -= hlen;
750 __skb_pull(skb, hlen);
757 skb->protocol = eth->h_proto;
759 return skb;
765 struct sk_buff *skb = napi_frags_skb(napi);
767 trace_napi_gro_frags_entry(skb);
769 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
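The napi_get_frags()/napi_gro_frags() pair serves drivers that receive straight into pages: the driver attaches its pages to a recycled skb and GRO parses the Ethernet header itself, which is why napi_frags_skb() copies it into skb->data first and why napi_reuse_skb() can hand the header skb back after a merge. Hypothetical usage (the mydrv_* name is made up):

	static void mydrv_rx_page(struct napi_struct *napi, struct page *page,
				  unsigned int offset, unsigned int len)
	{
		struct sk_buff *skb = napi_get_frags(napi);

		if (unlikely(!skb))
			return;		/* drop: no skb to attach to */

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset, len, PAGE_SIZE);
		napi_gro_frags(napi);	/* no eth_type_trans(): GRO reads the header */
	}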
779 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
784 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
786 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
787 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
790 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
791 !skb->csum_complete_sw)
792 netdev_rx_csum_fault(skb->dev, skb);
795 NAPI_GRO_CB(skb)->csum = wsum;
796 NAPI_GRO_CB(skb)->csum_valid = 1;
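__skb_gro_checksum_complete() is the slow path behind the skb_gro_checksum_validate() helpers: it checksums the whole GRO region, folds in the pseudo-header sum the caller left in NAPI_GRO_CB(skb)->csum, then caches the result so later validations are free. A handler uses it roughly the way tcp4_gro_receive() has:

	/* Don't bother verifying the checksum if we are going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}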